aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
authorRik van Riel <riel@redhat.com>2013-10-07 11:29:41 +0100
committerIngo Molnar <mingo@kernel.org>2013-10-09 14:48:25 +0200
commit2739d3eef3a93a92c366a3a0bb85a0afe09e8b8c (patch)
tree74f70c1af9469a659758d8caaacaafafe17df28f /kernel/sched
parentsched/numa: Use unsigned longs for numa group fault stats (diff)
downloadlinux-dev-2739d3eef3a93a92c366a3a0bb85a0afe09e8b8c.tar.xz
linux-dev-2739d3eef3a93a92c366a3a0bb85a0afe09e8b8c.zip
sched/numa: Retry task_numa_migrate() periodically
Short spikes of CPU load can lead to a task being migrated away from its preferred node for temporary reasons. It is important that the task is migrated back to where it belongs, in order to avoid migrating too much memory to its new location, and generally disturbing a task's NUMA location. This patch fixes NUMA placement for 4 specjbb instances on a 4 node system. Without this patch, things take longer to converge, and processes are not always completely on their own node. Signed-off-by: Rik van Riel <riel@redhat.com> Signed-off-by: Mel Gorman <mgorman@suse.de> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1381141781-10992-64-git-send-email-mgorman@suse.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/fair.c22
1 files changed, 13 insertions, 9 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5b2208e504a4..e9149305c5fa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1259,18 +1259,19 @@ static int task_numa_migrate(struct task_struct *p)
/* Attempt to migrate a task to a CPU on the preferred node. */
static void numa_migrate_preferred(struct task_struct *p)
{
- /* Success if task is already running on preferred CPU */
- p->numa_migrate_retry = 0;
- if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid)
+ /* This task has no NUMA fault statistics yet */
+ if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
return;
- /* This task has no NUMA fault statistics yet */
- if (unlikely(p->numa_preferred_nid == -1))
+ /* Periodically retry migrating the task to the preferred node */
+ p->numa_migrate_retry = jiffies + HZ;
+
+ /* Success if task is already running on preferred CPU */
+ if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid)
return;
/* Otherwise, try migrate to a CPU on the preferred node */
- if (task_numa_migrate(p) != 0)
- p->numa_migrate_retry = jiffies + HZ*5;
+ task_numa_migrate(p);
}
/*
@@ -1629,8 +1630,11 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
task_numa_placement(p);
- /* Retry task to preferred node migration if it previously failed */
- if (p->numa_migrate_retry && time_after(jiffies, p->numa_migrate_retry))
+ /*
+ * Retry task to preferred node migration periodically, in case it
+ * previously failed, or the scheduler moved us.
+ */
+ if (time_after(jiffies, p->numa_migrate_retry))
numa_migrate_preferred(p);
if (migrated)