author      Paul Turner <pjt@google.com>    2020-03-10 18:01:13 -0700
committer   Peter Zijlstra <peterz@infradead.org>    2020-03-20 13:06:18 +0100
commit      46a87b3851f0d6eb05e6d83d5c5a30df0eca8f76 (patch)
tree        b98cf01a4a12e708f063c07788f7a3a7b41a93f2 /lib/cpumask.c
parent      sched/fair: Fix enqueue_task_fair warning (diff)
sched/core: Distribute tasks within affinity masks
Currently, when updating the affinity of tasks via either cpuset.cpus or
sched_setaffinity(), tasks not currently running within the newly specified
mask are arbitrarily assigned to the first CPU within the mask. This
(particularly in the case that we are restricting masks) can result in many
tasks being assigned to the first CPUs of their new masks.

This:

 1) Can induce scheduling delays while the load-balancer has a chance to
    spread them between their new CPUs.

 2) Can aggravate poor load-balancer behavior, since the balancer has a
    difficult time recognizing that a cross-socket imbalance has been
    forced by an affinity mask.

This change adds a new cpumask interface to allow iterated calls to
distribute within the intersection of the provided masks.

The cases that this mainly affects are:

 - modifying cpuset.cpus
 - when tasks join a cpuset
 - when modifying a task's affinity via sched_setaffinity(2)

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Josh Don <joshdon@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Qais Yousef <qais.yousef@arm.com>
Tested-by: Qais Yousef <qais.yousef@arm.com>
Link: https://lkml.kernel.org/r/20200311010113.136465-1-joshdon@google.com
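The hunk below (limited to lib/cpumask.c) only adds the helper itself; the
scheduler-side callers are changed elsewhere in the patch and are not part of
this diffstat. As a rough, hypothetical sketch of the intent (the function
name pick_dest_cpu() and its mask arguments are illustrative, not the actual
kernel/sched/core.c change), a caller that used to take the first CPU of the
intersection can switch to the new helper so successive picks rotate
through it:

/* Hypothetical caller sketch -- not the scheduler code from this patch. */
static int pick_dest_cpu(const struct cpumask *valid_mask,
                         const struct cpumask *new_mask)
{
        /*
         * Before: cpumask_any_and() resolves to the first CPU of the
         * intersection, so every migrated task piles onto that one CPU.
         * After: rotate through the intersection across successive calls.
         */
        return cpumask_any_and_distribute(valid_mask, new_mask);
}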
Diffstat (limited to 'lib/cpumask.c')
-rw-r--r--    lib/cpumask.c    29
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 0cb672eb107c..fb22fb266f93 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -232,3 +232,32 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
+
+static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
+
+/**
+ * Returns an arbitrary cpu within src1p & src2p.
+ *
+ * Iterated calls using the same srcp1 and srcp2 will be distributed within
+ * their intersection.
+ *
+ * Returns >= nr_cpu_ids if the intersection is empty.
+ */
+int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ int next, prev;
+
+ /* NOTE: our first selection will skip 0. */
+ prev = __this_cpu_read(distribute_cpu_mask_prev);
+
+ next = cpumask_next_and(prev, src1p, src2p);
+ if (next >= nr_cpu_ids)
+ next = cpumask_first_and(src1p, src2p);
+
+ if (next < nr_cpu_ids)
+ __this_cpu_write(distribute_cpu_mask_prev, next);
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_any_and_distribute);
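
As a quick illustration of the round-robin behaviour, here is a self-contained
test-module sketch. It is entirely hypothetical: the module name, the chosen
masks, and the expected output are assumptions, and it presumes at least six
possible CPUs and that the calls stay on one CPU (the cursor is per-CPU).

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init distribute_demo_init(void)
{
        cpumask_var_t a, b;
        int i, cpu;

        if (!zalloc_cpumask_var(&a, GFP_KERNEL))
                return -ENOMEM;
        if (!zalloc_cpumask_var(&b, GFP_KERNEL)) {
                free_cpumask_var(a);
                return -ENOMEM;
        }

        /* a = {0-3}, b = {2-5}; the intersection is {2,3}. */
        for (i = 0; i <= 3; i++)
                cpumask_set_cpu(i, a);
        for (i = 2; i <= 5; i++)
                cpumask_set_cpu(i, b);

        for (i = 0; i < 4; i++) {
                cpu = cpumask_any_and_distribute(a, b);
                /* Expected picks: 2, 3, 2, 3 (same-CPU calls, >= 6 CPUs). */
                pr_info("pick %d: cpu %d\n", i, cpu);
        }

        free_cpumask_var(b);
        free_cpumask_var(a);
        return 0;
}
module_init(distribute_demo_init);

static void __exit distribute_demo_exit(void)
{
}
module_exit(distribute_demo_exit);

MODULE_LICENSE("GPL");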