author		Peter Zijlstra <peterz@infradead.org>	2021-01-15 19:08:36 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2021-01-22 15:09:43 +0100
commit		640f17c82460e9724fd256f0a1f5d99e7ff0bda4
tree		843671442486fff2c92b7b7df424524a2f4fd96a	/kernel/workqueue.c
parent		workqueue: Tag bound workers with KTHREAD_IS_PER_CPU
workqueue: Restrict affinity change to rescuer
create_worker() will already set the right affinity using
kthread_bind_mask(); this means only the rescuer needs to change its
affinity.

However, during cpu-hot-unplug a regular task is not allowed to run on
an online&&!active CPU, because it would be pushed away quite
aggressively. We need KTHREAD_IS_PER_CPU to survive in that
environment.

Therefore set the affinity after getting that magic flag.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210121103506.826629830@infradead.org
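For reference, this is how the attach path reads with the patch applied: a
condensed sketch reconstructed from the hunks below, not an authoritative
copy of kernel/workqueue.c. The ordering is the whole point: the worker is
marked per-CPU before any affinity change happens.

static void worker_attach_to_pool(struct worker *worker,
				  struct worker_pool *pool)
{
	mutex_lock(&wq_pool_attach_mutex);

	/*
	 * Gain KTHREAD_IS_PER_CPU first: a per-CPU kthread may keep
	 * running on an online&&!active CPU during hot-unplug, where a
	 * regular task would be pushed away.
	 */
	if (pool->flags & POOL_DISASSOCIATED)
		worker->flags |= WORKER_UNBOUND;
	else
		kthread_set_per_cpu(worker->task, pool->cpu);

	/*
	 * Regular workers were already bound by kthread_bind_mask() in
	 * create_worker(); only the rescuer migrates between pools, so
	 * only it needs the affinity change -- after the flag above.
	 */
	if (worker->rescue_wq)
		set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);

	list_add_tail(&worker->node, &pool->workers);
	worker->pool = pool;

	mutex_unlock(&wq_pool_attach_mutex);
}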
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index cce34336fbd7..894bb885b40b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1849,12 +1849,6 @@ static void worker_attach_to_pool(struct worker *worker,
 	mutex_lock(&wq_pool_attach_mutex);
 
 	/*
-	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
-	 * online CPUs. It'll be re-applied when any of the CPUs come up.
-	 */
-	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
-	/*
 	 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
 	 * stable across this function. See the comments above the flag
 	 * definition for details.
@@ -1864,6 +1858,9 @@ static void worker_attach_to_pool(struct worker *worker,
 	else
 		kthread_set_per_cpu(worker->task, pool->cpu);
 
+	if (worker->rescue_wq)
+		set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
+
 	list_add_tail(&worker->node, &pool->workers);
 	worker->pool = pool;
 
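For completeness, the premise that regular workers are already bound at
creation time: a heavily condensed sketch of the create path, paraphrased
from create_worker() in kernel/workqueue.c. Error handling and the worker
name buffer are elided; treat the surrounding control flow as illustrative
rather than verbatim.

static struct worker *create_worker(struct worker_pool *pool)
{
	struct worker *worker = alloc_worker(pool->node);
	...
	worker->task = kthread_create_on_node(worker_thread, worker,
					      pool->node, "kworker/%s", id_buf);
	...
	/*
	 * Bind the new kthread before it first runs; this is why the
	 * attach path no longer needs set_cpus_allowed_ptr() for it.
	 */
	kthread_bind_mask(worker->task, pool->attrs->cpumask);

	/* successful, attach the worker to the pool */
	worker_attach_to_pool(worker, pool);
	...
	return worker;
}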