From 1bda3f8087fce9063da0b8aef87f17a3fe541aca Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 21 Feb 2018 05:17:26 +0100
Subject: sched/isolation: Isolate workqueues when "nohz_full=" is set

As we prepare for offloading the residual 1hz scheduler ticks to
workqueue, let's affine those to housekeepers so that they don't
interrupt the CPUs that don't want to be disturbed.

Signed-off-by: Frederic Weisbecker
Reviewed-by: Thomas Gleixner
Acked-by: Peter Zijlstra
Cc: Chris Metcalf
Cc: Christoph Lameter
Cc: Linus Torvalds
Cc: Luiz Capitulino
Cc: Mike Galbraith
Cc: Paul E. McKenney
Cc: Rik van Riel
Cc: Wanpeng Li
Link: http://lkml.kernel.org/r/1519186649-3242-5-git-send-email-frederic@kernel.org
Signed-off-by: Ingo Molnar
---
 kernel/workqueue.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel/workqueue.c')

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 017044c26233..593dbe749174 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5565,12 +5565,13 @@ static void __init wq_numa_init(void)
 int __init workqueue_init_early(void)
 {
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
+	int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
 	int i, cpu;
 
 	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
-	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN));
+	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
--
cgit v1.2.3-59-g8ed1b

From 537f4146c53c95aac977852b371bafb9c6755ee1 Mon Sep 17 00:00:00 2001
From: Arvind Yadav
Date: Tue, 6 Mar 2018 15:35:43 +0530
Subject: workqueue: use put_device() instead of kfree()

Never directly free @dev after calling device_register(), even
if it returned an error! Always use put_device() to give up the
reference initialized in this function instead.

Signed-off-by: Arvind Yadav
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/workqueue.c')

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bb9a519cbf50..ccd1080dd6e7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5337,7 +5337,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
 
 	ret = device_register(&wq_dev->dev);
 	if (ret) {
-		kfree(wq_dev);
+		put_device(&wq_dev->dev);
 		wq->wq_dev = NULL;
 		return ret;
 	}
--
cgit v1.2.3-59-g8ed1b

From 6417250d3f894e66a68ba1cd93676143f2376a6f Mon Sep 17 00:00:00 2001
From: Stephen Hemminger
Date: Tue, 6 Mar 2018 19:34:42 -0800
Subject: workqueue: remove unused cancel_work()

Found this by accident. There are no usages of bare cancel_work()
in current kernel source.

Signed-off-by: Stephen Hemminger
Signed-off-by: Tejun Heo
---
 include/linux/workqueue.h | 1 -
 kernel/workqueue.c        | 8 --------
 2 files changed, 9 deletions(-)

(limited to 'kernel/workqueue.c')

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index bc0cda180c8b..0c3301421c57 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -456,7 +456,6 @@ extern int schedule_on_each_cpu(work_func_t func);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 extern bool flush_work(struct work_struct *work);
-extern bool cancel_work(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);
 
 extern bool flush_delayed_work(struct delayed_work *dwork);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ccd1080dd6e7..6ec6ba65127b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3018,14 +3018,6 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
 	return ret;
 }
 
-/*
- * See cancel_delayed_work()
- */
-bool cancel_work(struct work_struct *work)
-{
-	return __cancel_work(work, false);
-}
-
 /**
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel
--
cgit v1.2.3-59-g8ed1b

From 05f0fe6b74dbd7690a4cbd61810948b7d575576a Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 14 Mar 2018 12:45:13 -0700
Subject: RCU, workqueue: Implement rcu_work

There are cases where RCU callback needs to be bounced to a sleepable
context.  This is currently done by the RCU callback queueing a work
item, which can be cumbersome to write and confusing to read.

This patch introduces rcu_work, a workqueue work variant which gets
executed after a RCU grace period, and converts the open coded bouncing
in fs/aio and kernel/cgroup.

v3: Dropped queue_rcu_work_on().  Documented rcu grace period behavior
    after queue_rcu_work().

v2: Use rcu_barrier() instead of synchronize_rcu() to wait for
    completion of previously queued rcu callback as per Paul.

Signed-off-by: Tejun Heo
Acked-by: "Paul E. McKenney"
Cc: Linus Torvalds
---
 include/linux/workqueue.h | 23 ++++++++++++++++++++
 kernel/workqueue.c        | 54 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 77 insertions(+)

(limited to 'kernel/workqueue.c')

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index bc0cda180c8b..d026f8f818cc 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <linux/rcupdate.h>
 
 struct workqueue_struct;
 
@@ -120,6 +121,14 @@ struct delayed_work {
 	int cpu;
 };
 
+struct rcu_work {
+	struct work_struct work;
+	struct rcu_head rcu;
+
+	/* target workqueue ->rcu uses to queue ->work */
+	struct workqueue_struct *wq;
+};
+
 /**
  * struct workqueue_attrs - A struct for workqueue attributes.
  *
@@ -151,6 +160,11 @@ static inline struct delayed_work *to_delayed_work(struct work_struct *work)
 	return container_of(work, struct delayed_work, work);
 }
 
+static inline struct rcu_work *to_rcu_work(struct work_struct *work)
+{
+	return container_of(work, struct rcu_work, work);
+}
+
 struct execute_work {
 	struct work_struct work;
 };
@@ -266,6 +280,12 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
 	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
 
+#define INIT_RCU_WORK(_work, _func)					\
+	INIT_WORK(&(_work)->work, (_func))
+
+#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
+	INIT_WORK_ONSTACK(&(_work)->work, (_func))
+
 /**
  * work_pending - Find out whether a work item is currently pending
  * @work: The work item in question
@@ -447,6 +467,7 @@ extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *work, unsigned long delay);
 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *dwork, unsigned long delay);
+extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
@@ -463,6 +484,8 @@ extern bool flush_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
+extern bool flush_rcu_work(struct rcu_work *rwork);
+
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
 				     int max_active);
 extern struct work_struct *current_work(void);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bb9a519cbf50..7df85fa9f651 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1604,6 +1604,40 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
 
+static void rcu_work_rcufn(struct rcu_head *rcu)
+{
+	struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
+
+	/* read the comment in __queue_work() */
+	local_irq_disable();
+	__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
+	local_irq_enable();
+}
+
+/**
+ * queue_rcu_work - queue work after a RCU grace period
+ * @wq: workqueue to use
+ * @rwork: work to queue
+ *
+ * Return: %false if @rwork was already pending, %true otherwise.  Note
+ * that a full RCU grace period is guaranteed only after a %true return.
+ * While @rwork is guaranteed to be executed after a %false return, the
+ * execution may happen before a full RCU grace period has passed.
+ */
+bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
+{
+	struct work_struct *work = &rwork->work;
+
+	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+		rwork->wq = wq;
+		call_rcu(&rwork->rcu, rcu_work_rcufn);
+		return true;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(queue_rcu_work);
+
 /**
  * worker_enter_idle - enter idle state
  * @worker: worker which is entering idle state
@@ -3001,6 +3035,26 @@ bool flush_delayed_work(struct delayed_work *dwork)
 }
 EXPORT_SYMBOL(flush_delayed_work);
 
+/**
+ * flush_rcu_work - wait for a rwork to finish executing the last queueing
+ * @rwork: the rcu work to flush
+ *
+ * Return:
+ * %true if flush_rcu_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_rcu_work(struct rcu_work *rwork)
+{
+	if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
+		rcu_barrier();
+		flush_work(&rwork->work);
+		return true;
+	} else {
+		return flush_work(&rwork->work);
+	}
+}
+EXPORT_SYMBOL(flush_rcu_work);
+
 static bool __cancel_work(struct work_struct *work, bool is_dwork)
 {
 	unsigned long flags;
--
cgit v1.2.3-59-g8ed1b

From 5826cc8f5a6094ce6565a8e0008c9f07ebf724b1 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Tue, 20 Mar 2018 17:24:05 +0800
Subject: workqueue: fix the comments of nr_idle

Since the worker rebinding behavior was refactored, there is no idle
worker off the idle_list now.  The comment is outdated and can be just
removed.  It also groups nr_workers and nr_idle together.

Signed-off-by: Lai Jiangshan
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'kernel/workqueue.c')

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7df85fa9f651..eeebfa65b0ef 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -153,10 +153,9 @@ struct worker_pool {
 	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
 
 	struct list_head	worklist;	/* L: list of pending works */
-	int			nr_workers;	/* L: total number of workers */
 
-	/* nr_idle includes the ones off idle_list for rebinding */
-	int			nr_idle;	/* L: currently idle ones */
+	int			nr_workers;	/* L: total number of workers */
+	int			nr_idle;	/* L: currently idle workers */
 
 	struct list_head	idle_list;	/* X: list of idle workers */
 	struct timer_list	idle_timer;	/* L: worker idle timeout */
--
cgit v1.2.3-59-g8ed1b

From f75da8a8a918d7c343a2ec95d1ed99e5689e0f23 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Tue, 20 Mar 2018 17:24:32 +0800
Subject: workqueue: remove the comment about the old manager_arb mutex

The manager_arb mutex doesn't exist any more.

Signed-off-by: Lai Jiangshan
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'kernel/workqueue.c')

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index eeebfa65b0ef..636c6d702b93 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -165,7 +165,6 @@ struct worker_pool {
 	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 						/* L: hash of busy workers */
 
-	/* see manage_workers() for details on the two manager mutexes */
 	struct worker		*manager;	/* L: purely informational */
 	struct mutex		attach_mutex;	/* attach/detach exclusion */
 	struct list_head	workers;	/* A: attached workers */
--
cgit v1.2.3-59-g8ed1b
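
As background for "workqueue: use put_device() instead of kfree()" above, here is a minimal sketch of the general driver-model error-handling pattern that patch applies. It is not taken from any patch in this series; struct my_obj, my_obj_release() and my_obj_register() are hypothetical names used only for illustration.

```c
/*
 * Illustrative sketch (not part of the patches above).  Shows why a
 * failed device_register() must be followed by put_device(), not kfree().
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct my_obj {
	struct device dev;
};

/* Called when the last reference on the embedded struct device is dropped. */
static void my_obj_release(struct device *dev)
{
	kfree(container_of(dev, struct my_obj, dev));
}

static struct my_obj *my_obj_register(struct device *parent)
{
	struct my_obj *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->dev.parent = parent;
	obj->dev.release = my_obj_release;
	dev_set_name(&obj->dev, "my_obj");

	ret = device_register(&obj->dev);
	if (ret) {
		/*
		 * device_register() initialized a reference on obj->dev even
		 * though it failed, so drop it with put_device(); ->release()
		 * then frees obj.  Calling kfree(obj) here would be a bug.
		 */
		put_device(&obj->dev);
		return ERR_PTR(ret);
	}

	return obj;
}
```

The key point is that device_register() takes a reference on the embedded struct device even on failure, so the only correct way to dispose of the object afterwards is put_device(), which ends up invoking the ->release() callback.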
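
Similarly, "RCU, workqueue: Implement rcu_work" above only adds the infrastructure; the converted fs/aio and kernel/cgroup call sites are not part of this excerpt. The following hypothetical sketch shows what the open-coded call_rcu()-then-queue_work() bouncing collapses into. struct foo, foo_release_workfn() and foo_release() are made-up names; INIT_RCU_WORK(), queue_rcu_work(), to_rcu_work() and flush_rcu_work() are the interfaces introduced by the patch.

```c
/*
 * Illustrative sketch (not part of the patches above): freeing an object
 * from process context after an RCU grace period using rcu_work.
 */
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
	struct rcu_work rwork;	/* embeds a work_struct and an rcu_head */
	/* ... payload ... */
};

/* Runs in process context, after an RCU grace period has elapsed. */
static void foo_release_workfn(struct work_struct *work)
{
	struct foo *f = container_of(to_rcu_work(work), struct foo, rwork);

	kfree(f);	/* may sleep or take mutexes here if needed */
}

static void foo_release(struct foo *f)
{
	INIT_RCU_WORK(&f->rwork, foo_release_workfn);
	/* Queue on system_wq; the work executes after an RCU grace period. */
	queue_rcu_work(system_wq, &f->rwork);
}
```

A caller that needs to wait for both the grace period and the execution of a previously queued rcu_work item can use flush_rcu_work(&f->rwork), as documented in the patch.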