author | 2025-05-27 20:49:06 -0700
committer | 2025-05-27 20:49:06 -0700
commit | 91ad250cbe57855362c99642a454294e8d314c7d (patch)
tree | 9da704e973df63e2789274f729cad7d9c7e803d5 /kernel
parent | Merge tag 'sysctl-6.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/sysctl/sysctl (diff)
parent | workqueue: fix typo in comment (diff)
Merge tag 'wq-for-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue updates from Tejun Heo:
"Fix statistic update race condition and a couple documentation
updates"
* tag 'wq-for-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
workqueue: fix typo in comment
workqueue: Fix race condition in wq->stats incrementation
workqueue: Better document teardown for delayed_work
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/workqueue.c | 15
1 file changed, 14 insertions, 1 deletion
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9e6cf45f0972..3bef0754cf73 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3241,7 +3241,7 @@ __acquires(&pool->lock)
 	 * point will only record its address.
 	 */
 	trace_workqueue_execute_end(work, worker->current_func);
-	pwq->stats[PWQ_STAT_COMPLETED]++;
+
 	lock_map_release(&lockdep_map);
 	if (!bh_draining)
 		lock_map_release(pwq->wq->lockdep_map);
@@ -3272,6 +3272,8 @@ __acquires(&pool->lock)
 
 	raw_spin_lock_irq(&pool->lock);
 
+	pwq->stats[PWQ_STAT_COMPLETED]++;
+
 	/*
 	 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked
 	 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than
@@ -5837,6 +5839,17 @@ static bool pwq_busy(struct pool_workqueue *pwq)
  * @wq: target workqueue
  *
  * Safely destroy a workqueue. All work currently pending will be done first.
+ *
+ * This function does NOT guarantee that non-pending work that has been
+ * submitted with queue_delayed_work() and similar functions will be done
+ * before destroying the workqueue. The fundamental problem is that, currently,
+ * the workqueue has no way of accessing non-pending delayed_work. delayed_work
+ * is only linked on the timer-side. All delayed_work must, therefore, be
+ * canceled before calling this function.
+ *
+ * TODO: It would be better if the problem described above wouldn't exist and
+ * destroy_workqueue() would cleanly cancel all pending and non-pending
+ * delayed_work.
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
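The kerneldoc added above amounts to a teardown rule for workqueue users: cancel every delayed_work you own before calling destroy_workqueue(), because a delayed_work whose timer has not fired yet is reachable only from the timer and cannot be flushed by the workqueue itself. Below is a minimal sketch of that pattern as a hypothetical module; the names example_wq, example_dwork and example_work_fn are illustrative and are not part of this patch.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *example_wq;	/* hypothetical owner of the work */
static struct delayed_work example_dwork;

static void example_work_fn(struct work_struct *work)
{
	pr_info("example delayed work ran\n");
}

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&example_dwork, example_work_fn);
	/* Arms a timer; the work is not yet pending on example_wq. */
	queue_delayed_work(example_wq, &example_dwork, msecs_to_jiffies(5000));
	return 0;
}

static void __exit example_exit(void)
{
	/*
	 * Cancel first: destroy_workqueue() only flushes work that is
	 * already pending, so it cannot wait for a timer that has not
	 * fired yet.
	 */
	cancel_delayed_work_sync(&example_dwork);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

cancel_delayed_work_sync() covers both sides of the race on the teardown path: it deactivates a timer that has not fired yet, and it waits for the callback if the work item is already executing.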