author    Linus Torvalds <torvalds@linux-foundation.org>  2020-03-10 14:48:22 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-03-10 14:48:22 -0700
commit    2c1aca4bd3fefc57018773cc8771f71c9bfbdc1f (patch)
tree      d52c532985870176a65a0253ca5919bc8132063a /include/linux
parent    Merge tag 'ktest-v5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-ktest (diff)
parent    workqueue: don't use wq_select_unbound_cpu() for bound works (diff)
Merge branch 'for-5.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue fixes from Tejun Heo:
 "Workqueue has been incorrectly round-robining per-cpu work items.
  Hillf's patch fixes that.

  The other patch documents memory-ordering properties of workqueue
  operations"

* 'for-5.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: don't use wq_select_unbound_cpu() for bound works
  workqueue: Document (some) memory-ordering properties of {queue,schedule}_work()
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/workqueue.h  16
1 file changed, 16 insertions, 0 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4261d1c6e87b..e48554e6526c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -487,6 +487,19 @@ extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
*
* We queue the work to the CPU on which it was submitted, but if the CPU dies
* it can be processed by another CPU.
+ *
+ * Memory-ordering properties: If it returns %true, guarantees that all stores
+ * preceding the call to queue_work() in the program order will be visible from
+ * the CPU which will execute @work by the time such work executes, e.g.,
+ *
+ * { x is initially 0 }
+ *
+ * CPU0                            CPU1
+ *
+ * WRITE_ONCE(x, 1);               [ @work is being executed ]
+ * r0 = queue_work(wq, work);        r1 = READ_ONCE(x);
+ *
+ * Forbids: r0 == true && r1 == 0
*/
static inline bool queue_work(struct workqueue_struct *wq,
struct work_struct *work)
@@ -546,6 +559,9 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
* This puts a job in the kernel-global workqueue if it was not already
* queued and leaves it in the same position on the kernel-global
* workqueue otherwise.
+ *
+ * Shares the same memory-ordering properties of queue_work(), cf. the
+ * DocBook header of queue_work().
*/
static inline bool schedule_work(struct work_struct *work)
{
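
As a usage illustration (not part of this commit; the module and all "frob"
identifiers below are hypothetical), here is a minimal sketch of how a caller
can rely on the guarantee documented above: plain stores made before
queue_work() need no extra barrier to be visible to the work function, as long
as queue_work() returned %true.

#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical request: @payload is written before the work is queued. */
struct frob_req {
	int payload;
	struct work_struct work;
};

static struct frob_req req;

static void frob_worker(struct work_struct *work)
{
	struct frob_req *r = container_of(work, struct frob_req, work);

	/*
	 * No explicit barrier or READ_ONCE() pairing is needed for
	 * @payload: if the matching queue_work() returned %true, every
	 * store preceding that call is visible here (the litmus test
	 * above forbids r0 == true && r1 == 0).
	 */
	pr_info("frob: payload=%d\n", r->payload);
}

static int __init frob_init(void)
{
	INIT_WORK(&req.work, frob_worker);

	req.payload = 42;	/* plain store, ordered by queue_work() */

	/* A %false return means the work was already pending, in which
	 * case this store is not guaranteed to be seen by the current
	 * execution of frob_worker(). */
	if (!queue_work(system_wq, &req.work))
		pr_warn("frob: work already pending\n");
	return 0;
}

static void __exit frob_exit(void)
{
	flush_work(&req.work);
}

module_init(frob_init);
module_exit(frob_exit);
MODULE_LICENSE("GPL");

Using schedule_work(&req.work) instead would give the same ordering guarantee:
as the second hunk notes, it shares the memory-ordering properties of
queue_work(), being a thin wrapper that queues onto the kernel-global
workqueue.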