From b56c0d8937e665a27d90517ee7a746d0aa05af46 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: kthread: implement kthread_worker Implement simple work processor for kthread. This is to ease using kthread. Single thread workqueue used to be used for things like this but workqueue won't guarantee fixed kthread association anymore to enable worker sharing. This can be used in cases where specific kthread association is necessary, for example, when it should have RT priority or be assigned to certain cgroup. Signed-off-by: Tejun Heo Cc: Andrew Morton --- include/linux/kthread.h | 64 +++++++++++++++++++++ kernel/kthread.c | 149 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 213 insertions(+) diff --git a/include/linux/kthread.h b/include/linux/kthread.h index aabc8a13ba71..f93cb6979edc 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -34,4 +34,68 @@ int kthread_should_stop(void); int kthreadd(void *unused); extern struct task_struct *kthreadd_task; +/* + * Simple work processor based on kthread. + * + * This provides easier way to make use of kthreads. A kthread_work + * can be queued and flushed using queue/flush_kthread_work() + * respectively. Queued kthread_works are processed by a kthread + * running kthread_worker_fn(). + * + * A kthread_work can't be freed while it is executing. + */ +struct kthread_work; +typedef void (*kthread_work_func_t)(struct kthread_work *work); + +struct kthread_worker { + spinlock_t lock; + struct list_head work_list; + struct task_struct *task; +}; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + wait_queue_head_t done; + atomic_t flushing; + int queue_seq; + int done_seq; +}; + +#define KTHREAD_WORKER_INIT(worker) { \ + .lock = SPIN_LOCK_UNLOCKED, \ + .work_list = LIST_HEAD_INIT((worker).work_list), \ + } + +#define KTHREAD_WORK_INIT(work, fn) { \ + .node = LIST_HEAD_INIT((work).node), \ + .func = (fn), \ + .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done), \ + .flushing = ATOMIC_INIT(0), \ + } + +#define DEFINE_KTHREAD_WORKER(worker) \ + struct kthread_worker worker = KTHREAD_WORKER_INIT(worker) + +#define DEFINE_KTHREAD_WORK(work, fn) \ + struct kthread_work work = KTHREAD_WORK_INIT(work, fn) + +static inline void init_kthread_worker(struct kthread_worker *worker) +{ + *worker = (struct kthread_worker)KTHREAD_WORKER_INIT(*worker); +} + +static inline void init_kthread_work(struct kthread_work *work, + kthread_work_func_t fn) +{ + *work = (struct kthread_work)KTHREAD_WORK_INIT(*work, fn); +} + +int kthread_worker_fn(void *worker_ptr); + +bool queue_kthread_work(struct kthread_worker *worker, + struct kthread_work *work); +void flush_kthread_work(struct kthread_work *work); +void flush_kthread_worker(struct kthread_worker *worker); + #endif /* _LINUX_KTHREAD_H */ diff --git a/kernel/kthread.c b/kernel/kthread.c index 83911c780175..8b63c7fee73b 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include static DEFINE_SPINLOCK(kthread_create_lock); @@ -247,3 +249,150 @@ int kthreadd(void *unused) return 0; } + +/** + * kthread_worker_fn - kthread function to process kthread_worker + * @worker_ptr: pointer to initialized kthread_worker + * + * This function can be used as @threadfn to kthread_create() or + * kthread_run() with @worker_ptr argument pointing to an initialized + * kthread_worker. The started kthread will process work_list until + * the it is stopped with kthread_stop(). 
A kthread can also call + * this function directly after extra initialization. + * + * Different kthreads can be used for the same kthread_worker as long + * as there's only one kthread attached to it at any given time. A + * kthread_worker without an attached kthread simply collects queued + * kthread_works. + */ +int kthread_worker_fn(void *worker_ptr) +{ + struct kthread_worker *worker = worker_ptr; + struct kthread_work *work; + + WARN_ON(worker->task); + worker->task = current; +repeat: + set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */ + + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); + spin_lock_irq(&worker->lock); + worker->task = NULL; + spin_unlock_irq(&worker->lock); + return 0; + } + + work = NULL; + spin_lock_irq(&worker->lock); + if (!list_empty(&worker->work_list)) { + work = list_first_entry(&worker->work_list, + struct kthread_work, node); + list_del_init(&work->node); + } + spin_unlock_irq(&worker->lock); + + if (work) { + __set_current_state(TASK_RUNNING); + work->func(work); + smp_wmb(); /* wmb worker-b0 paired with flush-b1 */ + work->done_seq = work->queue_seq; + smp_mb(); /* mb worker-b1 paired with flush-b0 */ + if (atomic_read(&work->flushing)) + wake_up_all(&work->done); + } else if (!freezing(current)) + schedule(); + + try_to_freeze(); + goto repeat; +} +EXPORT_SYMBOL_GPL(kthread_worker_fn); + +/** + * queue_kthread_work - queue a kthread_work + * @worker: target kthread_worker + * @work: kthread_work to queue + * + * Queue @work to work processor @task for async execution. @task + * must have been created with kthread_worker_create(). Returns %true + * if @work was successfully queued, %false if it was already pending. + */ +bool queue_kthread_work(struct kthread_worker *worker, + struct kthread_work *work) +{ + bool ret = false; + unsigned long flags; + + spin_lock_irqsave(&worker->lock, flags); + if (list_empty(&work->node)) { + list_add_tail(&work->node, &worker->work_list); + work->queue_seq++; + if (likely(worker->task)) + wake_up_process(worker->task); + ret = true; + } + spin_unlock_irqrestore(&worker->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(queue_kthread_work); + +/** + * flush_kthread_work - flush a kthread_work + * @work: work to flush + * + * If @work is queued or executing, wait for it to finish execution. + */ +void flush_kthread_work(struct kthread_work *work) +{ + int seq = work->queue_seq; + + atomic_inc(&work->flushing); + + /* + * mb flush-b0 paired with worker-b1, to make sure either + * worker sees the above increment or we see done_seq update. + */ + smp_mb__after_atomic_inc(); + + /* A - B <= 0 tests whether B is in front of A regardless of overflow */ + wait_event(work->done, seq - work->done_seq <= 0); + atomic_dec(&work->flushing); + + /* + * rmb flush-b1 paired with worker-b0, to make sure our caller + * sees every change made by work->func(). + */ + smp_mb__after_atomic_dec(); +} +EXPORT_SYMBOL_GPL(flush_kthread_work); + +struct kthread_flush_work { + struct kthread_work work; + struct completion done; +}; + +static void kthread_flush_work_fn(struct kthread_work *work) +{ + struct kthread_flush_work *fwork = + container_of(work, struct kthread_flush_work, work); + complete(&fwork->done); +} + +/** + * flush_kthread_worker - flush all current works on a kthread_worker + * @worker: worker to flush + * + * Wait until all currently executing or pending works on @worker are + * finished. 
+ */ +void flush_kthread_worker(struct kthread_worker *worker) +{ + struct kthread_flush_work fwork = { + KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), + COMPLETION_INITIALIZER_ONSTACK(fwork.done), + }; + + queue_kthread_work(worker, &fwork.work); + wait_for_completion(&fwork.done); +} +EXPORT_SYMBOL_GPL(flush_kthread_worker); -- cgit v1.2.3-59-g8ed1b From 7bc465605ffa90b281d6b774fcb13911636a6d45 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: ivtv: use kthread_worker instead of workqueue Upcoming workqueue updates will no longer guarantee fixed workqueue to worker kthread association, so giving RT priority to the irq worker won't work. Use kthread_worker which guarantees specific kthread association instead. This also makes setting the priority cleaner. Signed-off-by: Tejun Heo Cc: Andy Walls Cc: Andrew Morton Cc: ivtv-devel@ivtvdriver.org Cc: linux-media@vger.kernel.org --- drivers/media/video/ivtv/ivtv-driver.c | 26 ++++++++++++++++---------- drivers/media/video/ivtv/ivtv-driver.h | 8 ++++---- drivers/media/video/ivtv/ivtv-irq.c | 15 +++------------ drivers/media/video/ivtv/ivtv-irq.h | 2 +- 4 files changed, 24 insertions(+), 27 deletions(-) diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c index 1b79475ca134..49e0b1cc3544 100644 --- a/drivers/media/video/ivtv/ivtv-driver.c +++ b/drivers/media/video/ivtv/ivtv-driver.c @@ -695,6 +695,8 @@ done: */ static int __devinit ivtv_init_struct1(struct ivtv *itv) { + struct sched_param param = { .sched_priority = 99 }; + itv->base_addr = pci_resource_start(itv->pdev, 0); itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */ itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */ @@ -706,13 +708,17 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv) spin_lock_init(&itv->lock); spin_lock_init(&itv->dma_reg_lock); - itv->irq_work_queues = create_singlethread_workqueue(itv->v4l2_dev.name); - if (itv->irq_work_queues == NULL) { - IVTV_ERR("Could not create ivtv workqueue\n"); + init_kthread_worker(&itv->irq_worker); + itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker, + itv->v4l2_dev.name); + if (IS_ERR(itv->irq_worker_task)) { + IVTV_ERR("Could not create ivtv task\n"); return -1; } + /* must use the FIFO scheduler as it is realtime sensitive */ + sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, ¶m); - INIT_WORK(&itv->irq_work_queue, ivtv_irq_work_handler); + init_kthread_work(&itv->irq_work, ivtv_irq_work_handler); /* start counting open_id at 1 */ itv->open_id = 1; @@ -996,7 +1002,7 @@ static int __devinit ivtv_probe(struct pci_dev *pdev, /* PCI Device Setup */ retval = ivtv_setup_pci(itv, pdev, pci_id); if (retval == -EIO) - goto free_workqueue; + goto free_worker; if (retval == -ENXIO) goto free_mem; @@ -1208,8 +1214,8 @@ free_mem: release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE); if (itv->has_cx23415) release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE); -free_workqueue: - destroy_workqueue(itv->irq_work_queues); +free_worker: + kthread_stop(itv->irq_worker_task); err: if (retval == 0) retval = -ENODEV; @@ -1353,9 +1359,9 @@ static void ivtv_remove(struct pci_dev *pdev) ivtv_set_irq_mask(itv, 0xffffffff); del_timer_sync(&itv->dma_timer); - /* Stop all Work Queues */ - flush_workqueue(itv->irq_work_queues); - destroy_workqueue(itv->irq_work_queues); + /* Kill irq worker */ + flush_kthread_worker(&itv->irq_worker); + kthread_stop(itv->irq_worker_task); 
ivtv_streams_cleanup(itv, 1); ivtv_udma_free(itv); diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h index 5b45fd2b2645..51f7f2a9cf5e 100644 --- a/drivers/media/video/ivtv/ivtv-driver.h +++ b/drivers/media/video/ivtv/ivtv-driver.h @@ -51,7 +51,7 @@ #include #include #include -#include +#include #include #include #include @@ -257,7 +257,6 @@ struct ivtv_mailbox_data { #define IVTV_F_I_DEC_PAUSED 20 /* the decoder is paused */ #define IVTV_F_I_INITED 21 /* set after first open */ #define IVTV_F_I_FAILED 22 /* set if first open failed */ -#define IVTV_F_I_WORK_INITED 23 /* worker thread was initialized */ /* Event notifications */ #define IVTV_F_I_EV_DEC_STOPPED 28 /* decoder stopped event */ @@ -663,8 +662,9 @@ struct ivtv { /* Interrupts & DMA */ u32 irqmask; /* active interrupts */ u32 irq_rr_idx; /* round-robin stream index */ - struct workqueue_struct *irq_work_queues; /* workqueue for PIO/YUV/VBI actions */ - struct work_struct irq_work_queue; /* work entry */ + struct kthread_worker irq_worker; /* kthread worker for PIO/YUV/VBI actions */ + struct task_struct *irq_worker_task; /* task for irq_worker */ + struct kthread_work irq_work; /* kthread work entry */ spinlock_t dma_reg_lock; /* lock access to DMA engine registers */ int cur_dma_stream; /* index of current stream doing DMA (-1 if none) */ int cur_pio_stream; /* index of current stream doing PIO (-1 if none) */ diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c index fea1ec33b0df..9b4faf009196 100644 --- a/drivers/media/video/ivtv/ivtv-irq.c +++ b/drivers/media/video/ivtv/ivtv-irq.c @@ -71,19 +71,10 @@ static void ivtv_pio_work_handler(struct ivtv *itv) write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44); } -void ivtv_irq_work_handler(struct work_struct *work) +void ivtv_irq_work_handler(struct kthread_work *work) { - struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue); + struct ivtv *itv = container_of(work, struct ivtv, irq_work); - DEFINE_WAIT(wait); - - if (test_and_clear_bit(IVTV_F_I_WORK_INITED, &itv->i_flags)) { - struct sched_param param = { .sched_priority = 99 }; - - /* This thread must use the FIFO scheduler as it - is realtime sensitive. */ - sched_setscheduler(current, SCHED_FIFO, ¶m); - } if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags)) ivtv_pio_work_handler(itv); @@ -975,7 +966,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id) } if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) { - queue_work(itv->irq_work_queues, &itv->irq_work_queue); + queue_kthread_work(&itv->irq_worker, &itv->irq_work); } spin_unlock(&itv->dma_reg_lock); diff --git a/drivers/media/video/ivtv/ivtv-irq.h b/drivers/media/video/ivtv/ivtv-irq.h index f879a5822e71..1e84433737cc 100644 --- a/drivers/media/video/ivtv/ivtv-irq.h +++ b/drivers/media/video/ivtv/ivtv-irq.h @@ -46,7 +46,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id); -void ivtv_irq_work_handler(struct work_struct *work); +void ivtv_irq_work_handler(struct kthread_work *work); void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock); void ivtv_unfinished_dma(unsigned long arg); -- cgit v1.2.3-59-g8ed1b From 82805ab77d25643f579d90397dcd34f05d1b750a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: kthread: implement kthread_data() Implement kthread_data() which takes @task pointing to a kthread and returns @data specified when creating the kthread. 
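For illustration only (not part of this patch), a minimal sketch of how kthread_data() can be used; the struct and function names (my_ctx, my_thread_fn, my_example) are made up for the example and error handling is omitted.

    #include <linux/kthread.h>
    #include <linux/sched.h>

    /* hypothetical driver context handed to the kthread as @data */
    struct my_ctx {
            struct task_struct *task;
            int value;
    };

    static int my_thread_fn(void *data)
    {
            struct my_ctx *ctx = data;      /* same pointer kthread_data() returns */

            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);
            return ctx->value;
    }

    static void my_example(struct my_ctx *ctx)
    {
            ctx->task = kthread_run(my_thread_fn, ctx, "my_thread");

            /* later, given a valid ctx->task, the creation-time @data can be
             * recovered from the task pointer alone */
            WARN_ON(kthread_data(ctx->task) != ctx);
    }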
The caller is responsible for ensuring the validity of @task when calling this function. Signed-off-by: Tejun Heo --- include/linux/kthread.h | 1 + kernel/kthread.c | 15 +++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/include/linux/kthread.h b/include/linux/kthread.h index f93cb6979edc..685ea65eb803 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -30,6 +30,7 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), void kthread_bind(struct task_struct *k, unsigned int cpu); int kthread_stop(struct task_struct *k); int kthread_should_stop(void); +void *kthread_data(struct task_struct *k); int kthreadd(void *unused); extern struct task_struct *kthreadd_task; diff --git a/kernel/kthread.c b/kernel/kthread.c index 8b63c7fee73b..2dc3786349d1 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -37,6 +37,7 @@ struct kthread_create_info struct kthread { int should_stop; + void *data; struct completion exited; }; @@ -56,6 +57,19 @@ int kthread_should_stop(void) } EXPORT_SYMBOL(kthread_should_stop); +/** + * kthread_data - return data value specified on kthread creation + * @task: kthread task in question + * + * Return the data value specified when kthread @task was created. + * The caller is responsible for ensuring the validity of @task when + * calling this function. + */ +void *kthread_data(struct task_struct *task) +{ + return to_kthread(task)->data; +} + static int kthread(void *_create) { /* Copy data: it's on kthread's stack */ @@ -66,6 +80,7 @@ static int kthread(void *_create) int ret; self.should_stop = 0; + self.data = data; init_completion(&self.exited); current->vfork_done = &self.exited; -- cgit v1.2.3-59-g8ed1b From 8fec62b2d9d0c80b594d0d85678bfdf57a70df1b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: acpi: use queue_work_on() instead of binding workqueue worker to cpu0 ACPI works need to be executed on cpu0 and acpi/osl.c achieves this by creating singlethread workqueue and then binding it to cpu0 from a work which is quite unorthodox. Make it create regular workqueues and use queue_work_on() instead. This is in preparation of concurrency managed workqueue and the extra workers won't be a problem after it's implemented. Signed-off-by: Tejun Heo --- drivers/acpi/osl.c | 40 +++++++++++----------------------------- 1 file changed, 11 insertions(+), 29 deletions(-) diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 78418ce4fc78..46cce391fa46 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -191,36 +191,11 @@ acpi_status __init acpi_os_initialize(void) return AE_OK; } -static void bind_to_cpu0(struct work_struct *work) -{ - set_cpus_allowed_ptr(current, cpumask_of(0)); - kfree(work); -} - -static void bind_workqueue(struct workqueue_struct *wq) -{ - struct work_struct *work; - - work = kzalloc(sizeof(struct work_struct), GFP_KERNEL); - INIT_WORK(work, bind_to_cpu0); - queue_work(wq, work); -} - acpi_status acpi_os_initialize1(void) { - /* - * On some machines, a software-initiated SMI causes corruption unless - * the SMI runs on CPU 0. An SMI can be initiated by any AML, but - * typically it's done in GPE-related methods that are run via - * workqueues, so we can avoid the known corruption cases by binding - * the workqueues to CPU 0. 
- */ - kacpid_wq = create_singlethread_workqueue("kacpid"); - bind_workqueue(kacpid_wq); - kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify"); - bind_workqueue(kacpi_notify_wq); - kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug"); - bind_workqueue(kacpi_hotplug_wq); + kacpid_wq = create_workqueue("kacpid"); + kacpi_notify_wq = create_workqueue("kacpi_notify"); + kacpi_hotplug_wq = create_workqueue("kacpi_hotplug"); BUG_ON(!kacpid_wq); BUG_ON(!kacpi_notify_wq); BUG_ON(!kacpi_hotplug_wq); @@ -766,7 +741,14 @@ static acpi_status __acpi_os_execute(acpi_execute_type type, else INIT_WORK(&dpc->work, acpi_os_execute_deferred); - ret = queue_work(queue, &dpc->work); + /* + * On some machines, a software-initiated SMI causes corruption unless + * the SMI runs on CPU 0. An SMI can be initiated by any AML, but + * typically it's done in GPE-related methods that are run via + * workqueues, so we can avoid the known corruption cases by always + * queueing on CPU 0. + */ + ret = queue_work_on(0, queue, &dpc->work); if (!ret) { printk(KERN_ERR PREFIX -- cgit v1.2.3-59-g8ed1b From c790bce0481857412c964c5e9d46d56e41c4b051 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:09 +0200 Subject: workqueue: kill RT workqueue With stop_machine() converted to use cpu_stop, RT workqueue doesn't have any user left. Kill RT workqueue support. Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 20 +++++++++----------- kernel/workqueue.c | 6 ------ 2 files changed, 9 insertions(+), 17 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 9466e860d8c2..0697946c66a1 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -181,12 +181,11 @@ static inline void destroy_work_on_stack(struct work_struct *work) { } extern struct workqueue_struct * -__create_workqueue_key(const char *name, int singlethread, - int freezeable, int rt, struct lock_class_key *key, - const char *lock_name); +__create_workqueue_key(const char *name, int singlethread, int freezeable, + struct lock_class_key *key, const char *lock_name); #ifdef CONFIG_LOCKDEP -#define __create_workqueue(name, singlethread, freezeable, rt) \ +#define __create_workqueue(name, singlethread, freezeable) \ ({ \ static struct lock_class_key __key; \ const char *__lock_name; \ @@ -197,19 +196,18 @@ __create_workqueue_key(const char *name, int singlethread, __lock_name = #name; \ \ __create_workqueue_key((name), (singlethread), \ - (freezeable), (rt), &__key, \ + (freezeable), &__key, \ __lock_name); \ }) #else -#define __create_workqueue(name, singlethread, freezeable, rt) \ - __create_workqueue_key((name), (singlethread), (freezeable), (rt), \ +#define __create_workqueue(name, singlethread, freezeable) \ + __create_workqueue_key((name), (singlethread), (freezeable), \ NULL, NULL) #endif -#define create_workqueue(name) __create_workqueue((name), 0, 0, 0) -#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1) -#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0) -#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0) +#define create_workqueue(name) __create_workqueue((name), 0, 0) +#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1) +#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 327d2deb4451..1a47fbf92fae 100644 --- a/kernel/workqueue.c +++ 
b/kernel/workqueue.c @@ -62,7 +62,6 @@ struct workqueue_struct { const char *name; int singlethread; int freezeable; /* Freeze threads during suspend */ - int rt; #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif @@ -947,7 +946,6 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu) static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) { - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; struct workqueue_struct *wq = cwq->wq; const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d"; struct task_struct *p; @@ -963,8 +961,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) */ if (IS_ERR(p)) return PTR_ERR(p); - if (cwq->wq->rt) - sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); cwq->thread = p; trace_workqueue_creation(cwq->thread, cpu); @@ -986,7 +982,6 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) struct workqueue_struct *__create_workqueue_key(const char *name, int singlethread, int freezeable, - int rt, struct lock_class_key *key, const char *lock_name) { @@ -1008,7 +1003,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name, lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); wq->singlethread = singlethread; wq->freezeable = freezeable; - wq->rt = rt; INIT_LIST_HEAD(&wq->list); if (singlethread) { -- cgit v1.2.3-59-g8ed1b From 4690c4ab56c71919893ca25252f2dd65b58188c7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:10 +0200 Subject: workqueue: misc/cosmetic updates Make the following updates in preparation of concurrency managed workqueue. None of these changes causes any visible behavior difference. * Add comments and adjust indentations to data structures and several functions. * Rename wq_per_cpu() to get_cwq() and swap the position of two parameters for consistency. Convert a direct per_cpu_ptr() access to wq->cpu_wq to get_cwq(). * Add work_static() and Update set_wq_data() such that it sets the flags part to WORK_STRUCT_PENDING | WORK_STRUCT_STATIC if static | @extra_flags. * Move santiy check on work->entry emptiness from queue_work_on() to __queue_work() which all queueing paths share. * Make __queue_work() take @cpu and @wq instead of @cwq. * Restructure flush_work() and __create_workqueue_key() to make them easier to modify. Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 5 ++ kernel/workqueue.c | 131 +++++++++++++++++++++++++++++----------------- 2 files changed, 89 insertions(+), 47 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0697946c66a1..e724dafc9e6d 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -96,9 +96,14 @@ struct execute_work { #ifdef CONFIG_DEBUG_OBJECTS_WORK extern void __init_work(struct work_struct *work, int onstack); extern void destroy_work_on_stack(struct work_struct *work); +static inline unsigned int work_static(struct work_struct *work) +{ + return *work_data_bits(work) & (1 << WORK_STRUCT_STATIC); +} #else static inline void __init_work(struct work_struct *work, int onstack) { } static inline void destroy_work_on_stack(struct work_struct *work) { } +static inline unsigned int work_static(struct work_struct *work) { return 0; } #endif /* diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1a47fbf92fae..c56146a755e5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -36,6 +36,16 @@ #define CREATE_TRACE_POINTS #include +/* + * Structure fields follow one of the following exclusion rules. 
+ * + * I: Set during initialization and read-only afterwards. + * + * L: cwq->lock protected. Access with cwq->lock held. + * + * W: workqueue_lock protected. + */ + /* * The per-CPU workqueue (if single thread, we always use the first * possible cpu). @@ -48,8 +58,8 @@ struct cpu_workqueue_struct { wait_queue_head_t more_work; struct work_struct *current_work; - struct workqueue_struct *wq; - struct task_struct *thread; + struct workqueue_struct *wq; /* I: the owning workqueue */ + struct task_struct *thread; } ____cacheline_aligned; /* @@ -57,13 +67,13 @@ struct cpu_workqueue_struct { * per-CPU workqueues: */ struct workqueue_struct { - struct cpu_workqueue_struct *cpu_wq; - struct list_head list; - const char *name; + struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */ + struct list_head list; /* W: list of all workqueues */ + const char *name; /* I: workqueue name */ int singlethread; int freezeable; /* Freeze threads during suspend */ #ifdef CONFIG_LOCKDEP - struct lockdep_map lockdep_map; + struct lockdep_map lockdep_map; #endif }; @@ -204,8 +214,8 @@ static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) ? cpu_singlethread_map : cpu_populated_map; } -static -struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu) +static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, + struct workqueue_struct *wq) { if (unlikely(is_wq_single_threaded(wq))) cpu = singlethread_cpu; @@ -217,15 +227,13 @@ struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu) * - Must *only* be called if the pending flag is set */ static inline void set_wq_data(struct work_struct *work, - struct cpu_workqueue_struct *cwq) + struct cpu_workqueue_struct *cwq, + unsigned long extra_flags) { - unsigned long new; - BUG_ON(!work_pending(work)); - new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING); - new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work); - atomic_long_set(&work->data, new); + atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) | + (1UL << WORK_STRUCT_PENDING) | extra_flags); } /* @@ -233,9 +241,7 @@ static inline void set_wq_data(struct work_struct *work, */ static inline void clear_wq_data(struct work_struct *work) { - unsigned long flags = *work_data_bits(work) & - (1UL << WORK_STRUCT_STATIC); - atomic_long_set(&work->data, flags); + atomic_long_set(&work->data, work_static(work)); } static inline @@ -244,29 +250,47 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); } +/** + * insert_work - insert a work into cwq + * @cwq: cwq @work belongs to + * @work: work to insert + * @head: insertion point + * @extra_flags: extra WORK_STRUCT_* flags to set + * + * Insert @work into @cwq after @head. + * + * CONTEXT: + * spin_lock_irq(cwq->lock). + */ static void insert_work(struct cpu_workqueue_struct *cwq, - struct work_struct *work, struct list_head *head) + struct work_struct *work, struct list_head *head, + unsigned int extra_flags) { trace_workqueue_insertion(cwq->thread, work); - set_wq_data(work, cwq); + /* we own @work, set data and link */ + set_wq_data(work, cwq, extra_flags); + /* * Ensure that we get the right work->data if we see the * result of list_add() below, see try_to_grab_pending(). 
*/ smp_wmb(); + list_add_tail(&work->entry, head); wake_up(&cwq->more_work); } -static void __queue_work(struct cpu_workqueue_struct *cwq, +static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); unsigned long flags; debug_work_activate(work); spin_lock_irqsave(&cwq->lock, flags); - insert_work(cwq, work, &cwq->worklist); + BUG_ON(!list_empty(&work->entry)); + insert_work(cwq, work, &cwq->worklist, 0); spin_unlock_irqrestore(&cwq->lock, flags); } @@ -308,8 +332,7 @@ queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) int ret = 0; if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { - BUG_ON(!list_empty(&work->entry)); - __queue_work(wq_per_cpu(wq, cpu), work); + __queue_work(cpu, wq, work); ret = 1; } return ret; @@ -320,9 +343,8 @@ static void delayed_work_timer_fn(unsigned long __data) { struct delayed_work *dwork = (struct delayed_work *)__data; struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work); - struct workqueue_struct *wq = cwq->wq; - __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work); + __queue_work(smp_processor_id(), cwq->wq, &dwork->work); } /** @@ -366,7 +388,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, timer_stats_timer_set_start_info(&dwork->timer); /* This stores cwq for the moment, for the timer_fn */ - set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id())); + set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0); timer->expires = jiffies + delay; timer->data = (unsigned long)dwork; timer->function = delayed_work_timer_fn; @@ -430,6 +452,12 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) spin_unlock_irq(&cwq->lock); } +/** + * worker_thread - the worker thread function + * @__cwq: cwq to serve + * + * The cwq worker thread function. + */ static int worker_thread(void *__cwq) { struct cpu_workqueue_struct *cwq = __cwq; @@ -468,6 +496,17 @@ static void wq_barrier_func(struct work_struct *work) complete(&barr->done); } +/** + * insert_wq_barrier - insert a barrier work + * @cwq: cwq to insert barrier into + * @barr: wq_barrier to insert + * @head: insertion point + * + * Insert barrier @barr into @cwq before @head. + * + * CONTEXT: + * spin_lock_irq(cwq->lock). + */ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, struct wq_barrier *barr, struct list_head *head) { @@ -479,11 +518,10 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, */ INIT_WORK_ON_STACK(&barr->work, wq_barrier_func); __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); - init_completion(&barr->done); debug_work_activate(&barr->work); - insert_work(cwq, &barr->work, head); + insert_work(cwq, &barr->work, head, 0); } static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) @@ -517,9 +555,6 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) * * We sleep until all works which were queued on entry have been handled, * but we are not livelocked by new incoming ones. - * - * This function used to run the workqueues itself. Now we just wait for the - * helper threads to do it. 
*/ void flush_workqueue(struct workqueue_struct *wq) { @@ -558,7 +593,6 @@ int flush_work(struct work_struct *work) lock_map_acquire(&cwq->wq->lockdep_map); lock_map_release(&cwq->wq->lockdep_map); - prev = NULL; spin_lock_irq(&cwq->lock); if (!list_empty(&work->entry)) { /* @@ -567,22 +601,22 @@ int flush_work(struct work_struct *work) */ smp_rmb(); if (unlikely(cwq != get_wq_data(work))) - goto out; + goto already_gone; prev = &work->entry; } else { if (cwq->current_work != work) - goto out; + goto already_gone; prev = &cwq->worklist; } insert_wq_barrier(cwq, &barr, prev->next); -out: - spin_unlock_irq(&cwq->lock); - if (!prev) - return 0; + spin_unlock_irq(&cwq->lock); wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); return 1; +already_gone: + spin_unlock_irq(&cwq->lock); + return 0; } EXPORT_SYMBOL_GPL(flush_work); @@ -665,7 +699,7 @@ static void wait_on_work(struct work_struct *work) cpu_map = wq_cpu_map(wq); for_each_cpu(cpu, cpu_map) - wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); + wait_on_cpu_work(get_cwq(cpu, wq), work); } static int __cancel_work_timer(struct work_struct *work, @@ -782,9 +816,8 @@ EXPORT_SYMBOL(schedule_delayed_work); void flush_delayed_work(struct delayed_work *dwork) { if (del_timer_sync(&dwork->timer)) { - struct cpu_workqueue_struct *cwq; - cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu()); - __queue_work(cwq, &dwork->work); + __queue_work(get_cpu(), get_wq_data(&dwork->work)->wq, + &dwork->work); put_cpu(); } flush_work(&dwork->work); @@ -991,13 +1024,11 @@ struct workqueue_struct *__create_workqueue_key(const char *name, wq = kzalloc(sizeof(*wq), GFP_KERNEL); if (!wq) - return NULL; + goto err; wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct); - if (!wq->cpu_wq) { - kfree(wq); - return NULL; - } + if (!wq->cpu_wq) + goto err; wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); @@ -1041,6 +1072,12 @@ struct workqueue_struct *__create_workqueue_key(const char *name, wq = NULL; } return wq; +err: + if (wq) { + free_percpu(wq->cpu_wq); + kfree(wq); + } + return NULL; } EXPORT_SYMBOL_GPL(__create_workqueue_key); -- cgit v1.2.3-59-g8ed1b From 97e37d7b9e65a6ac939f796f91081135b7a08acc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:10 +0200 Subject: workqueue: merge feature parameters into flags Currently, __create_workqueue_key() takes @singlethread and @freezeable paramters and store them separately in workqueue_struct. Merge them into a single flags parameter and field and use WQ_FREEZEABLE and WQ_SINGLE_THREAD. 
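As a quick before/after sketch (illustration only, not part of the patch; "foo" and the wrapper function are made up), a caller that wants a freezeable single-threaded workqueue now passes OR'd WQ_* flags instead of two ints:

    #include <linux/workqueue.h>

    static struct workqueue_struct *example_create(void)
    {
            /* before this patch (two int parameters):
             *         __create_workqueue("foo", 1, 1);
             * after this patch (a single flags word):
             *         __create_workqueue("foo", WQ_FREEZEABLE | WQ_SINGLE_THREAD);
             * which is exactly what the updated convenience wrapper expands to:
             */
            return create_freezeable_workqueue("foo");
    }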
Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 25 +++++++++++++++---------- kernel/workqueue.c | 17 +++++++---------- 2 files changed, 22 insertions(+), 20 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index e724dafc9e6d..d89cfc143b1a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -184,13 +184,17 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } #define work_clear_pending(work) \ clear_bit(WORK_STRUCT_PENDING, work_data_bits(work)) +enum { + WQ_FREEZEABLE = 1 << 0, /* freeze during suspend */ + WQ_SINGLE_THREAD = 1 << 1, /* no per-cpu worker */ +}; extern struct workqueue_struct * -__create_workqueue_key(const char *name, int singlethread, int freezeable, +__create_workqueue_key(const char *name, unsigned int flags, struct lock_class_key *key, const char *lock_name); #ifdef CONFIG_LOCKDEP -#define __create_workqueue(name, singlethread, freezeable) \ +#define __create_workqueue(name, flags) \ ({ \ static struct lock_class_key __key; \ const char *__lock_name; \ @@ -200,19 +204,20 @@ __create_workqueue_key(const char *name, int singlethread, int freezeable, else \ __lock_name = #name; \ \ - __create_workqueue_key((name), (singlethread), \ - (freezeable), &__key, \ + __create_workqueue_key((name), (flags), &__key, \ __lock_name); \ }) #else -#define __create_workqueue(name, singlethread, freezeable) \ - __create_workqueue_key((name), (singlethread), (freezeable), \ - NULL, NULL) +#define __create_workqueue(name, flags) \ + __create_workqueue_key((name), (flags), NULL, NULL) #endif -#define create_workqueue(name) __create_workqueue((name), 0, 0) -#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1) -#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0) +#define create_workqueue(name) \ + __create_workqueue((name), 0) +#define create_freezeable_workqueue(name) \ + __create_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_THREAD) +#define create_singlethread_workqueue(name) \ + __create_workqueue((name), WQ_SINGLE_THREAD) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c56146a755e5..68e4dd808ec0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -67,11 +67,10 @@ struct cpu_workqueue_struct { * per-CPU workqueues: */ struct workqueue_struct { + unsigned int flags; /* I: WQ_* flags */ struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */ struct list_head list; /* W: list of all workqueues */ const char *name; /* I: workqueue name */ - int singlethread; - int freezeable; /* Freeze threads during suspend */ #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif @@ -203,9 +202,9 @@ static const struct cpumask *cpu_singlethread_map __read_mostly; static cpumask_var_t cpu_populated_map __read_mostly; /* If it's single threaded, it isn't in the list of workqueues. 
*/ -static inline int is_wq_single_threaded(struct workqueue_struct *wq) +static inline bool is_wq_single_threaded(struct workqueue_struct *wq) { - return wq->singlethread; + return wq->flags & WQ_SINGLE_THREAD; } static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) @@ -463,7 +462,7 @@ static int worker_thread(void *__cwq) struct cpu_workqueue_struct *cwq = __cwq; DEFINE_WAIT(wait); - if (cwq->wq->freezeable) + if (cwq->wq->flags & WQ_FREEZEABLE) set_freezable(); for (;;) { @@ -1013,8 +1012,7 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) } struct workqueue_struct *__create_workqueue_key(const char *name, - int singlethread, - int freezeable, + unsigned int flags, struct lock_class_key *key, const char *lock_name) { @@ -1030,13 +1028,12 @@ struct workqueue_struct *__create_workqueue_key(const char *name, if (!wq->cpu_wq) goto err; + wq->flags = flags; wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); - wq->singlethread = singlethread; - wq->freezeable = freezeable; INIT_LIST_HEAD(&wq->list); - if (singlethread) { + if (flags & WQ_SINGLE_THREAD) { cwq = init_cpu_workqueue(wq, singlethread_cpu); err = create_workqueue_thread(cwq, singlethread_cpu); start_workqueue_thread(cwq, -1); -- cgit v1.2.3-59-g8ed1b From 22df02bb3fab24af97bff4c69cc6fd8529fc66fe Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:10 +0200 Subject: workqueue: define masks for work flags and conditionalize STATIC flags Work flags are about to see more traditional mask handling. Define WORK_STRUCT_*_BIT as the bit position constant and redefine WORK_STRUCT_* as bit masks. Also, make WORK_STRUCT_STATIC_* flags conditional While at it, re-define these constants as enums and use WORK_STRUCT_STATIC instead of hard-coding 2 in WORK_DATA_STATIC_INIT(). 
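To illustrate the convention this patch introduces (a sketch only; example_flag_conventions is a made-up helper): the *_BIT constants feed the atomic bitops, while the mask forms are used for direct tests of the data word.

    #include <linux/workqueue.h>

    static void example_flag_conventions(struct work_struct *work)
    {
            /* atomic bitops take the bit position constant ... */
            if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
                    return;                         /* was already pending */

            /* ... while plain tests of the data word use the mask form;
             * WORK_STRUCT_STATIC is 0 without CONFIG_DEBUG_OBJECTS_WORK,
             * so this branch compiles away in that configuration */
            if (*work_data_bits(work) & WORK_STRUCT_STATIC)
                    pr_debug("statically initialized work item\n");
    }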
Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 29 +++++++++++++++++++++-------- kernel/workqueue.c | 12 ++++++------ 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index d89cfc143b1a..d60c5701ab45 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -22,12 +22,25 @@ typedef void (*work_func_t)(struct work_struct *work); */ #define work_data_bits(work) ((unsigned long *)(&(work)->data)) +enum { + WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ +#ifdef CONFIG_DEBUG_OBJECTS_WORK + WORK_STRUCT_STATIC_BIT = 1, /* static initializer (debugobjects) */ +#endif + + WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, +#ifdef CONFIG_DEBUG_OBJECTS_WORK + WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, +#else + WORK_STRUCT_STATIC = 0, +#endif + + WORK_STRUCT_FLAG_MASK = 3UL, + WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, +}; + struct work_struct { atomic_long_t data; -#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ -#define WORK_STRUCT_STATIC 1 /* static initializer (debugobjects) */ -#define WORK_STRUCT_FLAG_MASK (3UL) -#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) struct list_head entry; work_func_t func; #ifdef CONFIG_LOCKDEP @@ -36,7 +49,7 @@ struct work_struct { }; #define WORK_DATA_INIT() ATOMIC_LONG_INIT(0) -#define WORK_DATA_STATIC_INIT() ATOMIC_LONG_INIT(2) +#define WORK_DATA_STATIC_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_STATIC) struct delayed_work { struct work_struct work; @@ -98,7 +111,7 @@ extern void __init_work(struct work_struct *work, int onstack); extern void destroy_work_on_stack(struct work_struct *work); static inline unsigned int work_static(struct work_struct *work) { - return *work_data_bits(work) & (1 << WORK_STRUCT_STATIC); + return *work_data_bits(work) & WORK_STRUCT_STATIC; } #else static inline void __init_work(struct work_struct *work, int onstack) { } @@ -167,7 +180,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } * @work: The work item in question */ #define work_pending(work) \ - test_bit(WORK_STRUCT_PENDING, work_data_bits(work)) + test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) /** * delayed_work_pending - Find out whether a delayable work item is currently @@ -182,7 +195,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } * @work: The work item in question */ #define work_clear_pending(work) \ - clear_bit(WORK_STRUCT_PENDING, work_data_bits(work)) + clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) enum { WQ_FREEZEABLE = 1 << 0, /* freeze during suspend */ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 68e4dd808ec0..5c49d762293b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -115,7 +115,7 @@ static int work_fixup_activate(void *addr, enum debug_obj_state state) * statically initialized. We just make sure that it * is tracked in the object tracker. 
*/ - if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) { + if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) { debug_object_init(work, &work_debug_descr); debug_object_activate(work, &work_debug_descr); return 0; @@ -232,7 +232,7 @@ static inline void set_wq_data(struct work_struct *work, BUG_ON(!work_pending(work)); atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) | - (1UL << WORK_STRUCT_PENDING) | extra_flags); + WORK_STRUCT_PENDING | extra_flags); } /* @@ -330,7 +330,7 @@ queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) { int ret = 0; - if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { __queue_work(cpu, wq, work); ret = 1; } @@ -380,7 +380,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct timer_list *timer = &dwork->timer; struct work_struct *work = &dwork->work; - if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { BUG_ON(timer_pending(timer)); BUG_ON(!list_empty(&work->entry)); @@ -516,7 +516,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, * might deadlock. */ INIT_WORK_ON_STACK(&barr->work, wq_barrier_func); - __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); + __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); init_completion(&barr->done); debug_work_activate(&barr->work); @@ -628,7 +628,7 @@ static int try_to_grab_pending(struct work_struct *work) struct cpu_workqueue_struct *cwq; int ret = -1; - if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) return 0; /* -- cgit v1.2.3-59-g8ed1b From a62428c0ae54a39e411251e836c3fe3dc11a5f5f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:10 +0200 Subject: workqueue: separate out process_one_work() Separate out process_one_work() out of run_workqueue(). This patch doesn't cause any behavior change. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 100 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 61 insertions(+), 39 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 5c49d762293b..8e3082b76c7f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -402,51 +402,73 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, } EXPORT_SYMBOL_GPL(queue_delayed_work_on); +/** + * process_one_work - process single work + * @cwq: cwq to process work for + * @work: work to process + * + * Process @work. This function contains all the logics necessary to + * process a single work including synchronization against and + * interaction with other workers on the same cpu, queueing and + * flushing. As long as context requirement is met, any worker can + * call this function to process a work. + * + * CONTEXT: + * spin_lock_irq(cwq->lock) which is released and regrabbed. + */ +static void process_one_work(struct cpu_workqueue_struct *cwq, + struct work_struct *work) +{ + work_func_t f = work->func; +#ifdef CONFIG_LOCKDEP + /* + * It is permissible to free the struct work_struct from + * inside the function that is called from it, this we need to + * take into account for lockdep too. To avoid bogus "held + * lock freed" warnings as well as problems when looking into + * work->lockdep_map, make a copy and use that here. 
+ */ + struct lockdep_map lockdep_map = work->lockdep_map; +#endif + /* claim and process */ + trace_workqueue_execution(cwq->thread, work); + debug_work_deactivate(work); + cwq->current_work = work; + list_del_init(&work->entry); + + spin_unlock_irq(&cwq->lock); + + BUG_ON(get_wq_data(work) != cwq); + work_clear_pending(work); + lock_map_acquire(&cwq->wq->lockdep_map); + lock_map_acquire(&lockdep_map); + f(work); + lock_map_release(&lockdep_map); + lock_map_release(&cwq->wq->lockdep_map); + + if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { + printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " + "%s/0x%08x/%d\n", + current->comm, preempt_count(), task_pid_nr(current)); + printk(KERN_ERR " last function: "); + print_symbol("%s\n", (unsigned long)f); + debug_show_held_locks(current); + dump_stack(); + } + + spin_lock_irq(&cwq->lock); + + /* we're done with it, release */ + cwq->current_work = NULL; +} + static void run_workqueue(struct cpu_workqueue_struct *cwq) { spin_lock_irq(&cwq->lock); while (!list_empty(&cwq->worklist)) { struct work_struct *work = list_entry(cwq->worklist.next, struct work_struct, entry); - work_func_t f = work->func; -#ifdef CONFIG_LOCKDEP - /* - * It is permissible to free the struct work_struct - * from inside the function that is called from it, - * this we need to take into account for lockdep too. - * To avoid bogus "held lock freed" warnings as well - * as problems when looking into work->lockdep_map, - * make a copy and use that here. - */ - struct lockdep_map lockdep_map = work->lockdep_map; -#endif - trace_workqueue_execution(cwq->thread, work); - debug_work_deactivate(work); - cwq->current_work = work; - list_del_init(cwq->worklist.next); - spin_unlock_irq(&cwq->lock); - - BUG_ON(get_wq_data(work) != cwq); - work_clear_pending(work); - lock_map_acquire(&cwq->wq->lockdep_map); - lock_map_acquire(&lockdep_map); - f(work); - lock_map_release(&lockdep_map); - lock_map_release(&cwq->wq->lockdep_map); - - if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { - printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " - "%s/0x%08x/%d\n", - current->comm, preempt_count(), - task_pid_nr(current)); - printk(KERN_ERR " last function: "); - print_symbol("%s\n", (unsigned long)f); - debug_show_held_locks(current); - dump_stack(); - } - - spin_lock_irq(&cwq->lock); - cwq->current_work = NULL; + process_one_work(cwq, work); } spin_unlock_irq(&cwq->lock); } -- cgit v1.2.3-59-g8ed1b From 64166699752006f1a23a9cf7c96ae36654ccfc2c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:11 +0200 Subject: workqueue: temporarily remove workqueue tracing Strip tracing code from workqueue and remove workqueue tracing. This is temporary measure till concurrency managed workqueue is complete. 
Signed-off-by: Tejun Heo Cc: Frederic Weisbecker --- include/trace/events/workqueue.h | 92 ---------------------------------------- kernel/trace/Kconfig | 11 ----- kernel/workqueue.c | 14 ++---- 3 files changed, 3 insertions(+), 114 deletions(-) delete mode 100644 include/trace/events/workqueue.h diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h deleted file mode 100644 index d6c974474e70..000000000000 --- a/include/trace/events/workqueue.h +++ /dev/null @@ -1,92 +0,0 @@ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM workqueue - -#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_WORKQUEUE_H - -#include -#include -#include - -DECLARE_EVENT_CLASS(workqueue, - - TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), - - TP_ARGS(wq_thread, work), - - TP_STRUCT__entry( - __array(char, thread_comm, TASK_COMM_LEN) - __field(pid_t, thread_pid) - __field(work_func_t, func) - ), - - TP_fast_assign( - memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); - __entry->thread_pid = wq_thread->pid; - __entry->func = work->func; - ), - - TP_printk("thread=%s:%d func=%pf", __entry->thread_comm, - __entry->thread_pid, __entry->func) -); - -DEFINE_EVENT(workqueue, workqueue_insertion, - - TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), - - TP_ARGS(wq_thread, work) -); - -DEFINE_EVENT(workqueue, workqueue_execution, - - TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), - - TP_ARGS(wq_thread, work) -); - -/* Trace the creation of one workqueue thread on a cpu */ -TRACE_EVENT(workqueue_creation, - - TP_PROTO(struct task_struct *wq_thread, int cpu), - - TP_ARGS(wq_thread, cpu), - - TP_STRUCT__entry( - __array(char, thread_comm, TASK_COMM_LEN) - __field(pid_t, thread_pid) - __field(int, cpu) - ), - - TP_fast_assign( - memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); - __entry->thread_pid = wq_thread->pid; - __entry->cpu = cpu; - ), - - TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm, - __entry->thread_pid, __entry->cpu) -); - -TRACE_EVENT(workqueue_destruction, - - TP_PROTO(struct task_struct *wq_thread), - - TP_ARGS(wq_thread), - - TP_STRUCT__entry( - __array(char, thread_comm, TASK_COMM_LEN) - __field(pid_t, thread_pid) - ), - - TP_fast_assign( - memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); - __entry->thread_pid = wq_thread->pid; - ), - - TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid) -); - -#endif /* _TRACE_WORKQUEUE_H */ - -/* This part must be outside protection */ -#include diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 8b1797c4545b..a0d95c1f3f82 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -391,17 +391,6 @@ config KMEMTRACE If unsure, say N. -config WORKQUEUE_TRACER - bool "Trace workqueues" - select GENERIC_TRACER - help - The workqueue tracer provides some statistical information - about each cpu workqueue thread such as the number of the - works inserted and executed since their creation. It can help - to evaluate the amount of work each of them has to perform. - For example it can help a developer to decide whether he should - choose a per-cpu workqueue instead of a singlethreaded one. 
- config BLK_DEV_IO_TRACE bool "Support for tracing block IO actions" depends on SYSFS diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8e3082b76c7f..f7ab703285a6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -33,8 +33,6 @@ #include #include #include -#define CREATE_TRACE_POINTS -#include /* * Structure fields follow one of the following exclusion rules. @@ -243,10 +241,10 @@ static inline void clear_wq_data(struct work_struct *work) atomic_long_set(&work->data, work_static(work)); } -static inline -struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) +static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) { - return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); + return (void *)(atomic_long_read(&work->data) & + WORK_STRUCT_WQ_DATA_MASK); } /** @@ -265,8 +263,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq, struct work_struct *work, struct list_head *head, unsigned int extra_flags) { - trace_workqueue_insertion(cwq->thread, work); - /* we own @work, set data and link */ set_wq_data(work, cwq, extra_flags); @@ -431,7 +427,6 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, struct lockdep_map lockdep_map = work->lockdep_map; #endif /* claim and process */ - trace_workqueue_execution(cwq->thread, work); debug_work_deactivate(work); cwq->current_work = work; list_del_init(&work->entry); @@ -1017,8 +1012,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) return PTR_ERR(p); cwq->thread = p; - trace_workqueue_creation(cwq->thread, cpu); - return 0; } @@ -1123,7 +1116,6 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) * checks list_empty(), and a "normal" queue_work() can't use * a dead CPU. */ - trace_workqueue_destruction(cwq->thread); kthread_stop(cwq->thread); cwq->thread = NULL; } -- cgit v1.2.3-59-g8ed1b From 1537663f5763892cacf1409ac0efef1b4f332d1e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:11 +0200 Subject: workqueue: kill cpu_populated_map Worker management is about to be overhauled. Simplify things by removing cpu_populated_map, creating workers for all possible cpus and making single threaded workqueues behave more like multi threaded ones. After this patch, all cwqs are always initialized, all workqueues are linked on the workqueues list and workers for all possibles cpus always exist. This also makes CPU hotplug support simpler - checking ->cpus_allowed before processing works in worker_thread() and flushing cwqs on CPU_POST_DEAD are enough. While at it, make get_cwq() always return the cwq for the specified cpu, add target_cwq() for cases where single thread distinction is necessary and drop all direct usage of per_cpu_ptr() on wq->cpu_wq. 
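For reference, a condensed view of the helper split described above (this mirrors the diff below rather than adding anything new): get_cwq() becomes a plain per-cpu lookup that is valid for every possible CPU, and only the queueing path still needs the single-thread redirect.

    /* plain per-cpu lookup -- valid for every possible cpu after this patch */
    static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
                                                struct workqueue_struct *wq)
    {
            return per_cpu_ptr(wq->cpu_wq, cpu);
    }

    /* queueing-time lookup -- the only place WQ_SINGLE_THREAD matters */
    static struct cpu_workqueue_struct *target_cwq(unsigned int cpu,
                                                   struct workqueue_struct *wq)
    {
            if (unlikely(wq->flags & WQ_SINGLE_THREAD))
                    cpu = singlethread_cpu;
            return get_cwq(cpu, wq);
    }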
Signed-off-by: Tejun Heo --- kernel/workqueue.c | 173 ++++++++++++++++++----------------------------------- 1 file changed, 59 insertions(+), 114 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f7ab703285a6..dc78956ccf03 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -55,6 +55,7 @@ struct cpu_workqueue_struct { struct list_head worklist; wait_queue_head_t more_work; struct work_struct *current_work; + unsigned int cpu; struct workqueue_struct *wq; /* I: the owning workqueue */ struct task_struct *thread; @@ -189,34 +190,19 @@ static DEFINE_SPINLOCK(workqueue_lock); static LIST_HEAD(workqueues); static int singlethread_cpu __read_mostly; -static const struct cpumask *cpu_singlethread_map __read_mostly; -/* - * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD - * flushes cwq->worklist. This means that flush_workqueue/wait_on_work - * which comes in between can't use for_each_online_cpu(). We could - * use cpu_possible_map, the cpumask below is more a documentation - * than optimization. - */ -static cpumask_var_t cpu_populated_map __read_mostly; - -/* If it's single threaded, it isn't in the list of workqueues. */ -static inline bool is_wq_single_threaded(struct workqueue_struct *wq) -{ - return wq->flags & WQ_SINGLE_THREAD; -} -static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq) +static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, + struct workqueue_struct *wq) { - return is_wq_single_threaded(wq) - ? cpu_singlethread_map : cpu_populated_map; + return per_cpu_ptr(wq->cpu_wq, cpu); } -static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, - struct workqueue_struct *wq) +static struct cpu_workqueue_struct *target_cwq(unsigned int cpu, + struct workqueue_struct *wq) { - if (unlikely(is_wq_single_threaded(wq))) + if (unlikely(wq->flags & WQ_SINGLE_THREAD)) cpu = singlethread_cpu; - return per_cpu_ptr(wq->cpu_wq, cpu); + return get_cwq(cpu, wq); } /* @@ -279,7 +265,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq); unsigned long flags; debug_work_activate(work); @@ -383,7 +369,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, timer_stats_timer_set_start_info(&dwork->timer); /* This stores cwq for the moment, for the timer_fn */ - set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0); + set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0); timer->expires = jiffies + delay; timer->data = (unsigned long)dwork; timer->function = delayed_work_timer_fn; @@ -495,6 +481,10 @@ static int worker_thread(void *__cwq) if (kthread_should_stop()) break; + if (unlikely(!cpumask_equal(&cwq->thread->cpus_allowed, + get_cpu_mask(cwq->cpu)))) + set_cpus_allowed_ptr(cwq->thread, + get_cpu_mask(cwq->cpu)); run_workqueue(cwq); } @@ -574,14 +564,13 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) */ void flush_workqueue(struct workqueue_struct *wq) { - const struct cpumask *cpu_map = wq_cpu_map(wq); int cpu; might_sleep(); lock_map_acquire(&wq->lockdep_map); lock_map_release(&wq->lockdep_map); - for_each_cpu(cpu, cpu_map) - flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); + for_each_possible_cpu(cpu) + flush_cpu_workqueue(get_cwq(cpu, wq)); } EXPORT_SYMBOL_GPL(flush_workqueue); @@ -699,7 +688,6 @@ static void wait_on_work(struct work_struct *work) { struct 
cpu_workqueue_struct *cwq; struct workqueue_struct *wq; - const struct cpumask *cpu_map; int cpu; might_sleep(); @@ -712,9 +700,8 @@ static void wait_on_work(struct work_struct *work) return; wq = cwq->wq; - cpu_map = wq_cpu_map(wq); - for_each_cpu(cpu, cpu_map) + for_each_possible_cpu(cpu) wait_on_cpu_work(get_cwq(cpu, wq), work); } @@ -972,7 +959,7 @@ int current_is_keventd(void) BUG_ON(!keventd_wq); - cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu); + cwq = get_cwq(cpu, keventd_wq); if (current == cwq->thread) ret = 1; @@ -980,26 +967,12 @@ int current_is_keventd(void) } -static struct cpu_workqueue_struct * -init_cpu_workqueue(struct workqueue_struct *wq, int cpu) -{ - struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu); - - cwq->wq = wq; - spin_lock_init(&cwq->lock); - INIT_LIST_HEAD(&cwq->worklist); - init_waitqueue_head(&cwq->more_work); - - return cwq; -} - static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) { struct workqueue_struct *wq = cwq->wq; - const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d"; struct task_struct *p; - p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu); + p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu); /* * Nobody can add the work_struct to this cwq, * if (caller is __create_workqueue) @@ -1031,8 +1004,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name, struct lock_class_key *key, const char *lock_name) { + bool singlethread = flags & WQ_SINGLE_THREAD; struct workqueue_struct *wq; - struct cpu_workqueue_struct *cwq; int err = 0, cpu; wq = kzalloc(sizeof(*wq), GFP_KERNEL); @@ -1048,37 +1021,37 @@ struct workqueue_struct *__create_workqueue_key(const char *name, lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); INIT_LIST_HEAD(&wq->list); - if (flags & WQ_SINGLE_THREAD) { - cwq = init_cpu_workqueue(wq, singlethread_cpu); - err = create_workqueue_thread(cwq, singlethread_cpu); - start_workqueue_thread(cwq, -1); - } else { - cpu_maps_update_begin(); - /* - * We must place this wq on list even if the code below fails. - * cpu_down(cpu) can remove cpu from cpu_populated_map before - * destroy_workqueue() takes the lock, in that case we leak - * cwq[cpu]->thread. - */ - spin_lock(&workqueue_lock); - list_add(&wq->list, &workqueues); - spin_unlock(&workqueue_lock); - /* - * We must initialize cwqs for each possible cpu even if we - * are going to call destroy_workqueue() finally. Otherwise - * cpu_up() can hit the uninitialized cwq once we drop the - * lock. - */ - for_each_possible_cpu(cpu) { - cwq = init_cpu_workqueue(wq, cpu); - if (err || !cpu_online(cpu)) - continue; - err = create_workqueue_thread(cwq, cpu); + cpu_maps_update_begin(); + /* + * We must initialize cwqs for each possible cpu even if we + * are going to call destroy_workqueue() finally. Otherwise + * cpu_up() can hit the uninitialized cwq once we drop the + * lock. 
+ */ + for_each_possible_cpu(cpu) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + cwq->wq = wq; + cwq->cpu = cpu; + spin_lock_init(&cwq->lock); + INIT_LIST_HEAD(&cwq->worklist); + init_waitqueue_head(&cwq->more_work); + + if (err) + continue; + err = create_workqueue_thread(cwq, cpu); + if (cpu_online(cpu) && !singlethread) start_workqueue_thread(cwq, cpu); - } - cpu_maps_update_done(); + else + start_workqueue_thread(cwq, -1); } + spin_lock(&workqueue_lock); + list_add(&wq->list, &workqueues); + spin_unlock(&workqueue_lock); + + cpu_maps_update_done(); + if (err) { destroy_workqueue(wq); wq = NULL; @@ -1128,17 +1101,16 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) */ void destroy_workqueue(struct workqueue_struct *wq) { - const struct cpumask *cpu_map = wq_cpu_map(wq); int cpu; cpu_maps_update_begin(); spin_lock(&workqueue_lock); list_del(&wq->list); spin_unlock(&workqueue_lock); + cpu_maps_update_done(); - for_each_cpu(cpu, cpu_map) - cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); - cpu_maps_update_done(); + for_each_possible_cpu(cpu) + cleanup_workqueue_thread(get_cwq(cpu, wq)); free_percpu(wq->cpu_wq); kfree(wq); @@ -1152,48 +1124,25 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, unsigned int cpu = (unsigned long)hcpu; struct cpu_workqueue_struct *cwq; struct workqueue_struct *wq; - int err = 0; action &= ~CPU_TASKS_FROZEN; - switch (action) { - case CPU_UP_PREPARE: - cpumask_set_cpu(cpu, cpu_populated_map); - } -undo: list_for_each_entry(wq, &workqueues, list) { - cwq = per_cpu_ptr(wq->cpu_wq, cpu); + if (wq->flags & WQ_SINGLE_THREAD) + continue; - switch (action) { - case CPU_UP_PREPARE: - err = create_workqueue_thread(cwq, cpu); - if (!err) - break; - printk(KERN_ERR "workqueue [%s] for %i failed\n", - wq->name, cpu); - action = CPU_UP_CANCELED; - err = -ENOMEM; - goto undo; - - case CPU_ONLINE: - start_workqueue_thread(cwq, cpu); - break; + cwq = get_cwq(cpu, wq); - case CPU_UP_CANCELED: - start_workqueue_thread(cwq, -1); + switch (action) { case CPU_POST_DEAD: - cleanup_workqueue_thread(cwq); + lock_map_acquire(&cwq->wq->lockdep_map); + lock_map_release(&cwq->wq->lockdep_map); + flush_cpu_workqueue(cwq); break; } } - switch (action) { - case CPU_UP_CANCELED: - case CPU_POST_DEAD: - cpumask_clear_cpu(cpu, cpu_populated_map); - } - - return notifier_from_errno(err); + return notifier_from_errno(0); } #ifdef CONFIG_SMP @@ -1245,11 +1194,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu); void __init init_workqueues(void) { - alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL); - - cpumask_copy(cpu_populated_map, cpu_online_mask); singlethread_cpu = cpumask_first(cpu_possible_mask); - cpu_singlethread_map = cpumask_of(singlethread_cpu); hotcpu_notifier(workqueue_cpu_callback, 0); keventd_wq = create_workqueue("events"); BUG_ON(!keventd_wq); -- cgit v1.2.3-59-g8ed1b From 0f900049cbe2767d47c2a62b54f0e822e1d66840 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:11 +0200 Subject: workqueue: update cwq alignement work->data field is used for two purposes. It points to cwq it's queued on and the lower bits are used for flags. Currently, two bits are reserved which is always safe as 4 byte alignment is guaranteed on every architecture. However, future changes will need more flag bits. On SMP, the percpu allocator is capable of honoring larger alignment (there are other users which depend on it) and larger alignment works just fine. 
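As a rough, user-space check of the alignment that ends up being requested (the constant mirrors the WORK_STRUCT_FLAG_BITS value introduced below; this is an illustration, not part of the patch):

/* Sketch of the cwq alignment computation used below (not kernel code). */
#include <stdio.h>
#include <stddef.h>

/* 2 with CONFIG_DEBUG_OBJECTS_WORK, 1 without -- values from the patch below */
#define WORK_STRUCT_FLAG_BITS 2

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	size_t flag_align = 1UL << WORK_STRUCT_FLAG_BITS;
	size_t ull_align  = __alignof__(unsigned long long);
	size_t align      = MAX(flag_align, ull_align);

	/* With 2 flag bits the flags alone only require 4-byte
	 * alignment, but the allocation must also satisfy unsigned
	 * long long (8 on most 64-bit ABIs), so the larger value wins. */
	printf("flag alignment %zu, ull alignment %zu -> cwq alignment %zu\n",
	       flag_align, ull_align, align);
	return 0;
}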
On UP, percpu allocator is a thin wrapper around kzalloc/kfree() and don't honor alignment request. This patch introduces WORK_STRUCT_FLAG_BITS and implements alloc/free_cwqs() which guarantees max(1 << WORK_STRUCT_FLAG_BITS, __alignof__(unsigned long long) alignment both on SMP and UP. On SMP, simply wrapping percpu allocator is enough. On UP, extra space is allocated so that cwq can be aligned and the original pointer can be stored after it which is used in the free path. * Alignment problem on UP is reported by Michal Simek. Signed-off-by: Tejun Heo Cc: Christoph Lameter Cc: Ingo Molnar Cc: Frederic Weisbecker Reported-by: Michal Simek --- include/linux/workqueue.h | 5 +++- kernel/workqueue.c | 60 +++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 59 insertions(+), 6 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index d60c5701ab45..b90958a037dc 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -26,6 +26,9 @@ enum { WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ #ifdef CONFIG_DEBUG_OBJECTS_WORK WORK_STRUCT_STATIC_BIT = 1, /* static initializer (debugobjects) */ + WORK_STRUCT_FLAG_BITS = 2, +#else + WORK_STRUCT_FLAG_BITS = 1, #endif WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, @@ -35,7 +38,7 @@ enum { WORK_STRUCT_STATIC = 0, #endif - WORK_STRUCT_FLAG_MASK = 3UL, + WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, }; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index dc78956ccf03..74a38499b19a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -46,7 +46,9 @@ /* * The per-CPU workqueue (if single thread, we always use the first - * possible cpu). + * possible cpu). The lower WORK_STRUCT_FLAG_BITS of + * work_struct->data are used for flags and thus cwqs need to be + * aligned at two's power of the number of flag bits. */ struct cpu_workqueue_struct { @@ -59,7 +61,7 @@ struct cpu_workqueue_struct { struct workqueue_struct *wq; /* I: the owning workqueue */ struct task_struct *thread; -} ____cacheline_aligned; +}; /* * The externally visible workqueue abstraction is an array of @@ -967,6 +969,53 @@ int current_is_keventd(void) } +static struct cpu_workqueue_struct *alloc_cwqs(void) +{ + /* + * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS. + * Make sure that the alignment isn't lower than that of + * unsigned long long. + */ + const size_t size = sizeof(struct cpu_workqueue_struct); + const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, + __alignof__(unsigned long long)); + struct cpu_workqueue_struct *cwqs; +#ifndef CONFIG_SMP + void *ptr; + + /* + * On UP, percpu allocator doesn't honor alignment parameter + * and simply uses arch-dependent default. Allocate enough + * room to align cwq and put an extra pointer at the end + * pointing back to the originally allocated pointer which + * will be used for free. + * + * FIXME: This really belongs to UP percpu code. Update UP + * percpu code to honor alignment and remove this ugliness. 
+ */ + ptr = __alloc_percpu(size + align + sizeof(void *), 1); + cwqs = PTR_ALIGN(ptr, align); + *(void **)per_cpu_ptr(cwqs + 1, 0) = ptr; +#else + /* On SMP, percpu allocator can do it itself */ + cwqs = __alloc_percpu(size, align); +#endif + /* just in case, make sure it's actually aligned */ + BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align)); + return cwqs; +} + +static void free_cwqs(struct cpu_workqueue_struct *cwqs) +{ +#ifndef CONFIG_SMP + /* on UP, the pointer to free is stored right after the cwq */ + if (cwqs) + free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0)); +#else + free_percpu(cwqs); +#endif +} + static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) { struct workqueue_struct *wq = cwq->wq; @@ -1012,7 +1061,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, if (!wq) goto err; - wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct); + wq->cpu_wq = alloc_cwqs(); if (!wq->cpu_wq) goto err; @@ -1031,6 +1080,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); cwq->wq = wq; cwq->cpu = cpu; spin_lock_init(&cwq->lock); @@ -1059,7 +1109,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, return wq; err: if (wq) { - free_percpu(wq->cpu_wq); + free_cwqs(wq->cpu_wq); kfree(wq); } return NULL; @@ -1112,7 +1162,7 @@ void destroy_workqueue(struct workqueue_struct *wq) for_each_possible_cpu(cpu) cleanup_workqueue_thread(get_cwq(cpu, wq)); - free_percpu(wq->cpu_wq); + free_cwqs(wq->cpu_wq); kfree(wq); } EXPORT_SYMBOL_GPL(destroy_workqueue); -- cgit v1.2.3-59-g8ed1b From 73f53c4aa732eced5fcb1844d3d452c30905f20f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:11 +0200 Subject: workqueue: reimplement workqueue flushing using color coded works Reimplement workqueue flushing using color coded works. wq has the current work color which is painted on the works being issued via cwqs. Flushing a workqueue is achieved by advancing the current work colors of cwqs and waiting for all the works which have any of the previous colors to drain. Currently there are 16 possible colors, one is reserved for no color and 15 colors are useable allowing 14 concurrent flushes. When color space gets full, flush attempts are batched up and processed together when color frees up, so even with many concurrent flushers, the new implementation won't build up huge queue of flushers which has to be processed one after another. Only works which are queued via __queue_work() are colored. Works which are directly put on queue using insert_work() use NO_COLOR and don't participate in workqueue flushing. Currently only works used for work-specific flush fall in this category. This new implementation leaves only cleanup_workqueue_thread() as the user of flush_cpu_workqueue(). Just make its users use flush_workqueue() and kthread_stop() directly and kill cleanup_workqueue_thread(). As workqueue flushing doesn't use barrier request anymore, the comment describing the complex synchronization around it in cleanup_workqueue_thread() is removed together with the function. This new implementation is to allow having and sharing multiple workers per cpu. Please note that one more bit is reserved for a future work flag by this patch. This is to avoid shifting bits and updating comments later. 
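To make the color arithmetic concrete: with 4 color bits there are 16 values, one of which is reserved as NO_COLOR, leaving 15 usable colors; since the current work color always occupies one of them, up to 14 flushes can be in flight. A small user-space sketch of the cycle (constants and helper mirror the patch below; not kernel code):

/* User-space sketch of the flush color arithmetic described above. */
#include <stdio.h>

#define WORK_STRUCT_COLOR_BITS	4
#define WORK_NR_COLORS		((1 << WORK_STRUCT_COLOR_BITS) - 1)	/* 15 */
#define WORK_NO_COLOR		WORK_NR_COLORS				/* 15 */

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

int main(void)
{
	int color = 0, i;

	/* Colors cycle 0..14 and never hit WORK_NO_COLOR, which is
	 * reserved for works that don't take part in flushing. */
	printf("usable colors: %d, no-color value: %d\n",
	       WORK_NR_COLORS, WORK_NO_COLOR);
	for (i = 0; i < 20; i++) {
		printf("%d ", color);
		color = work_next_color(color);
	}
	printf("\n");
	return 0;
}

Running it shows the colors wrapping around 0..14 without ever reaching 15, which is why flush attempts that arrive while the color space is full can simply be batched onto a single color once one frees up.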
Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 21 ++- kernel/workqueue.c | 355 +++++++++++++++++++++++++++++++++++++++------- 2 files changed, 322 insertions(+), 54 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index b90958a037dc..8762f62103d8 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -26,11 +26,13 @@ enum { WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ #ifdef CONFIG_DEBUG_OBJECTS_WORK WORK_STRUCT_STATIC_BIT = 1, /* static initializer (debugobjects) */ - WORK_STRUCT_FLAG_BITS = 2, + WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */ #else - WORK_STRUCT_FLAG_BITS = 1, + WORK_STRUCT_COLOR_SHIFT = 2, /* color for workqueue flushing */ #endif + WORK_STRUCT_COLOR_BITS = 4, + WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, #ifdef CONFIG_DEBUG_OBJECTS_WORK WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, @@ -38,6 +40,21 @@ enum { WORK_STRUCT_STATIC = 0, #endif + /* + * The last color is no color used for works which don't + * participate in workqueue flushing. + */ + WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, + WORK_NO_COLOR = WORK_NR_COLORS, + + /* + * Reserve 6 bits off of cwq pointer w/ debugobjects turned + * off. This makes cwqs aligned to 64 bytes which isn't too + * excessive while allowing 15 workqueue flush colors. + */ + WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + + WORK_STRUCT_COLOR_BITS, + WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, }; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 74a38499b19a..56e47c59d73b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -41,6 +41,8 @@ * * L: cwq->lock protected. Access with cwq->lock held. * + * F: wq->flush_mutex protected. + * * W: workqueue_lock protected. */ @@ -60,9 +62,22 @@ struct cpu_workqueue_struct { unsigned int cpu; struct workqueue_struct *wq; /* I: the owning workqueue */ + int work_color; /* L: current color */ + int flush_color; /* L: flushing color */ + int nr_in_flight[WORK_NR_COLORS]; + /* L: nr of in_flight works */ struct task_struct *thread; }; +/* + * Structure used to wait for workqueue flush. 
+ */ +struct wq_flusher { + struct list_head list; /* F: list of flushers */ + int flush_color; /* F: flush color waiting for */ + struct completion done; /* flush completion */ +}; + /* * The externally visible workqueue abstraction is an array of * per-CPU workqueues: @@ -71,6 +86,15 @@ struct workqueue_struct { unsigned int flags; /* I: WQ_* flags */ struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */ struct list_head list; /* W: list of all workqueues */ + + struct mutex flush_mutex; /* protects wq flushing */ + int work_color; /* F: current work color */ + int flush_color; /* F: current flush color */ + atomic_t nr_cwqs_to_flush; /* flush in progress */ + struct wq_flusher *first_flusher; /* F: first flusher */ + struct list_head flusher_queue; /* F: flush waiters */ + struct list_head flusher_overflow; /* F: flush overflow list */ + const char *name; /* I: workqueue name */ #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; @@ -207,6 +231,22 @@ static struct cpu_workqueue_struct *target_cwq(unsigned int cpu, return get_cwq(cpu, wq); } +static unsigned int work_color_to_flags(int color) +{ + return color << WORK_STRUCT_COLOR_SHIFT; +} + +static int get_work_color(struct work_struct *work) +{ + return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & + ((1 << WORK_STRUCT_COLOR_BITS) - 1); +} + +static int work_next_color(int color) +{ + return (color + 1) % WORK_NR_COLORS; +} + /* * Set the workqueue on which a work item is to be run * - Must *only* be called if the pending flag is set @@ -273,7 +313,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, debug_work_activate(work); spin_lock_irqsave(&cwq->lock, flags); BUG_ON(!list_empty(&work->entry)); - insert_work(cwq, work, &cwq->worklist, 0); + cwq->nr_in_flight[cwq->work_color]++; + insert_work(cwq, work, &cwq->worklist, + work_color_to_flags(cwq->work_color)); spin_unlock_irqrestore(&cwq->lock, flags); } @@ -386,6 +428,44 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, } EXPORT_SYMBOL_GPL(queue_delayed_work_on); +/** + * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight + * @cwq: cwq of interest + * @color: color of work which left the queue + * + * A work either has completed or is removed from pending queue, + * decrement nr_in_flight of its cwq and handle workqueue flushing. + * + * CONTEXT: + * spin_lock_irq(cwq->lock). + */ +static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) +{ + /* ignore uncolored works */ + if (color == WORK_NO_COLOR) + return; + + cwq->nr_in_flight[color]--; + + /* is flush in progress and are we at the flushing tip? */ + if (likely(cwq->flush_color != color)) + return; + + /* are there still in-flight works? */ + if (cwq->nr_in_flight[color]) + return; + + /* this cwq is done, clear flush_color */ + cwq->flush_color = -1; + + /* + * If this was the last cwq, wake up the first flusher. It + * will handle the rest. 
+ */ + if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush)) + complete(&cwq->wq->first_flusher->done); +} + /** * process_one_work - process single work * @cwq: cwq to process work for @@ -404,6 +484,7 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, struct work_struct *work) { work_func_t f = work->func; + int work_color; #ifdef CONFIG_LOCKDEP /* * It is permissible to free the struct work_struct from @@ -417,6 +498,7 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, /* claim and process */ debug_work_deactivate(work); cwq->current_work = work; + work_color = get_work_color(work); list_del_init(&work->entry); spin_unlock_irq(&cwq->lock); @@ -443,6 +525,7 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, /* we're done with it, release */ cwq->current_work = NULL; + cwq_dec_nr_in_flight(cwq, work_color); } static void run_workqueue(struct cpu_workqueue_struct *cwq) @@ -529,29 +612,78 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, init_completion(&barr->done); debug_work_activate(&barr->work); - insert_work(cwq, &barr->work, head, 0); + insert_work(cwq, &barr->work, head, work_color_to_flags(WORK_NO_COLOR)); } -static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) +/** + * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing + * @wq: workqueue being flushed + * @flush_color: new flush color, < 0 for no-op + * @work_color: new work color, < 0 for no-op + * + * Prepare cwqs for workqueue flushing. + * + * If @flush_color is non-negative, flush_color on all cwqs should be + * -1. If no cwq has in-flight commands at the specified color, all + * cwq->flush_color's stay at -1 and %false is returned. If any cwq + * has in flight commands, its cwq->flush_color is set to + * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq + * wakeup logic is armed and %true is returned. + * + * The caller should have initialized @wq->first_flusher prior to + * calling this function with non-negative @flush_color. If + * @flush_color is negative, no flush color update is done and %false + * is returned. + * + * If @work_color is non-negative, all cwqs should have the same + * work_color which is previous to @work_color and all will be + * advanced to @work_color. + * + * CONTEXT: + * mutex_lock(wq->flush_mutex). + * + * RETURNS: + * %true if @flush_color >= 0 and there's something to flush. %false + * otherwise. 
+ */ +static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, + int flush_color, int work_color) { - int active = 0; - struct wq_barrier barr; + bool wait = false; + unsigned int cpu; - WARN_ON(cwq->thread == current); - - spin_lock_irq(&cwq->lock); - if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) { - insert_wq_barrier(cwq, &barr, &cwq->worklist); - active = 1; + if (flush_color >= 0) { + BUG_ON(atomic_read(&wq->nr_cwqs_to_flush)); + atomic_set(&wq->nr_cwqs_to_flush, 1); } - spin_unlock_irq(&cwq->lock); - if (active) { - wait_for_completion(&barr.done); - destroy_work_on_stack(&barr.work); + for_each_possible_cpu(cpu) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + spin_lock_irq(&cwq->lock); + + if (flush_color >= 0) { + BUG_ON(cwq->flush_color != -1); + + if (cwq->nr_in_flight[flush_color]) { + cwq->flush_color = flush_color; + atomic_inc(&wq->nr_cwqs_to_flush); + wait = true; + } + } + + if (work_color >= 0) { + BUG_ON(work_color != work_next_color(cwq->work_color)); + cwq->work_color = work_color; + } + + spin_unlock_irq(&cwq->lock); } - return active; + if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) + complete(&wq->first_flusher->done); + + return wait; } /** @@ -566,13 +698,143 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) */ void flush_workqueue(struct workqueue_struct *wq) { - int cpu; + struct wq_flusher this_flusher = { + .list = LIST_HEAD_INIT(this_flusher.list), + .flush_color = -1, + .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), + }; + int next_color; - might_sleep(); lock_map_acquire(&wq->lockdep_map); lock_map_release(&wq->lockdep_map); - for_each_possible_cpu(cpu) - flush_cpu_workqueue(get_cwq(cpu, wq)); + + mutex_lock(&wq->flush_mutex); + + /* + * Start-to-wait phase + */ + next_color = work_next_color(wq->work_color); + + if (next_color != wq->flush_color) { + /* + * Color space is not full. The current work_color + * becomes our flush_color and work_color is advanced + * by one. + */ + BUG_ON(!list_empty(&wq->flusher_overflow)); + this_flusher.flush_color = wq->work_color; + wq->work_color = next_color; + + if (!wq->first_flusher) { + /* no flush in progress, become the first flusher */ + BUG_ON(wq->flush_color != this_flusher.flush_color); + + wq->first_flusher = &this_flusher; + + if (!flush_workqueue_prep_cwqs(wq, wq->flush_color, + wq->work_color)) { + /* nothing to flush, done */ + wq->flush_color = next_color; + wq->first_flusher = NULL; + goto out_unlock; + } + } else { + /* wait in queue */ + BUG_ON(wq->flush_color == this_flusher.flush_color); + list_add_tail(&this_flusher.list, &wq->flusher_queue); + flush_workqueue_prep_cwqs(wq, -1, wq->work_color); + } + } else { + /* + * Oops, color space is full, wait on overflow queue. + * The next flush completion will assign us + * flush_color and transfer to flusher_queue. + */ + list_add_tail(&this_flusher.list, &wq->flusher_overflow); + } + + mutex_unlock(&wq->flush_mutex); + + wait_for_completion(&this_flusher.done); + + /* + * Wake-up-and-cascade phase + * + * First flushers are responsible for cascading flushes and + * handling overflow. Non-first flushers can simply return. 
+ */ + if (wq->first_flusher != &this_flusher) + return; + + mutex_lock(&wq->flush_mutex); + + wq->first_flusher = NULL; + + BUG_ON(!list_empty(&this_flusher.list)); + BUG_ON(wq->flush_color != this_flusher.flush_color); + + while (true) { + struct wq_flusher *next, *tmp; + + /* complete all the flushers sharing the current flush color */ + list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { + if (next->flush_color != wq->flush_color) + break; + list_del_init(&next->list); + complete(&next->done); + } + + BUG_ON(!list_empty(&wq->flusher_overflow) && + wq->flush_color != work_next_color(wq->work_color)); + + /* this flush_color is finished, advance by one */ + wq->flush_color = work_next_color(wq->flush_color); + + /* one color has been freed, handle overflow queue */ + if (!list_empty(&wq->flusher_overflow)) { + /* + * Assign the same color to all overflowed + * flushers, advance work_color and append to + * flusher_queue. This is the start-to-wait + * phase for these overflowed flushers. + */ + list_for_each_entry(tmp, &wq->flusher_overflow, list) + tmp->flush_color = wq->work_color; + + wq->work_color = work_next_color(wq->work_color); + + list_splice_tail_init(&wq->flusher_overflow, + &wq->flusher_queue); + flush_workqueue_prep_cwqs(wq, -1, wq->work_color); + } + + if (list_empty(&wq->flusher_queue)) { + BUG_ON(wq->flush_color != wq->work_color); + break; + } + + /* + * Need to flush more colors. Make the next flusher + * the new first flusher and arm cwqs. + */ + BUG_ON(wq->flush_color == wq->work_color); + BUG_ON(wq->flush_color != next->flush_color); + + list_del_init(&next->list); + wq->first_flusher = next; + + if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1)) + break; + + /* + * Meh... this color is already done, clear first + * flusher and repeat cascading. + */ + wq->first_flusher = NULL; + } + +out_unlock: + mutex_unlock(&wq->flush_mutex); } EXPORT_SYMBOL_GPL(flush_workqueue); @@ -659,6 +921,7 @@ static int try_to_grab_pending(struct work_struct *work) if (cwq == get_wq_data(work)) { debug_work_deactivate(work); list_del_init(&work->entry); + cwq_dec_nr_in_flight(cwq, get_work_color(work)); ret = 1; } } @@ -1066,6 +1329,10 @@ struct workqueue_struct *__create_workqueue_key(const char *name, goto err; wq->flags = flags; + mutex_init(&wq->flush_mutex); + atomic_set(&wq->nr_cwqs_to_flush, 0); + INIT_LIST_HEAD(&wq->flusher_queue); + INIT_LIST_HEAD(&wq->flusher_overflow); wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); INIT_LIST_HEAD(&wq->list); @@ -1083,6 +1350,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); cwq->wq = wq; cwq->cpu = cpu; + cwq->flush_color = -1; spin_lock_init(&cwq->lock); INIT_LIST_HEAD(&cwq->worklist); init_waitqueue_head(&cwq->more_work); @@ -1116,33 +1384,6 @@ err: } EXPORT_SYMBOL_GPL(__create_workqueue_key); -static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) -{ - /* - * Our caller is either destroy_workqueue() or CPU_POST_DEAD, - * cpu_add_remove_lock protects cwq->thread. - */ - if (cwq->thread == NULL) - return; - - lock_map_acquire(&cwq->wq->lockdep_map); - lock_map_release(&cwq->wq->lockdep_map); - - flush_cpu_workqueue(cwq); - /* - * If the caller is CPU_POST_DEAD and cwq->worklist was not empty, - * a concurrent flush_workqueue() can insert a barrier after us. - * However, in that case run_workqueue() won't return and check - * kthread_should_stop() until it flushes all work_struct's. 
- * When ->worklist becomes empty it is safe to exit because no - * more work_structs can be queued on this cwq: flush_workqueue - * checks list_empty(), and a "normal" queue_work() can't use - * a dead CPU. - */ - kthread_stop(cwq->thread); - cwq->thread = NULL; -} - /** * destroy_workqueue - safely terminate a workqueue * @wq: target workqueue @@ -1159,8 +1400,20 @@ void destroy_workqueue(struct workqueue_struct *wq) spin_unlock(&workqueue_lock); cpu_maps_update_done(); - for_each_possible_cpu(cpu) - cleanup_workqueue_thread(get_cwq(cpu, wq)); + flush_workqueue(wq); + + for_each_possible_cpu(cpu) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + int i; + + if (cwq->thread) { + kthread_stop(cwq->thread); + cwq->thread = NULL; + } + + for (i = 0; i < WORK_NR_COLORS; i++) + BUG_ON(cwq->nr_in_flight[i]); + } free_cwqs(wq->cpu_wq); kfree(wq); @@ -1185,9 +1438,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_POST_DEAD: - lock_map_acquire(&cwq->wq->lockdep_map); - lock_map_release(&cwq->wq->lockdep_map); - flush_cpu_workqueue(cwq); + flush_workqueue(wq); break; } } -- cgit v1.2.3-59-g8ed1b From c34056a3fdde777c079cc8a70785c2602f2586cb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:11 +0200 Subject: workqueue: introduce worker Separate out worker thread related information to struct worker from struct cpu_workqueue_struct and implement helper functions to deal with the new struct worker. The only change which is visible outside is that now workqueue worker are all named "kworker/CPUID:WORKERID" where WORKERID is allocated from per-cpu ida. This is in preparation of concurrency managed workqueue where shared multiple workers would be available per cpu. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 211 +++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 150 insertions(+), 61 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 56e47c59d73b..600db10a4dbf 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -33,6 +33,7 @@ #include #include #include +#include /* * Structure fields follow one of the following exclusion rules. @@ -46,6 +47,15 @@ * W: workqueue_lock protected. */ +struct cpu_workqueue_struct; + +struct worker { + struct work_struct *current_work; /* L: work being processed */ + struct task_struct *task; /* I: worker task */ + struct cpu_workqueue_struct *cwq; /* I: the associated cwq */ + int id; /* I: worker id */ +}; + /* * The per-CPU workqueue (if single thread, we always use the first * possible cpu). The lower WORK_STRUCT_FLAG_BITS of @@ -58,15 +68,14 @@ struct cpu_workqueue_struct { struct list_head worklist; wait_queue_head_t more_work; - struct work_struct *current_work; unsigned int cpu; + struct worker *worker; struct workqueue_struct *wq; /* I: the owning workqueue */ int work_color; /* L: current color */ int flush_color; /* L: flushing color */ int nr_in_flight[WORK_NR_COLORS]; /* L: nr of in_flight works */ - struct task_struct *thread; }; /* @@ -214,6 +223,9 @@ static inline void debug_work_deactivate(struct work_struct *work) { } /* Serializes the accesses to the list of workqueues. 
*/ static DEFINE_SPINLOCK(workqueue_lock); static LIST_HEAD(workqueues); +static DEFINE_PER_CPU(struct ida, worker_ida); + +static int worker_thread(void *__worker); static int singlethread_cpu __read_mostly; @@ -428,6 +440,105 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, } EXPORT_SYMBOL_GPL(queue_delayed_work_on); +static struct worker *alloc_worker(void) +{ + struct worker *worker; + + worker = kzalloc(sizeof(*worker), GFP_KERNEL); + return worker; +} + +/** + * create_worker - create a new workqueue worker + * @cwq: cwq the new worker will belong to + * @bind: whether to set affinity to @cpu or not + * + * Create a new worker which is bound to @cwq. The returned worker + * can be started by calling start_worker() or destroyed using + * destroy_worker(). + * + * CONTEXT: + * Might sleep. Does GFP_KERNEL allocations. + * + * RETURNS: + * Pointer to the newly created worker. + */ +static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind) +{ + int id = -1; + struct worker *worker = NULL; + + spin_lock(&workqueue_lock); + while (ida_get_new(&per_cpu(worker_ida, cwq->cpu), &id)) { + spin_unlock(&workqueue_lock); + if (!ida_pre_get(&per_cpu(worker_ida, cwq->cpu), GFP_KERNEL)) + goto fail; + spin_lock(&workqueue_lock); + } + spin_unlock(&workqueue_lock); + + worker = alloc_worker(); + if (!worker) + goto fail; + + worker->cwq = cwq; + worker->id = id; + + worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d", + cwq->cpu, id); + if (IS_ERR(worker->task)) + goto fail; + + if (bind) + kthread_bind(worker->task, cwq->cpu); + + return worker; +fail: + if (id >= 0) { + spin_lock(&workqueue_lock); + ida_remove(&per_cpu(worker_ida, cwq->cpu), id); + spin_unlock(&workqueue_lock); + } + kfree(worker); + return NULL; +} + +/** + * start_worker - start a newly created worker + * @worker: worker to start + * + * Start @worker. + * + * CONTEXT: + * spin_lock_irq(cwq->lock). + */ +static void start_worker(struct worker *worker) +{ + wake_up_process(worker->task); +} + +/** + * destroy_worker - destroy a workqueue worker + * @worker: worker to be destroyed + * + * Destroy @worker. + */ +static void destroy_worker(struct worker *worker) +{ + int cpu = worker->cwq->cpu; + int id = worker->id; + + /* sanity check frenzy */ + BUG_ON(worker->current_work); + + kthread_stop(worker->task); + kfree(worker); + + spin_lock(&workqueue_lock); + ida_remove(&per_cpu(worker_ida, cpu), id); + spin_unlock(&workqueue_lock); +} + /** * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight * @cwq: cwq of interest @@ -468,7 +579,7 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) /** * process_one_work - process single work - * @cwq: cwq to process work for + * @worker: self * @work: work to process * * Process @work. This function contains all the logics necessary to @@ -480,9 +591,9 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) * CONTEXT: * spin_lock_irq(cwq->lock) which is released and regrabbed. 
*/ -static void process_one_work(struct cpu_workqueue_struct *cwq, - struct work_struct *work) +static void process_one_work(struct worker *worker, struct work_struct *work) { + struct cpu_workqueue_struct *cwq = worker->cwq; work_func_t f = work->func; int work_color; #ifdef CONFIG_LOCKDEP @@ -497,7 +608,7 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, #endif /* claim and process */ debug_work_deactivate(work); - cwq->current_work = work; + worker->current_work = work; work_color = get_work_color(work); list_del_init(&work->entry); @@ -524,30 +635,33 @@ static void process_one_work(struct cpu_workqueue_struct *cwq, spin_lock_irq(&cwq->lock); /* we're done with it, release */ - cwq->current_work = NULL; + worker->current_work = NULL; cwq_dec_nr_in_flight(cwq, work_color); } -static void run_workqueue(struct cpu_workqueue_struct *cwq) +static void run_workqueue(struct worker *worker) { + struct cpu_workqueue_struct *cwq = worker->cwq; + spin_lock_irq(&cwq->lock); while (!list_empty(&cwq->worklist)) { struct work_struct *work = list_entry(cwq->worklist.next, struct work_struct, entry); - process_one_work(cwq, work); + process_one_work(worker, work); } spin_unlock_irq(&cwq->lock); } /** * worker_thread - the worker thread function - * @__cwq: cwq to serve + * @__worker: self * * The cwq worker thread function. */ -static int worker_thread(void *__cwq) +static int worker_thread(void *__worker) { - struct cpu_workqueue_struct *cwq = __cwq; + struct worker *worker = __worker; + struct cpu_workqueue_struct *cwq = worker->cwq; DEFINE_WAIT(wait); if (cwq->wq->flags & WQ_FREEZEABLE) @@ -566,11 +680,11 @@ static int worker_thread(void *__cwq) if (kthread_should_stop()) break; - if (unlikely(!cpumask_equal(&cwq->thread->cpus_allowed, + if (unlikely(!cpumask_equal(&worker->task->cpus_allowed, get_cpu_mask(cwq->cpu)))) - set_cpus_allowed_ptr(cwq->thread, + set_cpus_allowed_ptr(worker->task, get_cpu_mask(cwq->cpu)); - run_workqueue(cwq); + run_workqueue(worker); } return 0; @@ -873,7 +987,7 @@ int flush_work(struct work_struct *work) goto already_gone; prev = &work->entry; } else { - if (cwq->current_work != work) + if (!cwq->worker || cwq->worker->current_work != work) goto already_gone; prev = &cwq->worklist; } @@ -937,7 +1051,7 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, int running = 0; spin_lock_irq(&cwq->lock); - if (unlikely(cwq->current_work == work)) { + if (unlikely(cwq->worker && cwq->worker->current_work == work)) { insert_wq_barrier(cwq, &barr, cwq->worklist.next); running = 1; } @@ -1225,7 +1339,7 @@ int current_is_keventd(void) BUG_ON(!keventd_wq); cwq = get_cwq(cpu, keventd_wq); - if (current == cwq->thread) + if (current == cwq->worker->task) ret = 1; return ret; @@ -1279,38 +1393,6 @@ static void free_cwqs(struct cpu_workqueue_struct *cwqs) #endif } -static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) -{ - struct workqueue_struct *wq = cwq->wq; - struct task_struct *p; - - p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu); - /* - * Nobody can add the work_struct to this cwq, - * if (caller is __create_workqueue) - * nobody should see this wq - * else // caller is CPU_UP_PREPARE - * cpu is not on cpu_online_map - * so we can abort safely. 
- */ - if (IS_ERR(p)) - return PTR_ERR(p); - cwq->thread = p; - - return 0; -} - -static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) -{ - struct task_struct *p = cwq->thread; - - if (p != NULL) { - if (cpu >= 0) - kthread_bind(p, cpu); - wake_up_process(p); - } -} - struct workqueue_struct *__create_workqueue_key(const char *name, unsigned int flags, struct lock_class_key *key, @@ -1318,7 +1400,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name, { bool singlethread = flags & WQ_SINGLE_THREAD; struct workqueue_struct *wq; - int err = 0, cpu; + bool failed = false; + unsigned int cpu; wq = kzalloc(sizeof(*wq), GFP_KERNEL); if (!wq) @@ -1348,20 +1431,21 @@ struct workqueue_struct *__create_workqueue_key(const char *name, struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); - cwq->wq = wq; cwq->cpu = cpu; + cwq->wq = wq; cwq->flush_color = -1; spin_lock_init(&cwq->lock); INIT_LIST_HEAD(&cwq->worklist); init_waitqueue_head(&cwq->more_work); - if (err) + if (failed) continue; - err = create_workqueue_thread(cwq, cpu); - if (cpu_online(cpu) && !singlethread) - start_workqueue_thread(cwq, cpu); + cwq->worker = create_worker(cwq, + cpu_online(cpu) && !singlethread); + if (cwq->worker) + start_worker(cwq->worker); else - start_workqueue_thread(cwq, -1); + failed = true; } spin_lock(&workqueue_lock); @@ -1370,7 +1454,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, cpu_maps_update_done(); - if (err) { + if (failed) { destroy_workqueue(wq); wq = NULL; } @@ -1406,9 +1490,9 @@ void destroy_workqueue(struct workqueue_struct *wq) struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); int i; - if (cwq->thread) { - kthread_stop(cwq->thread); - cwq->thread = NULL; + if (cwq->worker) { + destroy_worker(cwq->worker); + cwq->worker = NULL; } for (i = 0; i < WORK_NR_COLORS; i++) @@ -1495,6 +1579,11 @@ EXPORT_SYMBOL_GPL(work_on_cpu); void __init init_workqueues(void) { + unsigned int cpu; + + for_each_possible_cpu(cpu) + ida_init(&per_cpu(worker_ida, cpu)); + singlethread_cpu = cpumask_first(cpu_possible_mask); hotcpu_notifier(workqueue_cpu_callback, 0); keventd_wq = create_workqueue("events"); -- cgit v1.2.3-59-g8ed1b From affee4b294a0fc97d67c8a77dc080c4dd262a79e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:12 +0200 Subject: workqueue: reimplement work flushing using linked works A work is linked to the next one by having WORK_STRUCT_LINKED bit set and these links can be chained. When a linked work is dispatched to a worker, all linked works are dispatched to the worker's newly added ->scheduled queue and processed back-to-back. Currently, as there's only single worker per cwq, having linked works doesn't make any visible behavior difference. This change is to prepare for multiple shared workers per cpu. 
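A toy, user-space model of the dispatch rule: taking a work whose LINKED bit is set also takes its consecutive linked successors, the way move_linked_works() feeds a worker's ->scheduled list below (plain arrays stand in for list_head; not kernel code):

/* Toy model of the WORK_STRUCT_LINKED dispatch rule described above. */
#include <stdio.h>
#include <stdbool.h>

struct toy_work {
	const char *name;
	bool linked;	/* "next work is linked to this one" */
};

/* Take @works[start] plus any consecutive linked successors, returning
 * the index of the first work that was NOT taken -- the analogue of
 * move_linked_works() filling a worker's ->scheduled list. */
static int take_linked_series(struct toy_work *works, int start, int n)
{
	int i = start;

	do {
		printf("  scheduled: %s\n", works[i].name);
	} while (works[i++].linked && i < n);

	return i;
}

int main(void)
{
	struct toy_work queue[] = {
		{ "A",		true  },	/* A is followed by its barrier */
		{ "A-barrier",	false },
		{ "B",		false },
	};
	int n = sizeof(queue) / sizeof(queue[0]), pos = 0;

	while (pos < n) {
		printf("dispatch starting at %s:\n", queue[pos].name);
		pos = take_linked_series(queue, pos, n);
	}
	return 0;
}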
Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 4 +- kernel/workqueue.c | 152 +++++++++++++++++++++++++++++++++++++++------- 2 files changed, 134 insertions(+), 22 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 8762f62103d8..4f4fdba722c3 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -24,8 +24,9 @@ typedef void (*work_func_t)(struct work_struct *work); enum { WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ + WORK_STRUCT_LINKED_BIT = 1, /* next work is linked to this one */ #ifdef CONFIG_DEBUG_OBJECTS_WORK - WORK_STRUCT_STATIC_BIT = 1, /* static initializer (debugobjects) */ + WORK_STRUCT_STATIC_BIT = 2, /* static initializer (debugobjects) */ WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */ #else WORK_STRUCT_COLOR_SHIFT = 2, /* color for workqueue flushing */ @@ -34,6 +35,7 @@ enum { WORK_STRUCT_COLOR_BITS = 4, WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, + WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, #ifdef CONFIG_DEBUG_OBJECTS_WORK WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, #else diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 600db10a4dbf..9953d3c7bd10 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -51,6 +51,7 @@ struct cpu_workqueue_struct; struct worker { struct work_struct *current_work; /* L: work being processed */ + struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ struct cpu_workqueue_struct *cwq; /* I: the associated cwq */ int id; /* I: worker id */ @@ -445,6 +446,8 @@ static struct worker *alloc_worker(void) struct worker *worker; worker = kzalloc(sizeof(*worker), GFP_KERNEL); + if (worker) + INIT_LIST_HEAD(&worker->scheduled); return worker; } @@ -530,6 +533,7 @@ static void destroy_worker(struct worker *worker) /* sanity check frenzy */ BUG_ON(worker->current_work); + BUG_ON(!list_empty(&worker->scheduled)); kthread_stop(worker->task); kfree(worker); @@ -539,6 +543,47 @@ static void destroy_worker(struct worker *worker) spin_unlock(&workqueue_lock); } +/** + * move_linked_works - move linked works to a list + * @work: start of series of works to be scheduled + * @head: target list to append @work to + * @nextp: out paramter for nested worklist walking + * + * Schedule linked works starting from @work to @head. Work series to + * be scheduled starts at @work and includes any consecutive work with + * WORK_STRUCT_LINKED set in its predecessor. + * + * If @nextp is not NULL, it's updated to point to the next work of + * the last scheduled work. This allows move_linked_works() to be + * nested inside outer list_for_each_entry_safe(). + * + * CONTEXT: + * spin_lock_irq(cwq->lock). + */ +static void move_linked_works(struct work_struct *work, struct list_head *head, + struct work_struct **nextp) +{ + struct work_struct *n; + + /* + * Linked worklist will always end before the end of the list, + * use NULL for list head. + */ + list_for_each_entry_safe_from(work, n, NULL, entry) { + list_move_tail(&work->entry, head); + if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) + break; + } + + /* + * If we're already inside safe list traversal and have moved + * multiple works to the scheduled queue, the next position + * needs to be updated. 
+ */ + if (nextp) + *nextp = n; +} + /** * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight * @cwq: cwq of interest @@ -639,17 +684,25 @@ static void process_one_work(struct worker *worker, struct work_struct *work) cwq_dec_nr_in_flight(cwq, work_color); } -static void run_workqueue(struct worker *worker) +/** + * process_scheduled_works - process scheduled works + * @worker: self + * + * Process all scheduled works. Please note that the scheduled list + * may change while processing a work, so this function repeatedly + * fetches a work from the top and executes it. + * + * CONTEXT: + * spin_lock_irq(cwq->lock) which may be released and regrabbed + * multiple times. + */ +static void process_scheduled_works(struct worker *worker) { - struct cpu_workqueue_struct *cwq = worker->cwq; - - spin_lock_irq(&cwq->lock); - while (!list_empty(&cwq->worklist)) { - struct work_struct *work = list_entry(cwq->worklist.next, + while (!list_empty(&worker->scheduled)) { + struct work_struct *work = list_first_entry(&worker->scheduled, struct work_struct, entry); process_one_work(worker, work); } - spin_unlock_irq(&cwq->lock); } /** @@ -684,7 +737,28 @@ static int worker_thread(void *__worker) get_cpu_mask(cwq->cpu)))) set_cpus_allowed_ptr(worker->task, get_cpu_mask(cwq->cpu)); - run_workqueue(worker); + + spin_lock_irq(&cwq->lock); + + while (!list_empty(&cwq->worklist)) { + struct work_struct *work = + list_first_entry(&cwq->worklist, + struct work_struct, entry); + + if (likely(!(*work_data_bits(work) & + WORK_STRUCT_LINKED))) { + /* optimization path, not strictly necessary */ + process_one_work(worker, work); + if (unlikely(!list_empty(&worker->scheduled))) + process_scheduled_works(worker); + } else { + move_linked_works(work, &worker->scheduled, + NULL); + process_scheduled_works(worker); + } + } + + spin_unlock_irq(&cwq->lock); } return 0; @@ -705,16 +779,33 @@ static void wq_barrier_func(struct work_struct *work) * insert_wq_barrier - insert a barrier work * @cwq: cwq to insert barrier into * @barr: wq_barrier to insert - * @head: insertion point + * @target: target work to attach @barr to + * @worker: worker currently executing @target, NULL if @target is not executing * - * Insert barrier @barr into @cwq before @head. + * @barr is linked to @target such that @barr is completed only after + * @target finishes execution. Please note that the ordering + * guarantee is observed only with respect to @target and on the local + * cpu. + * + * Currently, a queued barrier can't be canceled. This is because + * try_to_grab_pending() can't determine whether the work to be + * grabbed is at the head of the queue and thus can't clear LINKED + * flag of the previous work while there must be a valid next work + * after a work with LINKED flag set. + * + * Note that when @worker is non-NULL, @target may be modified + * underneath us, so we can't reliably determine cwq from @target. * * CONTEXT: * spin_lock_irq(cwq->lock). 
*/ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, - struct wq_barrier *barr, struct list_head *head) + struct wq_barrier *barr, + struct work_struct *target, struct worker *worker) { + struct list_head *head; + unsigned int linked = 0; + /* * debugobject calls are safe here even with cwq->lock locked * as we know for sure that this will not trigger any of the @@ -725,8 +816,24 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); init_completion(&barr->done); + /* + * If @target is currently being executed, schedule the + * barrier to the worker; otherwise, put it after @target. + */ + if (worker) + head = worker->scheduled.next; + else { + unsigned long *bits = work_data_bits(target); + + head = target->entry.next; + /* there can already be other linked works, inherit and set */ + linked = *bits & WORK_STRUCT_LINKED; + __set_bit(WORK_STRUCT_LINKED_BIT, bits); + } + debug_work_activate(&barr->work); - insert_work(cwq, &barr->work, head, work_color_to_flags(WORK_NO_COLOR)); + insert_work(cwq, &barr->work, head, + work_color_to_flags(WORK_NO_COLOR) | linked); } /** @@ -964,8 +1071,8 @@ EXPORT_SYMBOL_GPL(flush_workqueue); */ int flush_work(struct work_struct *work) { + struct worker *worker = NULL; struct cpu_workqueue_struct *cwq; - struct list_head *prev; struct wq_barrier barr; might_sleep(); @@ -985,14 +1092,14 @@ int flush_work(struct work_struct *work) smp_rmb(); if (unlikely(cwq != get_wq_data(work))) goto already_gone; - prev = &work->entry; } else { - if (!cwq->worker || cwq->worker->current_work != work) + if (cwq->worker && cwq->worker->current_work == work) + worker = cwq->worker; + if (!worker) goto already_gone; - prev = &cwq->worklist; } - insert_wq_barrier(cwq, &barr, prev->next); + insert_wq_barrier(cwq, &barr, work, worker); spin_unlock_irq(&cwq->lock); wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); @@ -1048,16 +1155,19 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, struct work_struct *work) { struct wq_barrier barr; - int running = 0; + struct worker *worker; spin_lock_irq(&cwq->lock); + + worker = NULL; if (unlikely(cwq->worker && cwq->worker->current_work == work)) { - insert_wq_barrier(cwq, &barr, cwq->worklist.next); - running = 1; + worker = cwq->worker; + insert_wq_barrier(cwq, &barr, work, worker); } + spin_unlock_irq(&cwq->lock); - if (unlikely(running)) { + if (unlikely(worker)) { wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); } -- cgit v1.2.3-59-g8ed1b From 1e19ffc63dbbaea7a7d1c63d99c38d3e5a4c7edf Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:12 +0200 Subject: workqueue: implement per-cwq active work limit Add cwq->nr_active, cwq->max_active and cwq->delayed_work. nr_active counts the number of active works per cwq. A work is active if it's flushable (colored) and is on cwq's worklist. If nr_active reaches max_active, new works are queued on cwq->delayed_work and activated later as works on the cwq complete and decrement nr_active. cwq->max_active can be specified via the new @max_active parameter to __create_workqueue() and is set to 1 for all workqueues for now. As each cwq has only single worker now, this double queueing doesn't cause any behavior difference visible to its users. This will be used to reimplement freeze/thaw and implement shared worker pool. 
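A user-space sketch of the gate this adds: with max_active of 1, the first queued work lands on the worklist and later ones are parked on delayed_works until an active work completes (counters only, no real lists; not kernel code):

/* User-space sketch of the per-cwq max_active gate described above. */
#include <stdio.h>

struct toy_cwq {
	int nr_active;	/* works currently on ->worklist */
	int nr_delayed;	/* works parked on ->delayed_works */
	int max_active;
};

static void toy_queue_work(struct toy_cwq *cwq)
{
	if (cwq->nr_active < cwq->max_active)
		cwq->nr_active++;		/* goes straight to the worklist */
	else
		cwq->nr_delayed++;		/* parked until something completes */
}

static void toy_work_done(struct toy_cwq *cwq)
{
	cwq->nr_active--;
	if (cwq->nr_delayed && cwq->nr_active < cwq->max_active) {
		cwq->nr_delayed--;		/* cwq_activate_first_delayed() */
		cwq->nr_active++;
	}
}

int main(void)
{
	struct toy_cwq cwq = { .max_active = 1 };	/* all wqs use 1 for now */
	int i;

	for (i = 0; i < 3; i++)
		toy_queue_work(&cwq);
	printf("after queueing 3 works: active=%d delayed=%d\n",
	       cwq.nr_active, cwq.nr_delayed);

	toy_work_done(&cwq);
	printf("after one completes:    active=%d delayed=%d\n",
	       cwq.nr_active, cwq.nr_delayed);
	return 0;
}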
Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 18 +++++++++--------- kernel/workqueue.c | 39 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 46 insertions(+), 11 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 4f4fdba722c3..eb753b7790e5 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -225,11 +225,11 @@ enum { }; extern struct workqueue_struct * -__create_workqueue_key(const char *name, unsigned int flags, +__create_workqueue_key(const char *name, unsigned int flags, int max_active, struct lock_class_key *key, const char *lock_name); #ifdef CONFIG_LOCKDEP -#define __create_workqueue(name, flags) \ +#define __create_workqueue(name, flags, max_active) \ ({ \ static struct lock_class_key __key; \ const char *__lock_name; \ @@ -239,20 +239,20 @@ __create_workqueue_key(const char *name, unsigned int flags, else \ __lock_name = #name; \ \ - __create_workqueue_key((name), (flags), &__key, \ - __lock_name); \ + __create_workqueue_key((name), (flags), (max_active), \ + &__key, __lock_name); \ }) #else -#define __create_workqueue(name, flags) \ - __create_workqueue_key((name), (flags), NULL, NULL) +#define __create_workqueue(name, flags, max_active) \ + __create_workqueue_key((name), (flags), (max_active), NULL, NULL) #endif #define create_workqueue(name) \ - __create_workqueue((name), 0) + __create_workqueue((name), 0, 1) #define create_freezeable_workqueue(name) \ - __create_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_THREAD) + __create_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_THREAD, 1) #define create_singlethread_workqueue(name) \ - __create_workqueue((name), WQ_SINGLE_THREAD) + __create_workqueue((name), WQ_SINGLE_THREAD, 1) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 9953d3c7bd10..e541b5db67dd 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -77,6 +77,9 @@ struct cpu_workqueue_struct { int flush_color; /* L: flushing color */ int nr_in_flight[WORK_NR_COLORS]; /* L: nr of in_flight works */ + int nr_active; /* L: nr of active works */ + int max_active; /* I: max active works */ + struct list_head delayed_works; /* L: delayed works */ }; /* @@ -321,14 +324,24 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq); + struct list_head *worklist; unsigned long flags; debug_work_activate(work); + spin_lock_irqsave(&cwq->lock, flags); BUG_ON(!list_empty(&work->entry)); + cwq->nr_in_flight[cwq->work_color]++; - insert_work(cwq, work, &cwq->worklist, - work_color_to_flags(cwq->work_color)); + + if (likely(cwq->nr_active < cwq->max_active)) { + cwq->nr_active++; + worklist = &cwq->worklist; + } else + worklist = &cwq->delayed_works; + + insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); + spin_unlock_irqrestore(&cwq->lock, flags); } @@ -584,6 +597,15 @@ static void move_linked_works(struct work_struct *work, struct list_head *head, *nextp = n; } +static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) +{ + struct work_struct *work = list_first_entry(&cwq->delayed_works, + struct work_struct, entry); + + move_linked_works(work, &cwq->worklist, NULL); + cwq->nr_active++; +} + /** * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight * @cwq: cwq of interest @@ -602,6 +624,12 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) return; cwq->nr_in_flight[color]--; + 
cwq->nr_active--; + + /* one down, submit a delayed one */ + if (!list_empty(&cwq->delayed_works) && + cwq->nr_active < cwq->max_active) + cwq_activate_first_delayed(cwq); /* is flush in progress and are we at the flushing tip? */ if (likely(cwq->flush_color != color)) @@ -1505,6 +1533,7 @@ static void free_cwqs(struct cpu_workqueue_struct *cwqs) struct workqueue_struct *__create_workqueue_key(const char *name, unsigned int flags, + int max_active, struct lock_class_key *key, const char *lock_name) { @@ -1513,6 +1542,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name, bool failed = false; unsigned int cpu; + max_active = clamp_val(max_active, 1, INT_MAX); + wq = kzalloc(sizeof(*wq), GFP_KERNEL); if (!wq) goto err; @@ -1544,8 +1575,10 @@ struct workqueue_struct *__create_workqueue_key(const char *name, cwq->cpu = cpu; cwq->wq = wq; cwq->flush_color = -1; + cwq->max_active = max_active; spin_lock_init(&cwq->lock); INIT_LIST_HEAD(&cwq->worklist); + INIT_LIST_HEAD(&cwq->delayed_works); init_waitqueue_head(&cwq->more_work); if (failed) @@ -1607,6 +1640,8 @@ void destroy_workqueue(struct workqueue_struct *wq) for (i = 0; i < WORK_NR_COLORS; i++) BUG_ON(cwq->nr_in_flight[i]); + BUG_ON(cwq->nr_active); + BUG_ON(!list_empty(&cwq->delayed_works)); } free_cwqs(wq->cpu_wq); -- cgit v1.2.3-59-g8ed1b From a0a1a5fd4fb15ec61117c759fe9f5c16c53d9e9c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:12 +0200 Subject: workqueue: reimplement workqueue freeze using max_active Currently, workqueue freezing is implemented by marking the worker freezeable and calling try_to_freeze() from dispatch loop. Reimplement it using cwq->limit so that the workqueue is frozen instead of the worker. * workqueue_struct->saved_max_active is added which stores the specified max_active on initialization. * On freeze, all cwq->max_active's are quenched to zero. Freezing is complete when nr_active on all cwqs reach zero. * On thaw, all cwq->max_active's are restored to wq->saved_max_active and the worklist is repopulated. This new implementation allows having single shared pool of workers per cpu. 
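For reference, the calling sequence this patch wires into the suspend path looks roughly like the sketch below; the three workqueue functions are stubbed so the example is self-contained and only models the begin/poll/thaw protocol (not kernel code):

/* Sketch of the freezer-side calling sequence added below (stubbed). */
#include <stdio.h>
#include <stdbool.h>

static int busy_polls;

static void freeze_workqueues_begin(void)
{
	puts("max_active -> 0 on freezeable wqs");
}

static bool freeze_workqueues_busy(void)
{
	return ++busy_polls < 3;	/* pretend two "still busy" polls */
}

static void thaw_workqueues(void)
{
	puts("max_active restored, delayed works requeued");
}

int main(void)
{
	freeze_workqueues_begin();

	/* suspend keeps polling alongside the task-freezing loop */
	while (freeze_workqueues_busy())
		puts("workqueues still have active works, keep waiting");

	puts("frozen; ... system sleeps ...");
	thaw_workqueues();
	return 0;
}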
Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 7 ++ kernel/power/process.c | 21 +++++- kernel/workqueue.c | 163 +++++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 179 insertions(+), 12 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index eb753b7790e5..ab0b7fb99bc2 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -340,4 +340,11 @@ static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) #else long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg); #endif /* CONFIG_SMP */ + +#ifdef CONFIG_FREEZER +extern void freeze_workqueues_begin(void); +extern bool freeze_workqueues_busy(void); +extern void thaw_workqueues(void); +#endif /* CONFIG_FREEZER */ + #endif diff --git a/kernel/power/process.c b/kernel/power/process.c index 71ae29052ab6..028a99598f49 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -15,6 +15,7 @@ #include #include #include +#include /* * Timeout for stopping processes @@ -35,6 +36,7 @@ static int try_to_freeze_tasks(bool sig_only) struct task_struct *g, *p; unsigned long end_time; unsigned int todo; + bool wq_busy = false; struct timeval start, end; u64 elapsed_csecs64; unsigned int elapsed_csecs; @@ -42,6 +44,10 @@ static int try_to_freeze_tasks(bool sig_only) do_gettimeofday(&start); end_time = jiffies + TIMEOUT; + + if (!sig_only) + freeze_workqueues_begin(); + while (true) { todo = 0; read_lock(&tasklist_lock); @@ -63,6 +69,12 @@ static int try_to_freeze_tasks(bool sig_only) todo++; } while_each_thread(g, p); read_unlock(&tasklist_lock); + + if (!sig_only) { + wq_busy = freeze_workqueues_busy(); + todo += wq_busy; + } + if (!todo || time_after(jiffies, end_time)) break; @@ -86,8 +98,12 @@ static int try_to_freeze_tasks(bool sig_only) */ printk("\n"); printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " - "(%d tasks refusing to freeze):\n", - elapsed_csecs / 100, elapsed_csecs % 100, todo); + "(%d tasks refusing to freeze, wq_busy=%d):\n", + elapsed_csecs / 100, elapsed_csecs % 100, + todo - wq_busy, wq_busy); + + thaw_workqueues(); + read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); @@ -157,6 +173,7 @@ void thaw_processes(void) oom_killer_enable(); printk("Restarting tasks ... "); + thaw_workqueues(); thaw_tasks(true); thaw_tasks(false); schedule(); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e541b5db67dd..4d059c532792 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -78,7 +78,7 @@ struct cpu_workqueue_struct { int nr_in_flight[WORK_NR_COLORS]; /* L: nr of in_flight works */ int nr_active; /* L: nr of active works */ - int max_active; /* I: max active works */ + int max_active; /* L: max active works */ struct list_head delayed_works; /* L: delayed works */ }; @@ -108,6 +108,7 @@ struct workqueue_struct { struct list_head flusher_queue; /* F: flush waiters */ struct list_head flusher_overflow; /* F: flush overflow list */ + int saved_max_active; /* I: saved cwq max_active */ const char *name; /* I: workqueue name */ #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; @@ -228,6 +229,7 @@ static inline void debug_work_deactivate(struct work_struct *work) { } static DEFINE_SPINLOCK(workqueue_lock); static LIST_HEAD(workqueues); static DEFINE_PER_CPU(struct ida, worker_ida); +static bool workqueue_freezing; /* W: have wqs started freezing? 
*/ static int worker_thread(void *__worker); @@ -745,19 +747,13 @@ static int worker_thread(void *__worker) struct cpu_workqueue_struct *cwq = worker->cwq; DEFINE_WAIT(wait); - if (cwq->wq->flags & WQ_FREEZEABLE) - set_freezable(); - for (;;) { prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE); - if (!freezing(current) && - !kthread_should_stop() && + if (!kthread_should_stop() && list_empty(&cwq->worklist)) schedule(); finish_wait(&cwq->more_work, &wait); - try_to_freeze(); - if (kthread_should_stop()) break; @@ -1553,6 +1549,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, goto err; wq->flags = flags; + wq->saved_max_active = max_active; mutex_init(&wq->flush_mutex); atomic_set(&wq->nr_cwqs_to_flush, 0); INIT_LIST_HEAD(&wq->flusher_queue); @@ -1591,8 +1588,19 @@ struct workqueue_struct *__create_workqueue_key(const char *name, failed = true; } + /* + * workqueue_lock protects global freeze state and workqueues + * list. Grab it, set max_active accordingly and add the new + * workqueue to workqueues list. + */ spin_lock(&workqueue_lock); + + if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) + for_each_possible_cpu(cpu) + get_cwq(cpu, wq)->max_active = 0; + list_add(&wq->list, &workqueues); + spin_unlock(&workqueue_lock); cpu_maps_update_done(); @@ -1621,14 +1629,18 @@ void destroy_workqueue(struct workqueue_struct *wq) { int cpu; + flush_workqueue(wq); + + /* + * wq list is used to freeze wq, remove from list after + * flushing is complete in case freeze races us. + */ cpu_maps_update_begin(); spin_lock(&workqueue_lock); list_del(&wq->list); spin_unlock(&workqueue_lock); cpu_maps_update_done(); - flush_workqueue(wq); - for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); int i; @@ -1722,6 +1734,137 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) EXPORT_SYMBOL_GPL(work_on_cpu); #endif /* CONFIG_SMP */ +#ifdef CONFIG_FREEZER + +/** + * freeze_workqueues_begin - begin freezing workqueues + * + * Start freezing workqueues. After this function returns, all + * freezeable workqueues will queue new works to their frozen_works + * list instead of the cwq ones. + * + * CONTEXT: + * Grabs and releases workqueue_lock and cwq->lock's. + */ +void freeze_workqueues_begin(void) +{ + struct workqueue_struct *wq; + unsigned int cpu; + + spin_lock(&workqueue_lock); + + BUG_ON(workqueue_freezing); + workqueue_freezing = true; + + for_each_possible_cpu(cpu) { + list_for_each_entry(wq, &workqueues, list) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + spin_lock_irq(&cwq->lock); + + if (wq->flags & WQ_FREEZEABLE) + cwq->max_active = 0; + + spin_unlock_irq(&cwq->lock); + } + } + + spin_unlock(&workqueue_lock); +} + +/** + * freeze_workqueues_busy - are freezeable workqueues still busy? + * + * Check whether freezing is complete. This function must be called + * between freeze_workqueues_begin() and thaw_workqueues(). + * + * CONTEXT: + * Grabs and releases workqueue_lock. + * + * RETURNS: + * %true if some freezeable workqueues are still busy. %false if + * freezing is complete. + */ +bool freeze_workqueues_busy(void) +{ + struct workqueue_struct *wq; + unsigned int cpu; + bool busy = false; + + spin_lock(&workqueue_lock); + + BUG_ON(!workqueue_freezing); + + for_each_possible_cpu(cpu) { + /* + * nr_active is monotonically decreasing. It's safe + * to peek without lock. 
+ */ + list_for_each_entry(wq, &workqueues, list) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + if (!(wq->flags & WQ_FREEZEABLE)) + continue; + + BUG_ON(cwq->nr_active < 0); + if (cwq->nr_active) { + busy = true; + goto out_unlock; + } + } + } +out_unlock: + spin_unlock(&workqueue_lock); + return busy; +} + +/** + * thaw_workqueues - thaw workqueues + * + * Thaw workqueues. Normal queueing is restored and all collected + * frozen works are transferred to their respective cwq worklists. + * + * CONTEXT: + * Grabs and releases workqueue_lock and cwq->lock's. + */ +void thaw_workqueues(void) +{ + struct workqueue_struct *wq; + unsigned int cpu; + + spin_lock(&workqueue_lock); + + if (!workqueue_freezing) + goto out_unlock; + + for_each_possible_cpu(cpu) { + list_for_each_entry(wq, &workqueues, list) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + if (!(wq->flags & WQ_FREEZEABLE)) + continue; + + spin_lock_irq(&cwq->lock); + + /* restore max_active and repopulate worklist */ + cwq->max_active = wq->saved_max_active; + + while (!list_empty(&cwq->delayed_works) && + cwq->nr_active < cwq->max_active) + cwq_activate_first_delayed(cwq); + + wake_up(&cwq->more_work); + + spin_unlock_irq(&cwq->lock); + } + } + + workqueue_freezing = false; +out_unlock: + spin_unlock(&workqueue_lock); +} +#endif /* CONFIG_FREEZER */ + void __init init_workqueues(void) { unsigned int cpu; -- cgit v1.2.3-59-g8ed1b From 8b03ae3cde59af9facab7c831b4141515d5dbcc8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:12 +0200 Subject: workqueue: introduce global cwq and unify cwq locks There is one gcwq (global cwq) per each cpu and all cwqs on an cpu point to it. A gcwq contains a lock to be used by all cwqs on the cpu and an ida to give IDs to workers belonging to the cpu. This patch introduces gcwq, moves worker_ida into gcwq and make all cwqs on the same cpu use the cpu's gcwq->lock instead of separate locks. gcwq->ida is now protected by gcwq->lock too. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 160 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 98 insertions(+), 62 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4d059c532792..b043f57516bd 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -40,23 +40,34 @@ * * I: Set during initialization and read-only afterwards. * - * L: cwq->lock protected. Access with cwq->lock held. + * L: gcwq->lock protected. Access with gcwq->lock held. * * F: wq->flush_mutex protected. * * W: workqueue_lock protected. */ +struct global_cwq; struct cpu_workqueue_struct; struct worker { struct work_struct *current_work; /* L: work being processed */ struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ + struct global_cwq *gcwq; /* I: the associated gcwq */ struct cpu_workqueue_struct *cwq; /* I: the associated cwq */ int id; /* I: worker id */ }; +/* + * Global per-cpu workqueue. + */ +struct global_cwq { + spinlock_t lock; /* the gcwq lock */ + unsigned int cpu; /* I: the associated cpu */ + struct ida worker_ida; /* L: for worker IDs */ +} ____cacheline_aligned_in_smp; + /* * The per-CPU workqueue (if single thread, we always use the first * possible cpu). The lower WORK_STRUCT_FLAG_BITS of @@ -64,14 +75,10 @@ struct worker { * aligned at two's power of the number of flag bits. 
*/ struct cpu_workqueue_struct { - - spinlock_t lock; - + struct global_cwq *gcwq; /* I: the associated gcwq */ struct list_head worklist; wait_queue_head_t more_work; - unsigned int cpu; struct worker *worker; - struct workqueue_struct *wq; /* I: the owning workqueue */ int work_color; /* L: current color */ int flush_color; /* L: flushing color */ @@ -228,13 +235,19 @@ static inline void debug_work_deactivate(struct work_struct *work) { } /* Serializes the accesses to the list of workqueues. */ static DEFINE_SPINLOCK(workqueue_lock); static LIST_HEAD(workqueues); -static DEFINE_PER_CPU(struct ida, worker_ida); static bool workqueue_freezing; /* W: have wqs started freezing? */ +static DEFINE_PER_CPU(struct global_cwq, global_cwq); + static int worker_thread(void *__worker); static int singlethread_cpu __read_mostly; +static struct global_cwq *get_gcwq(unsigned int cpu) +{ + return &per_cpu(global_cwq, cpu); +} + static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, struct workqueue_struct *wq) { @@ -303,7 +316,7 @@ static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) * Insert @work into @cwq after @head. * * CONTEXT: - * spin_lock_irq(cwq->lock). + * spin_lock_irq(gcwq->lock). */ static void insert_work(struct cpu_workqueue_struct *cwq, struct work_struct *work, struct list_head *head, @@ -326,12 +339,13 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq); + struct global_cwq *gcwq = cwq->gcwq; struct list_head *worklist; unsigned long flags; debug_work_activate(work); - spin_lock_irqsave(&cwq->lock, flags); + spin_lock_irqsave(&gcwq->lock, flags); BUG_ON(!list_empty(&work->entry)); cwq->nr_in_flight[cwq->work_color]++; @@ -344,7 +358,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); - spin_unlock_irqrestore(&cwq->lock, flags); + spin_unlock_irqrestore(&gcwq->lock, flags); } /** @@ -483,39 +497,41 @@ static struct worker *alloc_worker(void) */ static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind) { + struct global_cwq *gcwq = cwq->gcwq; int id = -1; struct worker *worker = NULL; - spin_lock(&workqueue_lock); - while (ida_get_new(&per_cpu(worker_ida, cwq->cpu), &id)) { - spin_unlock(&workqueue_lock); - if (!ida_pre_get(&per_cpu(worker_ida, cwq->cpu), GFP_KERNEL)) + spin_lock_irq(&gcwq->lock); + while (ida_get_new(&gcwq->worker_ida, &id)) { + spin_unlock_irq(&gcwq->lock); + if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL)) goto fail; - spin_lock(&workqueue_lock); + spin_lock_irq(&gcwq->lock); } - spin_unlock(&workqueue_lock); + spin_unlock_irq(&gcwq->lock); worker = alloc_worker(); if (!worker) goto fail; + worker->gcwq = gcwq; worker->cwq = cwq; worker->id = id; worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d", - cwq->cpu, id); + gcwq->cpu, id); if (IS_ERR(worker->task)) goto fail; if (bind) - kthread_bind(worker->task, cwq->cpu); + kthread_bind(worker->task, gcwq->cpu); return worker; fail: if (id >= 0) { - spin_lock(&workqueue_lock); - ida_remove(&per_cpu(worker_ida, cwq->cpu), id); - spin_unlock(&workqueue_lock); + spin_lock_irq(&gcwq->lock); + ida_remove(&gcwq->worker_ida, id); + spin_unlock_irq(&gcwq->lock); } kfree(worker); return NULL; @@ -528,7 +544,7 @@ fail: * Start @worker. * * CONTEXT: - * spin_lock_irq(cwq->lock). + * spin_lock_irq(gcwq->lock). 
*/ static void start_worker(struct worker *worker) { @@ -543,7 +559,7 @@ static void start_worker(struct worker *worker) */ static void destroy_worker(struct worker *worker) { - int cpu = worker->cwq->cpu; + struct global_cwq *gcwq = worker->gcwq; int id = worker->id; /* sanity check frenzy */ @@ -553,9 +569,9 @@ static void destroy_worker(struct worker *worker) kthread_stop(worker->task); kfree(worker); - spin_lock(&workqueue_lock); - ida_remove(&per_cpu(worker_ida, cpu), id); - spin_unlock(&workqueue_lock); + spin_lock_irq(&gcwq->lock); + ida_remove(&gcwq->worker_ida, id); + spin_unlock_irq(&gcwq->lock); } /** @@ -573,7 +589,7 @@ static void destroy_worker(struct worker *worker) * nested inside outer list_for_each_entry_safe(). * * CONTEXT: - * spin_lock_irq(cwq->lock). + * spin_lock_irq(gcwq->lock). */ static void move_linked_works(struct work_struct *work, struct list_head *head, struct work_struct **nextp) @@ -617,7 +633,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) * decrement nr_in_flight of its cwq and handle workqueue flushing. * * CONTEXT: - * spin_lock_irq(cwq->lock). + * spin_lock_irq(gcwq->lock). */ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) { @@ -664,11 +680,12 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) * call this function to process a work. * * CONTEXT: - * spin_lock_irq(cwq->lock) which is released and regrabbed. + * spin_lock_irq(gcwq->lock) which is released and regrabbed. */ static void process_one_work(struct worker *worker, struct work_struct *work) { struct cpu_workqueue_struct *cwq = worker->cwq; + struct global_cwq *gcwq = cwq->gcwq; work_func_t f = work->func; int work_color; #ifdef CONFIG_LOCKDEP @@ -687,7 +704,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) work_color = get_work_color(work); list_del_init(&work->entry); - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); BUG_ON(get_wq_data(work) != cwq); work_clear_pending(work); @@ -707,7 +724,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) dump_stack(); } - spin_lock_irq(&cwq->lock); + spin_lock_irq(&gcwq->lock); /* we're done with it, release */ worker->current_work = NULL; @@ -723,7 +740,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) * fetches a work from the top and executes it. * * CONTEXT: - * spin_lock_irq(cwq->lock) which may be released and regrabbed + * spin_lock_irq(gcwq->lock) which may be released and regrabbed * multiple times. 
*/ static void process_scheduled_works(struct worker *worker) @@ -744,6 +761,7 @@ static void process_scheduled_works(struct worker *worker) static int worker_thread(void *__worker) { struct worker *worker = __worker; + struct global_cwq *gcwq = worker->gcwq; struct cpu_workqueue_struct *cwq = worker->cwq; DEFINE_WAIT(wait); @@ -758,11 +776,11 @@ static int worker_thread(void *__worker) break; if (unlikely(!cpumask_equal(&worker->task->cpus_allowed, - get_cpu_mask(cwq->cpu)))) + get_cpu_mask(gcwq->cpu)))) set_cpus_allowed_ptr(worker->task, - get_cpu_mask(cwq->cpu)); + get_cpu_mask(gcwq->cpu)); - spin_lock_irq(&cwq->lock); + spin_lock_irq(&gcwq->lock); while (!list_empty(&cwq->worklist)) { struct work_struct *work = @@ -782,7 +800,7 @@ static int worker_thread(void *__worker) } } - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); } return 0; @@ -821,7 +839,7 @@ static void wq_barrier_func(struct work_struct *work) * underneath us, so we can't reliably determine cwq from @target. * * CONTEXT: - * spin_lock_irq(cwq->lock). + * spin_lock_irq(gcwq->lock). */ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, struct wq_barrier *barr, @@ -831,7 +849,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, unsigned int linked = 0; /* - * debugobject calls are safe here even with cwq->lock locked + * debugobject calls are safe here even with gcwq->lock locked * as we know for sure that this will not trigger any of the * checks and call back into the fixup functions where we * might deadlock. @@ -904,8 +922,9 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + struct global_cwq *gcwq = cwq->gcwq; - spin_lock_irq(&cwq->lock); + spin_lock_irq(&gcwq->lock); if (flush_color >= 0) { BUG_ON(cwq->flush_color != -1); @@ -922,7 +941,7 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, cwq->work_color = work_color; } - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); } if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) @@ -1097,17 +1116,19 @@ int flush_work(struct work_struct *work) { struct worker *worker = NULL; struct cpu_workqueue_struct *cwq; + struct global_cwq *gcwq; struct wq_barrier barr; might_sleep(); cwq = get_wq_data(work); if (!cwq) return 0; + gcwq = cwq->gcwq; lock_map_acquire(&cwq->wq->lockdep_map); lock_map_release(&cwq->wq->lockdep_map); - spin_lock_irq(&cwq->lock); + spin_lock_irq(&gcwq->lock); if (!list_empty(&work->entry)) { /* * See the comment near try_to_grab_pending()->smp_rmb(). @@ -1124,12 +1145,12 @@ int flush_work(struct work_struct *work) } insert_wq_barrier(cwq, &barr, work, worker); - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); return 1; already_gone: - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); return 0; } EXPORT_SYMBOL_GPL(flush_work); @@ -1140,6 +1161,7 @@ EXPORT_SYMBOL_GPL(flush_work); */ static int try_to_grab_pending(struct work_struct *work) { + struct global_cwq *gcwq; struct cpu_workqueue_struct *cwq; int ret = -1; @@ -1154,8 +1176,9 @@ static int try_to_grab_pending(struct work_struct *work) cwq = get_wq_data(work); if (!cwq) return ret; + gcwq = cwq->gcwq; - spin_lock_irq(&cwq->lock); + spin_lock_irq(&gcwq->lock); if (!list_empty(&work->entry)) { /* * This work is queued, but perhaps we locked the wrong cwq. 
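The flush_work() path converted in this hunk is built on a small trick worth spelling out: to wait for a queued or executing work, a dummy barrier work is queued behind it and the flusher sleeps until that barrier runs. Below is a hedged pthread sketch of the idea, not the kernel code: a semaphore stands in for struct completion, and the barrier is simply appended at the tail, whereas insert_wq_barrier() links it directly after the flushed work or the executing worker's scheduled list.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

struct work {
        void (*fn)(struct work *w);
        struct work *next;
};

static struct work *head, *tail;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t more_work = PTHREAD_COND_INITIALIZER;

static void queue_work(struct work *w)
{
        pthread_mutex_lock(&lock);
        w->next = NULL;
        if (tail)
                tail->next = w;
        else
                head = w;
        tail = w;
        pthread_cond_signal(&more_work);
        pthread_mutex_unlock(&lock);
}

static void *worker_thread(void *unused)
{
        (void)unused;
        for (;;) {
                struct work *w;

                pthread_mutex_lock(&lock);
                while (!head)
                        pthread_cond_wait(&more_work, &lock);
                w = head;
                head = w->next;
                if (!head)
                        tail = NULL;
                pthread_mutex_unlock(&lock);

                w->fn(w);       /* run outside the lock, as process_one_work() does */
        }
        return NULL;
}

struct wq_barrier {
        struct work work;       /* first member, so the cast below is valid */
        sem_t done;             /* stands in for struct completion */
};

static void wq_barrier_func(struct work *w)
{
        struct wq_barrier *barr = (struct wq_barrier *)w;

        sem_post(&barr->done);
}

/* wait until everything queued so far, including @w, has been processed */
static void flush_work(struct work *w)
{
        struct wq_barrier barr = { .work.fn = wq_barrier_func };

        (void)w;        /* the real code locates @w's cwq and inserts right after it */
        sem_init(&barr.done, 0, 0);
        queue_work(&barr.work);
        sem_wait(&barr.done);
}

static void say_hello(struct work *w) { (void)w; printf("hello\n"); }

int main(void)
{
        pthread_t tid;
        struct work hello = { .fn = say_hello };

        pthread_create(&tid, NULL, worker_thread, NULL);
        queue_work(&hello);
        flush_work(&hello);     /* returns only after "hello" was printed */
        printf("flushed\n");
        return 0;               /* worker thread is killed on process exit */
}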
@@ -1170,7 +1193,7 @@ static int try_to_grab_pending(struct work_struct *work) ret = 1; } } - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); return ret; } @@ -1178,10 +1201,11 @@ static int try_to_grab_pending(struct work_struct *work) static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, struct work_struct *work) { + struct global_cwq *gcwq = cwq->gcwq; struct wq_barrier barr; struct worker *worker; - spin_lock_irq(&cwq->lock); + spin_lock_irq(&gcwq->lock); worker = NULL; if (unlikely(cwq->worker && cwq->worker->current_work == work)) { @@ -1189,7 +1213,7 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, insert_wq_barrier(cwq, &barr, work, worker); } - spin_unlock_irq(&cwq->lock); + spin_unlock_irq(&gcwq->lock); if (unlikely(worker)) { wait_for_completion(&barr.done); @@ -1567,13 +1591,13 @@ struct workqueue_struct *__create_workqueue_key(const char *name, */ for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + struct global_cwq *gcwq = get_gcwq(cpu); BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK); - cwq->cpu = cpu; + cwq->gcwq = gcwq; cwq->wq = wq; cwq->flush_color = -1; cwq->max_active = max_active; - spin_lock_init(&cwq->lock); INIT_LIST_HEAD(&cwq->worklist); INIT_LIST_HEAD(&cwq->delayed_works); init_waitqueue_head(&cwq->more_work); @@ -1744,7 +1768,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu); * list instead of the cwq ones. * * CONTEXT: - * Grabs and releases workqueue_lock and cwq->lock's. + * Grabs and releases workqueue_lock and gcwq->lock's. */ void freeze_workqueues_begin(void) { @@ -1757,16 +1781,18 @@ void freeze_workqueues_begin(void) workqueue_freezing = true; for_each_possible_cpu(cpu) { + struct global_cwq *gcwq = get_gcwq(cpu); + + spin_lock_irq(&gcwq->lock); + list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - spin_lock_irq(&cwq->lock); - if (wq->flags & WQ_FREEZEABLE) cwq->max_active = 0; - - spin_unlock_irq(&cwq->lock); } + + spin_unlock_irq(&gcwq->lock); } spin_unlock(&workqueue_lock); @@ -1825,7 +1851,7 @@ out_unlock: * frozen works are transferred to their respective cwq worklists. * * CONTEXT: - * Grabs and releases workqueue_lock and cwq->lock's. + * Grabs and releases workqueue_lock and gcwq->lock's. 
*/ void thaw_workqueues(void) { @@ -1838,14 +1864,16 @@ void thaw_workqueues(void) goto out_unlock; for_each_possible_cpu(cpu) { + struct global_cwq *gcwq = get_gcwq(cpu); + + spin_lock_irq(&gcwq->lock); + list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); if (!(wq->flags & WQ_FREEZEABLE)) continue; - spin_lock_irq(&cwq->lock); - /* restore max_active and repopulate worklist */ cwq->max_active = wq->saved_max_active; @@ -1854,9 +1882,9 @@ void thaw_workqueues(void) cwq_activate_first_delayed(cwq); wake_up(&cwq->more_work); - - spin_unlock_irq(&cwq->lock); } + + spin_unlock_irq(&gcwq->lock); } workqueue_freezing = false; @@ -1869,11 +1897,19 @@ void __init init_workqueues(void) { unsigned int cpu; - for_each_possible_cpu(cpu) - ida_init(&per_cpu(worker_ida, cpu)); - singlethread_cpu = cpumask_first(cpu_possible_mask); hotcpu_notifier(workqueue_cpu_callback, 0); + + /* initialize gcwqs */ + for_each_possible_cpu(cpu) { + struct global_cwq *gcwq = get_gcwq(cpu); + + spin_lock_init(&gcwq->lock); + gcwq->cpu = cpu; + + ida_init(&gcwq->worker_ida); + } + keventd_wq = create_workqueue("events"); BUG_ON(!keventd_wq); } -- cgit v1.2.3-59-g8ed1b From c8e55f360210c1bc49bea5d62bc3939b7ee13483 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:12 +0200 Subject: workqueue: implement worker states Implement worker states. After created, a worker is STARTED. While a worker isn't processing a work, it's IDLE and chained on gcwq->idle_list. While processing a work, a worker is BUSY and chained on gcwq->busy_hash. Also, gcwq now counts the number of all workers and idle ones. worker_thread() is restructured to reflect state transitions. cwq->more_work is removed and waking up a worker makes it check for events. A worker is killed by setting DIE flag while it's IDLE and waking it up. This gives gcwq better visibility of what's going on and allows it to find out whether a work is executing quickly which is necessary to have multiple workers processing the same cwq. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 214 +++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 173 insertions(+), 41 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b043f57516bd..d64913aa486a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -35,6 +35,17 @@ #include #include +enum { + /* worker flags */ + WORKER_STARTED = 1 << 0, /* started */ + WORKER_DIE = 1 << 1, /* die die die */ + WORKER_IDLE = 1 << 2, /* is idle */ + + BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ + BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER, + BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1, +}; + /* * Structure fields follow one of the following exclusion rules. 
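/*
 * Aside: an illustrative user-space model of the busy_hash lookup added
 * by this patch (see busy_worker_head() further down) -- not kernel
 * code.  A worker executing a work is hashed by the work's address, so
 * "is this work running, and on which worker?" becomes a small hash
 * probe instead of a scan over all workers.  64 buckets, matching
 * BUSY_WORKER_HASH_ORDER = 6; struct work here is only a stand-in.
 */
#include <stdio.h>

#define HASH_ORDER      6
#define HASH_SIZE       (1 << HASH_ORDER)
#define HASH_MASK       (HASH_SIZE - 1)

struct work { int payload; };

static unsigned int busy_bucket(const struct work *work)
{
        unsigned long v = (unsigned long)work;

        /* same shift-and-fold recipe as busy_worker_head() */
        v >>= 2;                        /* ilog2(sizeof(struct work)) here */
        v += v >> HASH_ORDER;
        return v & HASH_MASK;
}

int main(void)
{
        struct work works[4];
        unsigned int i;

        /* adjacent works land in distinct buckets */
        for (i = 0; i < 4; i++)
                printf("work %u -> bucket %u\n", i, busy_bucket(&works[i]));
        return 0;
}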
* @@ -51,11 +62,18 @@ struct global_cwq; struct cpu_workqueue_struct; struct worker { + /* on idle list while idle, on busy hash table while busy */ + union { + struct list_head entry; /* L: while idle */ + struct hlist_node hentry; /* L: while busy */ + }; + struct work_struct *current_work; /* L: work being processed */ struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ struct global_cwq *gcwq; /* I: the associated gcwq */ struct cpu_workqueue_struct *cwq; /* I: the associated cwq */ + unsigned int flags; /* L: flags */ int id; /* I: worker id */ }; @@ -65,6 +83,15 @@ struct worker { struct global_cwq { spinlock_t lock; /* the gcwq lock */ unsigned int cpu; /* I: the associated cpu */ + + int nr_workers; /* L: total number of workers */ + int nr_idle; /* L: currently idle ones */ + + /* workers are chained either in the idle_list or busy_hash */ + struct list_head idle_list; /* L: list of idle workers */ + struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE]; + /* L: hash of busy workers */ + struct ida worker_ida; /* L: for worker IDs */ } ____cacheline_aligned_in_smp; @@ -77,7 +104,6 @@ struct global_cwq { struct cpu_workqueue_struct { struct global_cwq *gcwq; /* I: the associated gcwq */ struct list_head worklist; - wait_queue_head_t more_work; struct worker *worker; struct workqueue_struct *wq; /* I: the owning workqueue */ int work_color; /* L: current color */ @@ -306,6 +332,33 @@ static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) WORK_STRUCT_WQ_DATA_MASK); } +/** + * busy_worker_head - return the busy hash head for a work + * @gcwq: gcwq of interest + * @work: work to be hashed + * + * Return hash head of @gcwq for @work. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock). + * + * RETURNS: + * Pointer to the hash head. + */ +static struct hlist_head *busy_worker_head(struct global_cwq *gcwq, + struct work_struct *work) +{ + const int base_shift = ilog2(sizeof(struct work_struct)); + unsigned long v = (unsigned long)work; + + /* simple shift and fold hash, do we need something better? */ + v >>= base_shift; + v += v >> BUSY_WORKER_HASH_ORDER; + v &= BUSY_WORKER_HASH_MASK; + + return &gcwq->busy_hash[v]; +} + /** * insert_work - insert a work into cwq * @cwq: cwq @work belongs to @@ -332,7 +385,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, smp_wmb(); list_add_tail(&work->entry, head); - wake_up(&cwq->more_work); + wake_up_process(cwq->worker->task); } static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, @@ -470,13 +523,59 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, } EXPORT_SYMBOL_GPL(queue_delayed_work_on); +/** + * worker_enter_idle - enter idle state + * @worker: worker which is entering idle state + * + * @worker is entering idle state. Update stats and idle timer if + * necessary. + * + * LOCKING: + * spin_lock_irq(gcwq->lock). + */ +static void worker_enter_idle(struct worker *worker) +{ + struct global_cwq *gcwq = worker->gcwq; + + BUG_ON(worker->flags & WORKER_IDLE); + BUG_ON(!list_empty(&worker->entry) && + (worker->hentry.next || worker->hentry.pprev)); + + worker->flags |= WORKER_IDLE; + gcwq->nr_idle++; + + /* idle_list is LIFO */ + list_add(&worker->entry, &gcwq->idle_list); +} + +/** + * worker_leave_idle - leave idle state + * @worker: worker which is leaving idle state + * + * @worker is leaving idle state. Update stats. + * + * LOCKING: + * spin_lock_irq(gcwq->lock). 
+ */ +static void worker_leave_idle(struct worker *worker) +{ + struct global_cwq *gcwq = worker->gcwq; + + BUG_ON(!(worker->flags & WORKER_IDLE)); + worker->flags &= ~WORKER_IDLE; + gcwq->nr_idle--; + list_del_init(&worker->entry); +} + static struct worker *alloc_worker(void) { struct worker *worker; worker = kzalloc(sizeof(*worker), GFP_KERNEL); - if (worker) + if (worker) { + INIT_LIST_HEAD(&worker->entry); INIT_LIST_HEAD(&worker->scheduled); + } return worker; } @@ -541,13 +640,16 @@ fail: * start_worker - start a newly created worker * @worker: worker to start * - * Start @worker. + * Make the gcwq aware of @worker and start it. * * CONTEXT: * spin_lock_irq(gcwq->lock). */ static void start_worker(struct worker *worker) { + worker->flags |= WORKER_STARTED; + worker->gcwq->nr_workers++; + worker_enter_idle(worker); wake_up_process(worker->task); } @@ -555,7 +657,10 @@ static void start_worker(struct worker *worker) * destroy_worker - destroy a workqueue worker * @worker: worker to be destroyed * - * Destroy @worker. + * Destroy @worker and adjust @gcwq stats accordingly. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock) which is released and regrabbed. */ static void destroy_worker(struct worker *worker) { @@ -566,12 +671,21 @@ static void destroy_worker(struct worker *worker) BUG_ON(worker->current_work); BUG_ON(!list_empty(&worker->scheduled)); + if (worker->flags & WORKER_STARTED) + gcwq->nr_workers--; + if (worker->flags & WORKER_IDLE) + gcwq->nr_idle--; + + list_del_init(&worker->entry); + worker->flags |= WORKER_DIE; + + spin_unlock_irq(&gcwq->lock); + kthread_stop(worker->task); kfree(worker); spin_lock_irq(&gcwq->lock); ida_remove(&gcwq->worker_ida, id); - spin_unlock_irq(&gcwq->lock); } /** @@ -686,6 +800,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) { struct cpu_workqueue_struct *cwq = worker->cwq; struct global_cwq *gcwq = cwq->gcwq; + struct hlist_head *bwh = busy_worker_head(gcwq, work); work_func_t f = work->func; int work_color; #ifdef CONFIG_LOCKDEP @@ -700,6 +815,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) #endif /* claim and process */ debug_work_deactivate(work); + hlist_add_head(&worker->hentry, bwh); worker->current_work = work; work_color = get_work_color(work); list_del_init(&work->entry); @@ -727,6 +843,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) spin_lock_irq(&gcwq->lock); /* we're done with it, release */ + hlist_del_init(&worker->hentry); worker->current_work = NULL; cwq_dec_nr_in_flight(cwq, work_color); } @@ -763,47 +880,56 @@ static int worker_thread(void *__worker) struct worker *worker = __worker; struct global_cwq *gcwq = worker->gcwq; struct cpu_workqueue_struct *cwq = worker->cwq; - DEFINE_WAIT(wait); - for (;;) { - prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE); - if (!kthread_should_stop() && - list_empty(&cwq->worklist)) - schedule(); - finish_wait(&cwq->more_work, &wait); +woke_up: + if (unlikely(!cpumask_equal(&worker->task->cpus_allowed, + get_cpu_mask(gcwq->cpu)))) + set_cpus_allowed_ptr(worker->task, get_cpu_mask(gcwq->cpu)); - if (kthread_should_stop()) - break; + spin_lock_irq(&gcwq->lock); - if (unlikely(!cpumask_equal(&worker->task->cpus_allowed, - get_cpu_mask(gcwq->cpu)))) - set_cpus_allowed_ptr(worker->task, - get_cpu_mask(gcwq->cpu)); + /* DIE can be set only while we're idle, checking here is enough */ + if (worker->flags & WORKER_DIE) { + spin_unlock_irq(&gcwq->lock); + return 0; + } - 
spin_lock_irq(&gcwq->lock); + worker_leave_idle(worker); - while (!list_empty(&cwq->worklist)) { - struct work_struct *work = - list_first_entry(&cwq->worklist, - struct work_struct, entry); - - if (likely(!(*work_data_bits(work) & - WORK_STRUCT_LINKED))) { - /* optimization path, not strictly necessary */ - process_one_work(worker, work); - if (unlikely(!list_empty(&worker->scheduled))) - process_scheduled_works(worker); - } else { - move_linked_works(work, &worker->scheduled, - NULL); + /* + * ->scheduled list can only be filled while a worker is + * preparing to process a work or actually processing it. + * Make sure nobody diddled with it while I was sleeping. + */ + BUG_ON(!list_empty(&worker->scheduled)); + + while (!list_empty(&cwq->worklist)) { + struct work_struct *work = + list_first_entry(&cwq->worklist, + struct work_struct, entry); + + if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { + /* optimization path, not strictly necessary */ + process_one_work(worker, work); + if (unlikely(!list_empty(&worker->scheduled))) process_scheduled_works(worker); - } + } else { + move_linked_works(work, &worker->scheduled, NULL); + process_scheduled_works(worker); } - - spin_unlock_irq(&gcwq->lock); } - return 0; + /* + * gcwq->lock is held and there's no work to process, sleep. + * Workers are woken up only while holding gcwq->lock, so + * setting the current state before releasing gcwq->lock is + * enough to prevent losing any event. + */ + worker_enter_idle(worker); + __set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irq(&gcwq->lock); + schedule(); + goto woke_up; } struct wq_barrier { @@ -1600,7 +1726,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name, cwq->max_active = max_active; INIT_LIST_HEAD(&cwq->worklist); INIT_LIST_HEAD(&cwq->delayed_works); - init_waitqueue_head(&cwq->more_work); if (failed) continue; @@ -1651,7 +1776,7 @@ EXPORT_SYMBOL_GPL(__create_workqueue_key); */ void destroy_workqueue(struct workqueue_struct *wq) { - int cpu; + unsigned int cpu; flush_workqueue(wq); @@ -1670,8 +1795,10 @@ void destroy_workqueue(struct workqueue_struct *wq) int i; if (cwq->worker) { + spin_lock_irq(&cwq->gcwq->lock); destroy_worker(cwq->worker); cwq->worker = NULL; + spin_unlock_irq(&cwq->gcwq->lock); } for (i = 0; i < WORK_NR_COLORS; i++) @@ -1881,7 +2008,7 @@ void thaw_workqueues(void) cwq->nr_active < cwq->max_active) cwq_activate_first_delayed(cwq); - wake_up(&cwq->more_work); + wake_up_process(cwq->worker->task); } spin_unlock_irq(&gcwq->lock); @@ -1896,6 +2023,7 @@ out_unlock: void __init init_workqueues(void) { unsigned int cpu; + int i; singlethread_cpu = cpumask_first(cpu_possible_mask); hotcpu_notifier(workqueue_cpu_callback, 0); @@ -1907,6 +2035,10 @@ void __init init_workqueues(void) spin_lock_init(&gcwq->lock); gcwq->cpu = cpu; + INIT_LIST_HEAD(&gcwq->idle_list); + for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) + INIT_HLIST_HEAD(&gcwq->busy_hash[i]); + ida_init(&gcwq->worker_ida); } -- cgit v1.2.3-59-g8ed1b From db7bccf45cb87522096b8f43144e31ca605a9f24 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:12 +0200 Subject: workqueue: reimplement CPU hotplugging support using trustee Reimplement CPU hotplugging support using trustee thread. On CPU down, a trustee thread is created and each step of CPU down is executed by the trustee and workqueue_cpu_callback() simply drives and waits for trustee state transitions. 
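A minimal sketch of this drive-and-wait handshake, modelled with pthreads: only the trustee state names are taken from the patch; the mutex, condition variable and function names are hypothetical user-space scaffolding standing in for gcwq->lock and gcwq->trustee_wait.

#include <pthread.h>
#include <stdio.h>

enum { TRUSTEE_START, TRUSTEE_IN_CHARGE, TRUSTEE_DONE };

static int trustee_state = TRUSTEE_DONE;
static pthread_mutex_t gcwq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t trustee_wait = PTHREAD_COND_INITIALIZER;

static void set_state(int state)
{
        pthread_mutex_lock(&gcwq_lock);
        trustee_state = state;
        pthread_cond_broadcast(&trustee_wait);
        pthread_mutex_unlock(&gcwq_lock);
}

/* like wait_trustee_state(): DONE always terminates the wait */
static void wait_state(int state)
{
        pthread_mutex_lock(&gcwq_lock);
        while (trustee_state != state && trustee_state != TRUSTEE_DONE)
                pthread_cond_wait(&trustee_wait, &gcwq_lock);
        pthread_mutex_unlock(&gcwq_lock);
}

static void *trustee_thread(void *unused)
{
        (void)unused;

        /* wait until the callback hands over control */
        pthread_mutex_lock(&gcwq_lock);
        while (trustee_state != TRUSTEE_START)
                pthread_cond_wait(&trustee_wait, &gcwq_lock);
        pthread_mutex_unlock(&gcwq_lock);

        printf("trustee: in charge, draining works\n");
        set_state(TRUSTEE_IN_CHARGE);
        /* ... keep executing works until the worklist is empty ... */
        printf("trustee: done\n");
        set_state(TRUSTEE_DONE);
        return NULL;
}

/* plays the CPU_DOWN_PREPARE leg of workqueue_cpu_callback() */
int main(void)
{
        pthread_t trustee;

        pthread_create(&trustee, NULL, trustee_thread, NULL);
        set_state(TRUSTEE_START);               /* start the trustee */
        wait_state(TRUSTEE_IN_CHARGE);          /* DOWN_PREPARE blocks here */
        printf("callback: trustee took over, cpu may go down\n");
        wait_state(TRUSTEE_DONE);
        pthread_join(trustee, NULL);
        return 0;
}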
CPU down operation no longer waits for works to be drained but trustee sticks around till all pending works have been completed. If CPU is brought back up while works are still draining, workqueue_cpu_callback() tells trustee to step down and tell workers to rebind to the cpu. As it's difficult to tell whether cwqs are empty if it's freezing or frozen, trustee doesn't consider draining to be complete while a gcwq is freezing or frozen (tracked by new GCWQ_FREEZING flag). Also, workers which get unbound from their cpu are marked with WORKER_ROGUE. Trustee based implementation doesn't bring any new feature at this point but it will be used to manage worker pool when dynamic shared worker pool is implemented. Signed-off-by: Tejun Heo --- include/linux/cpu.h | 2 + kernel/workqueue.c | 293 +++++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 279 insertions(+), 16 deletions(-) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index de6b1722cdca..4823af64e9db 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -71,6 +71,8 @@ enum { /* migration should happen before other stuff but after perf */ CPU_PRI_PERF = 20, CPU_PRI_MIGRATION = 10, + /* prepare workqueues for other notifiers */ + CPU_PRI_WORKQUEUE = 5, }; #ifdef CONFIG_SMP diff --git a/kernel/workqueue.c b/kernel/workqueue.c index d64913aa486a..f57855f718d7 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -36,14 +36,27 @@ #include enum { + /* global_cwq flags */ + GCWQ_FREEZING = 1 << 3, /* freeze in progress */ + /* worker flags */ WORKER_STARTED = 1 << 0, /* started */ WORKER_DIE = 1 << 1, /* die die die */ WORKER_IDLE = 1 << 2, /* is idle */ + WORKER_ROGUE = 1 << 4, /* not bound to any cpu */ + + /* gcwq->trustee_state */ + TRUSTEE_START = 0, /* start */ + TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */ + TRUSTEE_BUTCHER = 2, /* butcher workers */ + TRUSTEE_RELEASE = 3, /* release workers */ + TRUSTEE_DONE = 4, /* trustee is done */ BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER, BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1, + + TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ }; /* @@ -83,6 +96,7 @@ struct worker { struct global_cwq { spinlock_t lock; /* the gcwq lock */ unsigned int cpu; /* I: the associated cpu */ + unsigned int flags; /* L: GCWQ_* flags */ int nr_workers; /* L: total number of workers */ int nr_idle; /* L: currently idle ones */ @@ -93,6 +107,10 @@ struct global_cwq { /* L: hash of busy workers */ struct ida worker_ida; /* L: for worker IDs */ + + struct task_struct *trustee; /* L: for gcwq shutdown */ + unsigned int trustee_state; /* L: trustee state */ + wait_queue_head_t trustee_wait; /* trustee wait */ } ____cacheline_aligned_in_smp; /* @@ -148,6 +166,10 @@ struct workqueue_struct { #endif }; +#define for_each_busy_worker(worker, i, pos, gcwq) \ + for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ + hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) + #ifdef CONFIG_DEBUG_OBJECTS_WORK static struct debug_obj_descr work_debug_descr; @@ -546,6 +568,9 @@ static void worker_enter_idle(struct worker *worker) /* idle_list is LIFO */ list_add(&worker->entry, &gcwq->idle_list); + + if (unlikely(worker->flags & WORKER_ROGUE)) + wake_up_all(&gcwq->trustee_wait); } /** @@ -622,8 +647,15 @@ static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind) if (IS_ERR(worker->task)) goto fail; + /* + * A rogue worker will become a regular one if CPU comes + * online later on. 
Make sure every worker has + * PF_THREAD_BOUND set. + */ if (bind) kthread_bind(worker->task, gcwq->cpu); + else + worker->task->flags |= PF_THREAD_BOUND; return worker; fail: @@ -882,10 +914,6 @@ static int worker_thread(void *__worker) struct cpu_workqueue_struct *cwq = worker->cwq; woke_up: - if (unlikely(!cpumask_equal(&worker->task->cpus_allowed, - get_cpu_mask(gcwq->cpu)))) - set_cpus_allowed_ptr(worker->task, get_cpu_mask(gcwq->cpu)); - spin_lock_irq(&gcwq->lock); /* DIE can be set only while we're idle, checking here is enough */ @@ -895,7 +923,7 @@ woke_up: } worker_leave_idle(worker); - +recheck: /* * ->scheduled list can only be filled while a worker is * preparing to process a work or actually processing it. @@ -908,6 +936,22 @@ woke_up: list_first_entry(&cwq->worklist, struct work_struct, entry); + /* + * The following is a rather inefficient way to close + * race window against cpu hotplug operations. Will + * be replaced soon. + */ + if (unlikely(!(worker->flags & WORKER_ROGUE) && + !cpumask_equal(&worker->task->cpus_allowed, + get_cpu_mask(gcwq->cpu)))) { + spin_unlock_irq(&gcwq->lock); + set_cpus_allowed_ptr(worker->task, + get_cpu_mask(gcwq->cpu)); + cpu_relax(); + spin_lock_irq(&gcwq->lock); + goto recheck; + } + if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { /* optimization path, not strictly necessary */ process_one_work(worker, work); @@ -1812,29 +1856,237 @@ void destroy_workqueue(struct workqueue_struct *wq) } EXPORT_SYMBOL_GPL(destroy_workqueue); +/* + * CPU hotplug. + * + * CPU hotplug is implemented by allowing cwqs to be detached from + * CPU, running with unbound workers and allowing them to be + * reattached later if the cpu comes back online. A separate thread + * is created to govern cwqs in such state and is called the trustee. + * + * Trustee states and their descriptions. + * + * START Command state used on startup. On CPU_DOWN_PREPARE, a + * new trustee is started with this state. + * + * IN_CHARGE Once started, trustee will enter this state after + * making all existing workers rogue. DOWN_PREPARE waits + * for trustee to enter this state. After reaching + * IN_CHARGE, trustee tries to execute the pending + * worklist until it's empty and the state is set to + * BUTCHER, or the state is set to RELEASE. + * + * BUTCHER Command state which is set by the cpu callback after + * the cpu has went down. Once this state is set trustee + * knows that there will be no new works on the worklist + * and once the worklist is empty it can proceed to + * killing idle workers. + * + * RELEASE Command state which is set by the cpu callback if the + * cpu down has been canceled or it has come online + * again. After recognizing this state, trustee stops + * trying to drain or butcher and transits to DONE. + * + * DONE Trustee will enter this state after BUTCHER or RELEASE + * is complete. + * + * trustee CPU draining + * took over down complete + * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE + * | | ^ + * | CPU is back online v return workers | + * ----------------> RELEASE -------------- + */ + +/** + * trustee_wait_event_timeout - timed event wait for trustee + * @cond: condition to wait for + * @timeout: timeout in jiffies + * + * wait_event_timeout() for trustee to use. Handles locking and + * checks for RELEASE request. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock) which may be released and regrabbed + * multiple times. To be used by trustee. 
+ * + * RETURNS: + * Positive indicating left time if @cond is satisfied, 0 if timed + * out, -1 if canceled. + */ +#define trustee_wait_event_timeout(cond, timeout) ({ \ + long __ret = (timeout); \ + while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \ + __ret) { \ + spin_unlock_irq(&gcwq->lock); \ + __wait_event_timeout(gcwq->trustee_wait, (cond) || \ + (gcwq->trustee_state == TRUSTEE_RELEASE), \ + __ret); \ + spin_lock_irq(&gcwq->lock); \ + } \ + gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \ +}) + +/** + * trustee_wait_event - event wait for trustee + * @cond: condition to wait for + * + * wait_event() for trustee to use. Automatically handles locking and + * checks for CANCEL request. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock) which may be released and regrabbed + * multiple times. To be used by trustee. + * + * RETURNS: + * 0 if @cond is satisfied, -1 if canceled. + */ +#define trustee_wait_event(cond) ({ \ + long __ret1; \ + __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\ + __ret1 < 0 ? -1 : 0; \ +}) + +static int __cpuinit trustee_thread(void *__gcwq) +{ + struct global_cwq *gcwq = __gcwq; + struct worker *worker; + struct hlist_node *pos; + int i; + + BUG_ON(gcwq->cpu != smp_processor_id()); + + spin_lock_irq(&gcwq->lock); + /* + * Make all multithread workers rogue. Trustee must be bound + * to the target cpu and can't be cancelled. + */ + BUG_ON(gcwq->cpu != smp_processor_id()); + + list_for_each_entry(worker, &gcwq->idle_list, entry) + if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) + worker->flags |= WORKER_ROGUE; + + for_each_busy_worker(worker, i, pos, gcwq) + if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) + worker->flags |= WORKER_ROGUE; + + /* + * We're now in charge. Notify and proceed to drain. We need + * to keep the gcwq running during the whole CPU down + * procedure as other cpu hotunplug callbacks may need to + * flush currently running tasks. + */ + gcwq->trustee_state = TRUSTEE_IN_CHARGE; + wake_up_all(&gcwq->trustee_wait); + + /* + * The original cpu is in the process of dying and may go away + * anytime now. When that happens, we and all workers would + * be migrated to other cpus. Try draining any left work. + * Note that if the gcwq is frozen, there may be frozen works + * in freezeable cwqs. Don't declare completion while frozen. + */ + while (gcwq->nr_workers != gcwq->nr_idle || + gcwq->flags & GCWQ_FREEZING || + gcwq->trustee_state == TRUSTEE_IN_CHARGE) { + /* give a breather */ + if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0) + break; + } + + /* notify completion */ + gcwq->trustee = NULL; + gcwq->trustee_state = TRUSTEE_DONE; + wake_up_all(&gcwq->trustee_wait); + spin_unlock_irq(&gcwq->lock); + return 0; +} + +/** + * wait_trustee_state - wait for trustee to enter the specified state + * @gcwq: gcwq the trustee of interest belongs to + * @state: target state to wait for + * + * Wait for the trustee to reach @state. DONE is already matched. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock) which may be released and regrabbed + * multiple times. To be used by cpu_callback. 
+ */ +static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) +{ + if (!(gcwq->trustee_state == state || + gcwq->trustee_state == TRUSTEE_DONE)) { + spin_unlock_irq(&gcwq->lock); + __wait_event(gcwq->trustee_wait, + gcwq->trustee_state == state || + gcwq->trustee_state == TRUSTEE_DONE); + spin_lock_irq(&gcwq->lock); + } +} + static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct cpu_workqueue_struct *cwq; - struct workqueue_struct *wq; + struct global_cwq *gcwq = get_gcwq(cpu); + struct task_struct *new_trustee = NULL; + struct worker *worker; + struct hlist_node *pos; + unsigned long flags; + int i; action &= ~CPU_TASKS_FROZEN; - list_for_each_entry(wq, &workqueues, list) { - if (wq->flags & WQ_SINGLE_THREAD) - continue; + switch (action) { + case CPU_DOWN_PREPARE: + new_trustee = kthread_create(trustee_thread, gcwq, + "workqueue_trustee/%d\n", cpu); + if (IS_ERR(new_trustee)) + return notifier_from_errno(PTR_ERR(new_trustee)); + kthread_bind(new_trustee, cpu); + } - cwq = get_cwq(cpu, wq); + /* some are called w/ irq disabled, don't disturb irq status */ + spin_lock_irqsave(&gcwq->lock, flags); - switch (action) { - case CPU_POST_DEAD: - flush_workqueue(wq); - break; + switch (action) { + case CPU_DOWN_PREPARE: + /* initialize trustee and tell it to acquire the gcwq */ + BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE); + gcwq->trustee = new_trustee; + gcwq->trustee_state = TRUSTEE_START; + wake_up_process(gcwq->trustee); + wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE); + break; + + case CPU_POST_DEAD: + gcwq->trustee_state = TRUSTEE_BUTCHER; + break; + + case CPU_DOWN_FAILED: + case CPU_ONLINE: + if (gcwq->trustee_state != TRUSTEE_DONE) { + gcwq->trustee_state = TRUSTEE_RELEASE; + wake_up_process(gcwq->trustee); + wait_trustee_state(gcwq, TRUSTEE_DONE); } + + /* clear ROGUE from all multithread workers */ + list_for_each_entry(worker, &gcwq->idle_list, entry) + if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) + worker->flags &= ~WORKER_ROGUE; + + for_each_busy_worker(worker, i, pos, gcwq) + if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) + worker->flags &= ~WORKER_ROGUE; + break; } + spin_unlock_irqrestore(&gcwq->lock, flags); + return notifier_from_errno(0); } @@ -1912,6 +2164,9 @@ void freeze_workqueues_begin(void) spin_lock_irq(&gcwq->lock); + BUG_ON(gcwq->flags & GCWQ_FREEZING); + gcwq->flags |= GCWQ_FREEZING; + list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); @@ -1995,6 +2250,9 @@ void thaw_workqueues(void) spin_lock_irq(&gcwq->lock); + BUG_ON(!(gcwq->flags & GCWQ_FREEZING)); + gcwq->flags &= ~GCWQ_FREEZING; + list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); @@ -2026,7 +2284,7 @@ void __init init_workqueues(void) int i; singlethread_cpu = cpumask_first(cpu_possible_mask); - hotcpu_notifier(workqueue_cpu_callback, 0); + hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); /* initialize gcwqs */ for_each_possible_cpu(cpu) { @@ -2040,6 +2298,9 @@ void __init init_workqueues(void) INIT_HLIST_HEAD(&gcwq->busy_hash[i]); ida_init(&gcwq->worker_ida); + + gcwq->trustee_state = TRUSTEE_DONE; + init_waitqueue_head(&gcwq->trustee_wait); } keventd_wq = create_workqueue("events"); -- cgit v1.2.3-59-g8ed1b From 502ca9d819792e7d79b6e002afe9094c641fe410 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:13 +0200 Subject: workqueue: make single 
thread workqueue shared worker pool friendly Reimplement st (single thread) workqueue so that it's friendly to shared worker pool. It was originally implemented by confining st workqueues to use cwq of a fixed cpu and always having a worker for the cpu. This implementation isn't very friendly to shared worker pool and suboptimal in that it ends up crossing cpu boundaries often. Reimplement st workqueue using dynamic single cpu binding and cwq->limit. WQ_SINGLE_THREAD is replaced with WQ_SINGLE_CPU. In a single cpu workqueue, at most single cwq is bound to the wq at any given time. Arbitration is done using atomic accesses to wq->single_cpu when queueing a work. Once bound, the binding stays till the workqueue is drained. Note that the binding is never broken while a workqueue is frozen. This is because idle cwqs may have works waiting in delayed_works queue while frozen. On thaw, the cwq is restarted if there are any delayed works or unbound otherwise. When combined with max_active limit of 1, single cpu workqueue has exactly the same execution properties as the original single thread workqueue while allowing sharing of per-cpu workers. Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 6 +-- kernel/workqueue.c | 135 ++++++++++++++++++++++++++++++++++------------ 2 files changed, 103 insertions(+), 38 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index ab0b7fb99bc2..10611f7fc809 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -221,7 +221,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } enum { WQ_FREEZEABLE = 1 << 0, /* freeze during suspend */ - WQ_SINGLE_THREAD = 1 << 1, /* no per-cpu worker */ + WQ_SINGLE_CPU = 1 << 1, /* only single cpu at a time */ }; extern struct workqueue_struct * @@ -250,9 +250,9 @@ __create_workqueue_key(const char *name, unsigned int flags, int max_active, #define create_workqueue(name) \ __create_workqueue((name), 0, 1) #define create_freezeable_workqueue(name) \ - __create_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_THREAD, 1) + __create_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_CPU, 1) #define create_singlethread_workqueue(name) \ - __create_workqueue((name), WQ_SINGLE_THREAD, 1) + __create_workqueue((name), WQ_SINGLE_CPU, 1) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f57855f718d7..cfb8aa567e17 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -114,8 +114,7 @@ struct global_cwq { } ____cacheline_aligned_in_smp; /* - * The per-CPU workqueue (if single thread, we always use the first - * possible cpu). The lower WORK_STRUCT_FLAG_BITS of + * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of * work_struct->data are used for flags and thus cwqs need to be * aligned at two's power of the number of flag bits. 
*/ @@ -159,6 +158,8 @@ struct workqueue_struct { struct list_head flusher_queue; /* F: flush waiters */ struct list_head flusher_overflow; /* F: flush overflow list */ + unsigned long single_cpu; /* cpu for single cpu wq */ + int saved_max_active; /* I: saved cwq max_active */ const char *name; /* I: workqueue name */ #ifdef CONFIG_LOCKDEP @@ -289,8 +290,6 @@ static DEFINE_PER_CPU(struct global_cwq, global_cwq); static int worker_thread(void *__worker); -static int singlethread_cpu __read_mostly; - static struct global_cwq *get_gcwq(unsigned int cpu) { return &per_cpu(global_cwq, cpu); @@ -302,14 +301,6 @@ static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, return per_cpu_ptr(wq->cpu_wq, cpu); } -static struct cpu_workqueue_struct *target_cwq(unsigned int cpu, - struct workqueue_struct *wq) -{ - if (unlikely(wq->flags & WQ_SINGLE_THREAD)) - cpu = singlethread_cpu; - return get_cwq(cpu, wq); -} - static unsigned int work_color_to_flags(int color) { return color << WORK_STRUCT_COLOR_SHIFT; @@ -410,17 +401,87 @@ static void insert_work(struct cpu_workqueue_struct *cwq, wake_up_process(cwq->worker->task); } +/** + * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing + * @cwq: cwq to unbind + * + * Try to unbind @cwq from single cpu workqueue processing. If + * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock). + */ +static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq) +{ + struct workqueue_struct *wq = cwq->wq; + struct global_cwq *gcwq = cwq->gcwq; + + BUG_ON(wq->single_cpu != gcwq->cpu); + /* + * Unbind from workqueue if @cwq is not frozen. If frozen, + * thaw_workqueues() will either restart processing on this + * cpu or unbind if empty. This keeps works queued while + * frozen fully ordered and flushable. + */ + if (likely(!(gcwq->flags & GCWQ_FREEZING))) { + smp_wmb(); /* paired with cmpxchg() in __queue_work() */ + wq->single_cpu = NR_CPUS; + } +} + static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { - struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq); - struct global_cwq *gcwq = cwq->gcwq; + struct global_cwq *gcwq; + struct cpu_workqueue_struct *cwq; struct list_head *worklist; unsigned long flags; + bool arbitrate; debug_work_activate(work); - spin_lock_irqsave(&gcwq->lock, flags); + /* determine gcwq to use */ + if (!(wq->flags & WQ_SINGLE_CPU)) { + /* just use the requested cpu for multicpu workqueues */ + gcwq = get_gcwq(cpu); + spin_lock_irqsave(&gcwq->lock, flags); + } else { + unsigned int req_cpu = cpu; + + /* + * It's a bit more complex for single cpu workqueues. + * We first need to determine which cpu is going to be + * used. If no cpu is currently serving this + * workqueue, arbitrate using atomic accesses to + * wq->single_cpu; otherwise, use the current one. + */ + retry: + cpu = wq->single_cpu; + arbitrate = cpu == NR_CPUS; + if (arbitrate) + cpu = req_cpu; + + gcwq = get_gcwq(cpu); + spin_lock_irqsave(&gcwq->lock, flags); + + /* + * The following cmpxchg() is a full barrier paired + * with smp_wmb() in cwq_unbind_single_cpu() and + * guarantees that all changes to wq->st_* fields are + * visible on the new cpu after this point. 
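/*
 * Aside: an illustrative user-space model of the wq->single_cpu
 * arbitration implemented in this hunk -- not kernel code.  Whoever
 * moves single_cpu from "unbound" to its own cpu wins; everybody else
 * sees the bound cpu and queues there.  C11 atomics stand in for
 * cmpxchg(), and the retry loop under gcwq->lock is omitted.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS         4
#define UNBOUND         NR_CPUS         /* same role as single_cpu == NR_CPUS */

static _Atomic unsigned int single_cpu = UNBOUND;

/* returns the cpu the work must be queued on */
static unsigned int bind_single_cpu(unsigned int req_cpu)
{
        unsigned int cpu = UNBOUND;

        /* try to claim the workqueue for req_cpu; on failure 'cpu' is
         * updated to the cpu that already owns it */
        if (!atomic_compare_exchange_strong(&single_cpu, &cpu, req_cpu))
                return cpu;
        return req_cpu;
}

/* called once the last queued work finished, like cwq_unbind_single_cpu() */
static void unbind_single_cpu(void)
{
        atomic_store(&single_cpu, UNBOUND);
}

int main(void)
{
        printf("queue from cpu 2 -> runs on cpu %u\n", bind_single_cpu(2));
        printf("queue from cpu 0 -> runs on cpu %u\n", bind_single_cpu(0));
        unbind_single_cpu();
        printf("queue from cpu 0 -> runs on cpu %u\n", bind_single_cpu(0));
        return 0;
}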
+ */ + if (arbitrate) + cmpxchg(&wq->single_cpu, NR_CPUS, cpu); + + if (unlikely(wq->single_cpu != cpu)) { + spin_unlock_irqrestore(&gcwq->lock, flags); + goto retry; + } + } + + /* gcwq determined, get cwq and queue */ + cwq = get_cwq(gcwq->cpu, wq); + BUG_ON(!list_empty(&work->entry)); cwq->nr_in_flight[cwq->work_color]++; @@ -530,7 +591,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, timer_stats_timer_set_start_info(&dwork->timer); /* This stores cwq for the moment, for the timer_fn */ - set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0); + set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0); timer->expires = jiffies + delay; timer->data = (unsigned long)dwork; timer->function = delayed_work_timer_fn; @@ -790,10 +851,14 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) cwq->nr_in_flight[color]--; cwq->nr_active--; - /* one down, submit a delayed one */ - if (!list_empty(&cwq->delayed_works) && - cwq->nr_active < cwq->max_active) - cwq_activate_first_delayed(cwq); + if (!list_empty(&cwq->delayed_works)) { + /* one down, submit a delayed one */ + if (cwq->nr_active < cwq->max_active) + cwq_activate_first_delayed(cwq); + } else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) { + /* this was the last work, unbind from single cpu */ + cwq_unbind_single_cpu(cwq); + } /* is flush in progress and are we at the flushing tip? */ if (likely(cwq->flush_color != color)) @@ -1727,7 +1792,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name, struct lock_class_key *key, const char *lock_name) { - bool singlethread = flags & WQ_SINGLE_THREAD; struct workqueue_struct *wq; bool failed = false; unsigned int cpu; @@ -1748,6 +1812,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name, atomic_set(&wq->nr_cwqs_to_flush, 0); INIT_LIST_HEAD(&wq->flusher_queue); INIT_LIST_HEAD(&wq->flusher_overflow); + wq->single_cpu = NR_CPUS; + wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); INIT_LIST_HEAD(&wq->list); @@ -1773,8 +1839,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, if (failed) continue; - cwq->worker = create_worker(cwq, - cpu_online(cpu) && !singlethread); + cwq->worker = create_worker(cwq, cpu_online(cpu)); if (cwq->worker) start_worker(cwq->worker); else @@ -1958,18 +2023,16 @@ static int __cpuinit trustee_thread(void *__gcwq) spin_lock_irq(&gcwq->lock); /* - * Make all multithread workers rogue. Trustee must be bound - * to the target cpu and can't be cancelled. + * Make all workers rogue. Trustee must be bound to the + * target cpu and can't be cancelled. */ BUG_ON(gcwq->cpu != smp_processor_id()); list_for_each_entry(worker, &gcwq->idle_list, entry) - if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) - worker->flags |= WORKER_ROGUE; + worker->flags |= WORKER_ROGUE; for_each_busy_worker(worker, i, pos, gcwq) - if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) - worker->flags |= WORKER_ROGUE; + worker->flags |= WORKER_ROGUE; /* * We're now in charge. Notify and proceed to drain. 
We need @@ -2074,14 +2137,12 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, wait_trustee_state(gcwq, TRUSTEE_DONE); } - /* clear ROGUE from all multithread workers */ + /* clear ROGUE from all workers */ list_for_each_entry(worker, &gcwq->idle_list, entry) - if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) - worker->flags &= ~WORKER_ROGUE; + worker->flags &= ~WORKER_ROGUE; for_each_busy_worker(worker, i, pos, gcwq) - if (!(worker->cwq->wq->flags & WQ_SINGLE_THREAD)) - worker->flags &= ~WORKER_ROGUE; + worker->flags &= ~WORKER_ROGUE; break; } @@ -2266,6 +2327,11 @@ void thaw_workqueues(void) cwq->nr_active < cwq->max_active) cwq_activate_first_delayed(cwq); + /* perform delayed unbind from single cpu if empty */ + if (wq->single_cpu == gcwq->cpu && + !cwq->nr_active && list_empty(&cwq->delayed_works)) + cwq_unbind_single_cpu(cwq); + wake_up_process(cwq->worker->task); } @@ -2283,7 +2349,6 @@ void __init init_workqueues(void) unsigned int cpu; int i; - singlethread_cpu = cpumask_first(cpu_possible_mask); hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); /* initialize gcwqs */ -- cgit v1.2.3-59-g8ed1b From 8cca0eea3964b72b14e8c3f88e3a40bef7b9113e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:13 +0200 Subject: workqueue: add find_worker_executing_work() and track current_cwq Now that all the workers are tracked by gcwq, we can find which worker is executing a work from gcwq. Implement find_worker_executing_work() and make worker track its current_cwq so that we can find things the other way around. This will be used to implement non-reentrant wqs. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index cfb8aa567e17..c276dec75ea4 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -82,6 +82,7 @@ struct worker { }; struct work_struct *current_work; /* L: work being processed */ + struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */ struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ struct global_cwq *gcwq; /* I: the associated gcwq */ @@ -372,6 +373,59 @@ static struct hlist_head *busy_worker_head(struct global_cwq *gcwq, return &gcwq->busy_hash[v]; } +/** + * __find_worker_executing_work - find worker which is executing a work + * @gcwq: gcwq of interest + * @bwh: hash head as returned by busy_worker_head() + * @work: work to find worker for + * + * Find a worker which is executing @work on @gcwq. @bwh should be + * the hash head obtained by calling busy_worker_head() with the same + * work. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock). + * + * RETURNS: + * Pointer to worker which is executing @work if found, NULL + * otherwise. + */ +static struct worker *__find_worker_executing_work(struct global_cwq *gcwq, + struct hlist_head *bwh, + struct work_struct *work) +{ + struct worker *worker; + struct hlist_node *tmp; + + hlist_for_each_entry(worker, tmp, bwh, hentry) + if (worker->current_work == work) + return worker; + return NULL; +} + +/** + * find_worker_executing_work - find worker which is executing a work + * @gcwq: gcwq of interest + * @work: work to find worker for + * + * Find a worker which is executing @work on @gcwq. This function is + * identical to __find_worker_executing_work() except that this + * function calculates @bwh itself. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock). 
+ * + * RETURNS: + * Pointer to worker which is executing @work if found, NULL + * otherwise. + */ +static struct worker *find_worker_executing_work(struct global_cwq *gcwq, + struct work_struct *work) +{ + return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work), + work); +} + /** * insert_work - insert a work into cwq * @cwq: cwq @work belongs to @@ -914,6 +968,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) debug_work_deactivate(work); hlist_add_head(&worker->hentry, bwh); worker->current_work = work; + worker->current_cwq = cwq; work_color = get_work_color(work); list_del_init(&work->entry); @@ -942,6 +997,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) /* we're done with it, release */ hlist_del_init(&worker->hentry); worker->current_work = NULL; + worker->current_cwq = NULL; cwq_dec_nr_in_flight(cwq, work_color); } -- cgit v1.2.3-59-g8ed1b From 7a22ad757ec75186ad43a5b4670fa7423ee8f480 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:13 +0200 Subject: workqueue: carry cpu number in work data once execution starts To implement non-reentrant workqueue, the last gcwq a work was executed on must be reliably obtainable as long as the work structure is valid even if the previous workqueue has been destroyed. To achieve this, work->data will be overloaded to carry the last cpu number once execution starts so that the previous gcwq can be located reliably. This means that cwq can't be obtained from work after execution starts but only gcwq. Implement set_work_{cwq|cpu}(), get_work_[g]cwq() and clear_work_data() to set work data to the cpu number when starting execution, access the overloaded work data and clear it after cancellation. queue_delayed_work_on() is updated to preserve the last cpu while in-flight in timer and other callers which depended on getting cwq from work after execution starts are converted to depend on gcwq instead. * Anton Blanchard fixed compile error on powerpc due to missing linux/threads.h include. Signed-off-by: Tejun Heo Cc: Anton Blanchard --- include/linux/workqueue.h | 7 +- kernel/workqueue.c | 163 +++++++++++++++++++++++++++++----------------- 2 files changed, 109 insertions(+), 61 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 10611f7fc809..0a7814131e66 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -9,6 +9,7 @@ #include #include #include +#include #include struct workqueue_struct; @@ -59,6 +60,7 @@ enum { WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, + WORK_STRUCT_NO_CPU = NR_CPUS << WORK_STRUCT_FLAG_BITS, }; struct work_struct { @@ -70,8 +72,9 @@ struct work_struct { #endif }; -#define WORK_DATA_INIT() ATOMIC_LONG_INIT(0) -#define WORK_DATA_STATIC_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_STATIC) +#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU) +#define WORK_DATA_STATIC_INIT() \ + ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC) struct delayed_work { struct work_struct work; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c276dec75ea4..c68277c204ab 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -319,31 +319,71 @@ static int work_next_color(int color) } /* - * Set the workqueue on which a work item is to be run - * - Must *only* be called if the pending flag is set + * Work data points to the cwq while a work is on queue. 
Once + * execution starts, it points to the cpu the work was last on. This + * can be distinguished by comparing the data value against + * PAGE_OFFSET. + * + * set_work_{cwq|cpu}() and clear_work_data() can be used to set the + * cwq, cpu or clear work->data. These functions should only be + * called while the work is owned - ie. while the PENDING bit is set. + * + * get_work_[g]cwq() can be used to obtain the gcwq or cwq + * corresponding to a work. gcwq is available once the work has been + * queued anywhere after initialization. cwq is available only from + * queueing until execution starts. */ -static inline void set_wq_data(struct work_struct *work, - struct cpu_workqueue_struct *cwq, - unsigned long extra_flags) +static inline void set_work_data(struct work_struct *work, unsigned long data, + unsigned long flags) { BUG_ON(!work_pending(work)); + atomic_long_set(&work->data, data | flags | work_static(work)); +} - atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) | - WORK_STRUCT_PENDING | extra_flags); +static void set_work_cwq(struct work_struct *work, + struct cpu_workqueue_struct *cwq, + unsigned long extra_flags) +{ + set_work_data(work, (unsigned long)cwq, + WORK_STRUCT_PENDING | extra_flags); } -/* - * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued. - */ -static inline void clear_wq_data(struct work_struct *work) +static void set_work_cpu(struct work_struct *work, unsigned int cpu) +{ + set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING); +} + +static void clear_work_data(struct work_struct *work) +{ + set_work_data(work, WORK_STRUCT_NO_CPU, 0); +} + +static inline unsigned long get_work_data(struct work_struct *work) +{ + return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK; +} + +static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work) { - atomic_long_set(&work->data, work_static(work)); + unsigned long data = get_work_data(work); + + return data >= PAGE_OFFSET ? (void *)data : NULL; } -static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) +static struct global_cwq *get_work_gcwq(struct work_struct *work) { - return (void *)(atomic_long_read(&work->data) & - WORK_STRUCT_WQ_DATA_MASK); + unsigned long data = get_work_data(work); + unsigned int cpu; + + if (data >= PAGE_OFFSET) + return ((struct cpu_workqueue_struct *)data)->gcwq; + + cpu = data >> WORK_STRUCT_FLAG_BITS; + if (cpu == NR_CPUS) + return NULL; + + BUG_ON(cpu >= num_possible_cpus()); + return get_gcwq(cpu); } /** @@ -443,7 +483,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, unsigned int extra_flags) { /* we own @work, set data and link */ - set_wq_data(work, cwq, extra_flags); + set_work_cwq(work, cwq, extra_flags); /* * Ensure that we get the right work->data if we see the @@ -599,7 +639,7 @@ EXPORT_SYMBOL_GPL(queue_work_on); static void delayed_work_timer_fn(unsigned long __data) { struct delayed_work *dwork = (struct delayed_work *)__data; - struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work); + struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work); __queue_work(smp_processor_id(), cwq->wq, &dwork->work); } @@ -639,13 +679,19 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work = &dwork->work; if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + struct global_cwq *gcwq = get_work_gcwq(work); + unsigned int lcpu = gcwq ? 
gcwq->cpu : raw_smp_processor_id(); + BUG_ON(timer_pending(timer)); BUG_ON(!list_empty(&work->entry)); timer_stats_timer_set_start_info(&dwork->timer); - - /* This stores cwq for the moment, for the timer_fn */ - set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0); + /* + * This stores cwq for the moment, for the timer_fn. + * Note that the work's gcwq is preserved to allow + * reentrance detection for delayed works. + */ + set_work_cwq(work, get_cwq(lcpu, wq), 0); timer->expires = jiffies + delay; timer->data = (unsigned long)dwork; timer->function = delayed_work_timer_fn; @@ -970,11 +1016,14 @@ static void process_one_work(struct worker *worker, struct work_struct *work) worker->current_work = work; worker->current_cwq = cwq; work_color = get_work_color(work); + + BUG_ON(get_work_cwq(work) != cwq); + /* record the current cpu number in the work data and dequeue */ + set_work_cpu(work, gcwq->cpu); list_del_init(&work->entry); spin_unlock_irq(&gcwq->lock); - BUG_ON(get_wq_data(work) != cwq); work_clear_pending(work); lock_map_acquire(&cwq->wq->lockdep_map); lock_map_acquire(&lockdep_map); @@ -1406,37 +1455,39 @@ EXPORT_SYMBOL_GPL(flush_workqueue); int flush_work(struct work_struct *work) { struct worker *worker = NULL; - struct cpu_workqueue_struct *cwq; struct global_cwq *gcwq; + struct cpu_workqueue_struct *cwq; struct wq_barrier barr; might_sleep(); - cwq = get_wq_data(work); - if (!cwq) + gcwq = get_work_gcwq(work); + if (!gcwq) return 0; - gcwq = cwq->gcwq; - - lock_map_acquire(&cwq->wq->lockdep_map); - lock_map_release(&cwq->wq->lockdep_map); spin_lock_irq(&gcwq->lock); if (!list_empty(&work->entry)) { /* * See the comment near try_to_grab_pending()->smp_rmb(). - * If it was re-queued under us we are not going to wait. + * If it was re-queued to a different gcwq under us, we + * are not going to wait. */ smp_rmb(); - if (unlikely(cwq != get_wq_data(work))) + cwq = get_work_cwq(work); + if (unlikely(!cwq || gcwq != cwq->gcwq)) goto already_gone; } else { - if (cwq->worker && cwq->worker->current_work == work) - worker = cwq->worker; + worker = find_worker_executing_work(gcwq, work); if (!worker) goto already_gone; + cwq = worker->current_cwq; } insert_wq_barrier(cwq, &barr, work, worker); spin_unlock_irq(&gcwq->lock); + + lock_map_acquire(&cwq->wq->lockdep_map); + lock_map_release(&cwq->wq->lockdep_map); + wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); return 1; @@ -1453,7 +1504,6 @@ EXPORT_SYMBOL_GPL(flush_work); static int try_to_grab_pending(struct work_struct *work) { struct global_cwq *gcwq; - struct cpu_workqueue_struct *cwq; int ret = -1; if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) @@ -1463,24 +1513,23 @@ static int try_to_grab_pending(struct work_struct *work) * The queueing is in progress, or it is already queued. Try to * steal it from ->worklist without clearing WORK_STRUCT_PENDING. */ - - cwq = get_wq_data(work); - if (!cwq) + gcwq = get_work_gcwq(work); + if (!gcwq) return ret; - gcwq = cwq->gcwq; spin_lock_irq(&gcwq->lock); if (!list_empty(&work->entry)) { /* - * This work is queued, but perhaps we locked the wrong cwq. + * This work is queued, but perhaps we locked the wrong gcwq. * In that case we must see the new value after rmb(), see * insert_work()->wmb(). 
*/ smp_rmb(); - if (cwq == get_wq_data(work)) { + if (gcwq == get_work_gcwq(work)) { debug_work_deactivate(work); list_del_init(&work->entry); - cwq_dec_nr_in_flight(cwq, get_work_color(work)); + cwq_dec_nr_in_flight(get_work_cwq(work), + get_work_color(work)); ret = 1; } } @@ -1489,20 +1538,16 @@ static int try_to_grab_pending(struct work_struct *work) return ret; } -static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, - struct work_struct *work) +static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) { - struct global_cwq *gcwq = cwq->gcwq; struct wq_barrier barr; struct worker *worker; spin_lock_irq(&gcwq->lock); - worker = NULL; - if (unlikely(cwq->worker && cwq->worker->current_work == work)) { - worker = cwq->worker; - insert_wq_barrier(cwq, &barr, work, worker); - } + worker = find_worker_executing_work(gcwq, work); + if (unlikely(worker)) + insert_wq_barrier(worker->current_cwq, &barr, work, worker); spin_unlock_irq(&gcwq->lock); @@ -1514,8 +1559,6 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, static void wait_on_work(struct work_struct *work) { - struct cpu_workqueue_struct *cwq; - struct workqueue_struct *wq; int cpu; might_sleep(); @@ -1523,14 +1566,8 @@ static void wait_on_work(struct work_struct *work) lock_map_acquire(&work->lockdep_map); lock_map_release(&work->lockdep_map); - cwq = get_wq_data(work); - if (!cwq) - return; - - wq = cwq->wq; - for_each_possible_cpu(cpu) - wait_on_cpu_work(get_cwq(cpu, wq), work); + wait_on_cpu_work(get_gcwq(cpu), work); } static int __cancel_work_timer(struct work_struct *work, @@ -1545,7 +1582,7 @@ static int __cancel_work_timer(struct work_struct *work, wait_on_work(work); } while (unlikely(ret < 0)); - clear_wq_data(work); + clear_work_data(work); return ret; } @@ -1647,7 +1684,7 @@ EXPORT_SYMBOL(schedule_delayed_work); void flush_delayed_work(struct delayed_work *dwork) { if (del_timer_sync(&dwork->timer)) { - __queue_work(get_cpu(), get_wq_data(&dwork->work)->wq, + __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq, &dwork->work); put_cpu(); } @@ -2405,6 +2442,14 @@ void __init init_workqueues(void) unsigned int cpu; int i; + /* + * The pointer part of work->data is either pointing to the + * cwq or contains the cpu number the work ran last on. Make + * sure cpu number won't overflow into kernel pointer area so + * that they can be distinguished. + */ + BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET); + hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); /* initialize gcwqs */ -- cgit v1.2.3-59-g8ed1b From 18aa9effad4adb2c1efe123af4eb24fec9f59b30 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:13 +0200 Subject: workqueue: implement WQ_NON_REENTRANT With gcwq managing all the workers and work->data pointing to the last gcwq it was on, non-reentrance can be easily implemented by checking whether the work is still running on the previous gcwq on queueing. Implement it. 
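
As an illustration only (not part of the patch), the queueing-time decision boils down to something like the sketch below. The pick_gcwq() helper is hypothetical; it reuses get_gcwq(), get_work_gcwq() and find_worker_executing_work() from the earlier patches and omits the gcwq->lock handling that the real __queue_work() hunk below has to do:

static struct global_cwq *pick_gcwq(struct workqueue_struct *wq,
				    struct work_struct *work,
				    unsigned int cpu)
{
	struct global_cwq *gcwq = get_gcwq(cpu);
	struct global_cwq *last_gcwq = get_work_gcwq(work);
	struct worker *worker;

	/* only non-reentrant wqs with a known last gcwq need the check */
	if (!(wq->flags & WQ_NON_REENTRANT) || !last_gcwq || last_gcwq == gcwq)
		return gcwq;

	/*
	 * If the work is still being executed for the same wq on the
	 * gcwq it last ran on, queue it there so the same work item
	 * never runs on two cpus at the same time.
	 */
	worker = find_worker_executing_work(last_gcwq, work);
	if (worker && worker->current_cwq->wq == wq)
		return last_gcwq;

	return gcwq;
}

The actual __queue_work() change below additionally takes last_gcwq->lock before the lookup and falls back to the local gcwq when the work is no longer running there.
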
Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 1 + kernel/workqueue.c | 32 +++++++++++++++++++++++++++++--- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0a7814131e66..07cf5e5f91cb 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -225,6 +225,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } enum { WQ_FREEZEABLE = 1 << 0, /* freeze during suspend */ WQ_SINGLE_CPU = 1 << 1, /* only single cpu at a time */ + WQ_NON_REENTRANT = 1 << 2, /* guarantee non-reentrance */ }; extern struct workqueue_struct * diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c68277c204ab..bce1074bdec1 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -534,11 +534,37 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, debug_work_activate(work); - /* determine gcwq to use */ + /* + * Determine gcwq to use. SINGLE_CPU is inherently + * NON_REENTRANT, so test it first. + */ if (!(wq->flags & WQ_SINGLE_CPU)) { - /* just use the requested cpu for multicpu workqueues */ + struct global_cwq *last_gcwq; + + /* + * It's multi cpu. If @wq is non-reentrant and @work + * was previously on a different cpu, it might still + * be running there, in which case the work needs to + * be queued on that cpu to guarantee non-reentrance. + */ gcwq = get_gcwq(cpu); - spin_lock_irqsave(&gcwq->lock, flags); + if (wq->flags & WQ_NON_REENTRANT && + (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) { + struct worker *worker; + + spin_lock_irqsave(&last_gcwq->lock, flags); + + worker = find_worker_executing_work(last_gcwq, work); + + if (worker && worker->current_cwq->wq == wq) + gcwq = last_gcwq; + else { + /* meh... not running there, queue here */ + spin_unlock_irqrestore(&last_gcwq->lock, flags); + spin_lock_irqsave(&gcwq->lock, flags); + } + } else + spin_lock_irqsave(&gcwq->lock, flags); } else { unsigned int req_cpu = cpu; -- cgit v1.2.3-59-g8ed1b From 7e11629d0efec829cbf62366143ba1081944a70e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:13 +0200 Subject: workqueue: use shared worklist and pool all workers per cpu Use gcwq->worklist instead of cwq->worklist and break the strict association between a cwq and its worker. All works queued on a cpu are queued on gcwq->worklist and processed by any available worker on the gcwq. As there no longer is strict association between a cwq and its worker, whether a work is executing can now only be determined by calling [__]find_worker_executing_work(). After this change, the only association between a cwq and its worker is that a cwq puts a worker into shared worker pool on creation and kills it on destruction. As all workqueues are still limited to max_active of one, this means that there are always at least as many workers as active works and thus there's no danger for deadlock. The break of strong association between cwqs and workers requires somewhat clumsy changes to current_is_keventd() and destroy_workqueue(). Dynamic worker pool management will remove both clumsy changes. current_is_keventd() won't be necessary at all as the only reason it exists is to avoid queueing a work from a work which will be allowed just fine. The clumsy part of destroy_workqueue() is added because a worker can only be destroyed while idle and there's no guarantee a worker is idle when its wq is going down. 
With dynamic pool management, workers are not associated with workqueues at all and only idle ones will be submitted to destroy_workqueue() so the code won't be necessary anymore. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 131 ++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 99 insertions(+), 32 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index bce1074bdec1..e697d6c72daa 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -34,6 +34,7 @@ #include #include #include +#include enum { /* global_cwq flags */ @@ -72,7 +73,6 @@ enum { */ struct global_cwq; -struct cpu_workqueue_struct; struct worker { /* on idle list while idle, on busy hash table while busy */ @@ -86,7 +86,6 @@ struct worker { struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ struct global_cwq *gcwq; /* I: the associated gcwq */ - struct cpu_workqueue_struct *cwq; /* I: the associated cwq */ unsigned int flags; /* L: flags */ int id; /* I: worker id */ }; @@ -96,6 +95,7 @@ struct worker { */ struct global_cwq { spinlock_t lock; /* the gcwq lock */ + struct list_head worklist; /* L: list of pending works */ unsigned int cpu; /* I: the associated cpu */ unsigned int flags; /* L: GCWQ_* flags */ @@ -121,7 +121,6 @@ struct global_cwq { */ struct cpu_workqueue_struct { struct global_cwq *gcwq; /* I: the associated gcwq */ - struct list_head worklist; struct worker *worker; struct workqueue_struct *wq; /* I: the owning workqueue */ int work_color; /* L: current color */ @@ -386,6 +385,32 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) return get_gcwq(cpu); } +/* Return the first worker. Safe with preemption disabled */ +static struct worker *first_worker(struct global_cwq *gcwq) +{ + if (unlikely(list_empty(&gcwq->idle_list))) + return NULL; + + return list_first_entry(&gcwq->idle_list, struct worker, entry); +} + +/** + * wake_up_worker - wake up an idle worker + * @gcwq: gcwq to wake worker for + * + * Wake up the first idle worker of @gcwq. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock). + */ +static void wake_up_worker(struct global_cwq *gcwq) +{ + struct worker *worker = first_worker(gcwq); + + if (likely(worker)) + wake_up_process(worker->task); +} + /** * busy_worker_head - return the busy hash head for a work * @gcwq: gcwq of interest @@ -467,13 +492,14 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq, } /** - * insert_work - insert a work into cwq + * insert_work - insert a work into gcwq * @cwq: cwq @work belongs to * @work: work to insert * @head: insertion point * @extra_flags: extra WORK_STRUCT_* flags to set * - * Insert @work into @cwq after @head. + * Insert @work which belongs to @cwq into @gcwq after @head. + * @extra_flags is or'd to work_struct flags. * * CONTEXT: * spin_lock_irq(gcwq->lock). 
@@ -492,7 +518,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, smp_wmb(); list_add_tail(&work->entry, head); - wake_up_process(cwq->worker->task); + wake_up_worker(cwq->gcwq); } /** @@ -608,7 +634,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, if (likely(cwq->nr_active < cwq->max_active)) { cwq->nr_active++; - worklist = &cwq->worklist; + worklist = &gcwq->worklist; } else worklist = &cwq->delayed_works; @@ -793,10 +819,10 @@ static struct worker *alloc_worker(void) /** * create_worker - create a new workqueue worker - * @cwq: cwq the new worker will belong to + * @gcwq: gcwq the new worker will belong to * @bind: whether to set affinity to @cpu or not * - * Create a new worker which is bound to @cwq. The returned worker + * Create a new worker which is bound to @gcwq. The returned worker * can be started by calling start_worker() or destroyed using * destroy_worker(). * @@ -806,9 +832,8 @@ static struct worker *alloc_worker(void) * RETURNS: * Pointer to the newly created worker. */ -static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind) +static struct worker *create_worker(struct global_cwq *gcwq, bool bind) { - struct global_cwq *gcwq = cwq->gcwq; int id = -1; struct worker *worker = NULL; @@ -826,7 +851,6 @@ static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind) goto fail; worker->gcwq = gcwq; - worker->cwq = cwq; worker->id = id; worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d", @@ -953,7 +977,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) struct work_struct *work = list_first_entry(&cwq->delayed_works, struct work_struct, entry); - move_linked_works(work, &cwq->worklist, NULL); + move_linked_works(work, &cwq->gcwq->worklist, NULL); cwq->nr_active++; } @@ -1021,11 +1045,12 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) */ static void process_one_work(struct worker *worker, struct work_struct *work) { - struct cpu_workqueue_struct *cwq = worker->cwq; + struct cpu_workqueue_struct *cwq = get_work_cwq(work); struct global_cwq *gcwq = cwq->gcwq; struct hlist_head *bwh = busy_worker_head(gcwq, work); work_func_t f = work->func; int work_color; + struct worker *collision; #ifdef CONFIG_LOCKDEP /* * It is permissible to free the struct work_struct from @@ -1036,6 +1061,18 @@ static void process_one_work(struct worker *worker, struct work_struct *work) */ struct lockdep_map lockdep_map = work->lockdep_map; #endif + /* + * A single work shouldn't be executed concurrently by + * multiple workers on a single cpu. Check whether anyone is + * already processing the work. If so, defer the work to the + * currently executing one. 
+ */ + collision = __find_worker_executing_work(gcwq, bwh, work); + if (unlikely(collision)) { + move_linked_works(work, &collision->scheduled, NULL); + return; + } + /* claim and process */ debug_work_deactivate(work); hlist_add_head(&worker->hentry, bwh); @@ -1043,7 +1080,6 @@ static void process_one_work(struct worker *worker, struct work_struct *work) worker->current_cwq = cwq; work_color = get_work_color(work); - BUG_ON(get_work_cwq(work) != cwq); /* record the current cpu number in the work data and dequeue */ set_work_cpu(work, gcwq->cpu); list_del_init(&work->entry); @@ -1107,7 +1143,6 @@ static int worker_thread(void *__worker) { struct worker *worker = __worker; struct global_cwq *gcwq = worker->gcwq; - struct cpu_workqueue_struct *cwq = worker->cwq; woke_up: spin_lock_irq(&gcwq->lock); @@ -1127,9 +1162,9 @@ recheck: */ BUG_ON(!list_empty(&worker->scheduled)); - while (!list_empty(&cwq->worklist)) { + while (!list_empty(&gcwq->worklist)) { struct work_struct *work = - list_first_entry(&cwq->worklist, + list_first_entry(&gcwq->worklist, struct work_struct, entry); /* @@ -1844,18 +1879,37 @@ int keventd_up(void) int current_is_keventd(void) { - struct cpu_workqueue_struct *cwq; - int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */ - int ret = 0; + bool found = false; + unsigned int cpu; - BUG_ON(!keventd_wq); + /* + * There no longer is one-to-one relation between worker and + * work queue and a worker task might be unbound from its cpu + * if the cpu was offlined. Match all busy workers. This + * function will go away once dynamic pool is implemented. + */ + for_each_possible_cpu(cpu) { + struct global_cwq *gcwq = get_gcwq(cpu); + struct worker *worker; + struct hlist_node *pos; + unsigned long flags; + int i; - cwq = get_cwq(cpu, keventd_wq); - if (current == cwq->worker->task) - ret = 1; + spin_lock_irqsave(&gcwq->lock, flags); - return ret; + for_each_busy_worker(worker, i, pos, gcwq) { + if (worker->task == current) { + found = true; + break; + } + } + + spin_unlock_irqrestore(&gcwq->lock, flags); + if (found) + break; + } + return found; } static struct cpu_workqueue_struct *alloc_cwqs(void) @@ -1953,12 +2007,11 @@ struct workqueue_struct *__create_workqueue_key(const char *name, cwq->wq = wq; cwq->flush_color = -1; cwq->max_active = max_active; - INIT_LIST_HEAD(&cwq->worklist); INIT_LIST_HEAD(&cwq->delayed_works); if (failed) continue; - cwq->worker = create_worker(cwq, cpu_online(cpu)); + cwq->worker = create_worker(gcwq, cpu_online(cpu)); if (cwq->worker) start_worker(cwq->worker); else @@ -2020,13 +2073,26 @@ void destroy_workqueue(struct workqueue_struct *wq) for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + struct global_cwq *gcwq = cwq->gcwq; int i; if (cwq->worker) { - spin_lock_irq(&cwq->gcwq->lock); + retry: + spin_lock_irq(&gcwq->lock); + /* + * Worker can only be destroyed while idle. + * Wait till it becomes idle. This is ugly + * and prone to starvation. It will go away + * once dynamic worker pool is implemented. + */ + if (!(cwq->worker->flags & WORKER_IDLE)) { + spin_unlock_irq(&gcwq->lock); + msleep(100); + goto retry; + } destroy_worker(cwq->worker); cwq->worker = NULL; - spin_unlock_irq(&cwq->gcwq->lock); + spin_unlock_irq(&gcwq->lock); } for (i = 0; i < WORK_NR_COLORS; i++) @@ -2324,7 +2390,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu); * * Start freezing workqueues. After this function returns, all * freezeable workqueues will queue new works to their frozen_works - * list instead of the cwq ones. 
+ * list instead of gcwq->worklist. * * CONTEXT: * Grabs and releases workqueue_lock and gcwq->lock's. @@ -2410,7 +2476,7 @@ out_unlock: * thaw_workqueues - thaw workqueues * * Thaw workqueues. Normal queueing is restored and all collected - * frozen works are transferred to their respective cwq worklists. + * frozen works are transferred to their respective gcwq worklists. * * CONTEXT: * Grabs and releases workqueue_lock and gcwq->lock's. @@ -2483,6 +2549,7 @@ void __init init_workqueues(void) struct global_cwq *gcwq = get_gcwq(cpu); spin_lock_init(&gcwq->lock); + INIT_LIST_HEAD(&gcwq->worklist); gcwq->cpu = cpu; INIT_LIST_HEAD(&gcwq->idle_list); -- cgit v1.2.3-59-g8ed1b From d302f0178223802a1e496ba90c66193b7721c9c1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:13 +0200 Subject: workqueue: implement worker_{set|clr}_flags() Implement worker_{set|clr}_flags() to manipulate worker flags. These are currently simple wrappers but logics to track the current worker state and the current level of concurrency will be added. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 48 ++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 40 insertions(+), 8 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e697d6c72daa..4c31fde092c6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -411,6 +411,38 @@ static void wake_up_worker(struct global_cwq *gcwq) wake_up_process(worker->task); } +/** + * worker_set_flags - set worker flags + * @worker: worker to set flags for + * @flags: flags to set + * @wakeup: wakeup an idle worker if necessary + * + * Set @flags in @worker->flags. + * + * LOCKING: + * spin_lock_irq(gcwq->lock). + */ +static inline void worker_set_flags(struct worker *worker, unsigned int flags, + bool wakeup) +{ + worker->flags |= flags; +} + +/** + * worker_clr_flags - clear worker flags + * @worker: worker to set flags for + * @flags: flags to clear + * + * Clear @flags in @worker->flags. + * + * LOCKING: + * spin_lock_irq(gcwq->lock). 
+ */ +static inline void worker_clr_flags(struct worker *worker, unsigned int flags) +{ + worker->flags &= ~flags; +} + /** * busy_worker_head - return the busy hash head for a work * @gcwq: gcwq of interest @@ -776,7 +808,7 @@ static void worker_enter_idle(struct worker *worker) BUG_ON(!list_empty(&worker->entry) && (worker->hentry.next || worker->hentry.pprev)); - worker->flags |= WORKER_IDLE; + worker_set_flags(worker, WORKER_IDLE, false); gcwq->nr_idle++; /* idle_list is LIFO */ @@ -800,7 +832,7 @@ static void worker_leave_idle(struct worker *worker) struct global_cwq *gcwq = worker->gcwq; BUG_ON(!(worker->flags & WORKER_IDLE)); - worker->flags &= ~WORKER_IDLE; + worker_clr_flags(worker, WORKER_IDLE); gcwq->nr_idle--; list_del_init(&worker->entry); } @@ -890,7 +922,7 @@ fail: */ static void start_worker(struct worker *worker) { - worker->flags |= WORKER_STARTED; + worker_set_flags(worker, WORKER_STARTED, false); worker->gcwq->nr_workers++; worker_enter_idle(worker); wake_up_process(worker->task); @@ -920,7 +952,7 @@ static void destroy_worker(struct worker *worker) gcwq->nr_idle--; list_del_init(&worker->entry); - worker->flags |= WORKER_DIE; + worker_set_flags(worker, WORKER_DIE, false); spin_unlock_irq(&gcwq->lock); @@ -2214,10 +2246,10 @@ static int __cpuinit trustee_thread(void *__gcwq) BUG_ON(gcwq->cpu != smp_processor_id()); list_for_each_entry(worker, &gcwq->idle_list, entry) - worker->flags |= WORKER_ROGUE; + worker_set_flags(worker, WORKER_ROGUE, false); for_each_busy_worker(worker, i, pos, gcwq) - worker->flags |= WORKER_ROGUE; + worker_set_flags(worker, WORKER_ROGUE, false); /* * We're now in charge. Notify and proceed to drain. We need @@ -2324,10 +2356,10 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, /* clear ROGUE from all workers */ list_for_each_entry(worker, &gcwq->idle_list, entry) - worker->flags &= ~WORKER_ROGUE; + worker_clr_flags(worker, WORKER_ROGUE); for_each_busy_worker(worker, i, pos, gcwq) - worker->flags &= ~WORKER_ROGUE; + worker_clr_flags(worker, WORKER_ROGUE); break; } -- cgit v1.2.3-59-g8ed1b From e22bee782b3b00bd4534ae9b1c5fb2e8e6573c5c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:14 +0200 Subject: workqueue: implement concurrency managed dynamic worker pool Instead of creating a worker for each cwq and putting it into the shared pool, manage per-cpu workers dynamically. Works aren't supposed to be cpu cycle hogs and maintaining just enough concurrency to prevent work processing from stalling due to lack of processing context is optimal. gcwq keeps the number of concurrent active workers to minimum but no less. As long as there's one or more running workers on the cpu, no new worker is scheduled so that works can be processed in batch as much as possible but when the last running worker blocks, gcwq immediately schedules new worker so that the cpu doesn't sit idle while there are works to be processed. gcwq always keeps at least single idle worker around. When a new worker is necessary and the worker is the last idle one, the worker assumes the role of "manager" and manages the worker pool - ie. creates another worker. Forward-progress is guaranteed by having dedicated rescue workers for workqueues which may be necessary while creating a new worker. When the manager is having problem creating a new worker, mayday timer activates and rescue workers are summoned to the cpu and execute works which might be necessary to create new workers. 
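
For illustration only (not part of the patch), the wake-up policy described above reduces to checks like the sketch below. The names gcwq_needs_wakeup() and gcwq_should_keep_working() are made up for this sketch; the real helpers are need_more_worker(), keep_working() and manage_workers() in the diff that follows, and nr_running counts workers which are neither sleeping nor otherwise NOT_RUNNING:

/* wake an idle worker only if there is work and nobody is running */
static bool gcwq_needs_wakeup(struct global_cwq *gcwq)
{
	return !list_empty(&gcwq->worklist) &&
		!atomic_read(get_gcwq_nr_running(gcwq->cpu));
}

/* a running worker keeps going only while it is the last runnable one */
static bool gcwq_should_keep_working(struct global_cwq *gcwq)
{
	return !list_empty(&gcwq->worklist) &&
		atomic_read(get_gcwq_nr_running(gcwq->cpu)) <= 1;
}

The <= 1 in the second check accounts for the checking worker itself still being counted in nr_running.
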
Trustee is expanded to serve the role of manager while a CPU is being taken down and stays down. As no new works are supposed to be queued on a dead cpu, it just needs to drain all the existing ones. Trustee continues to try to create new workers and summon rescuers as long as there are pending works. If the CPU is brought back up while the trustee is still trying to drain the gcwq from the previous offlining, the trustee will kill all idles ones and tell workers which are still busy to rebind to the cpu, and pass control over to gcwq which assumes the manager role as necessary. Concurrency managed worker pool reduces the number of workers drastically. Only workers which are necessary to keep the processing going are created and kept. Also, it reduces cache footprint by avoiding unnecessarily switching contexts between different workers. Please note that this patch does not increase max_active of any workqueue. All workqueues can still only process one work per cpu. Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 8 +- kernel/workqueue.c | 936 +++++++++++++++++++++++++++++++++++++++++----- kernel/workqueue_sched.h | 13 +- 3 files changed, 841 insertions(+), 116 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 07cf5e5f91cb..b8f4ec45c40a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -226,6 +226,7 @@ enum { WQ_FREEZEABLE = 1 << 0, /* freeze during suspend */ WQ_SINGLE_CPU = 1 << 1, /* only single cpu at a time */ WQ_NON_REENTRANT = 1 << 2, /* guarantee non-reentrance */ + WQ_RESCUER = 1 << 3, /* has an rescue worker */ }; extern struct workqueue_struct * @@ -252,11 +253,12 @@ __create_workqueue_key(const char *name, unsigned int flags, int max_active, #endif #define create_workqueue(name) \ - __create_workqueue((name), 0, 1) + __create_workqueue((name), WQ_RESCUER, 1) #define create_freezeable_workqueue(name) \ - __create_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_CPU, 1) + __create_workqueue((name), \ + WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1) #define create_singlethread_workqueue(name) \ - __create_workqueue((name), WQ_SINGLE_CPU, 1) + __create_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4c31fde092c6..0ad46523b423 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -34,17 +34,25 @@ #include #include #include -#include + +#include "workqueue_sched.h" enum { /* global_cwq flags */ + GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ + GCWQ_MANAGING_WORKERS = 1 << 1, /* managing workers */ + GCWQ_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ GCWQ_FREEZING = 1 << 3, /* freeze in progress */ /* worker flags */ WORKER_STARTED = 1 << 0, /* started */ WORKER_DIE = 1 << 1, /* die die die */ WORKER_IDLE = 1 << 2, /* is idle */ + WORKER_PREP = 1 << 3, /* preparing to run works */ WORKER_ROGUE = 1 << 4, /* not bound to any cpu */ + WORKER_REBIND = 1 << 5, /* mom is home, come back */ + + WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND, /* gcwq->trustee_state */ TRUSTEE_START = 0, /* start */ @@ -57,7 +65,19 @@ enum { BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER, BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1, + MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ + IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ + + MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */ + MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms 
*/ + CREATE_COOLDOWN = HZ, /* time to breath after fail */ TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ + + /* + * Rescue workers are used only on emergencies and shared by + * all cpus. Give -20. + */ + RESCUER_NICE_LEVEL = -20, }; /* @@ -65,8 +85,16 @@ enum { * * I: Set during initialization and read-only afterwards. * + * P: Preemption protected. Disabling preemption is enough and should + * only be modified and accessed from the local cpu. + * * L: gcwq->lock protected. Access with gcwq->lock held. * + * X: During normal operation, modification requires gcwq->lock and + * should be done only from local cpu. Either disabling preemption + * on local cpu or grabbing gcwq->lock is enough for read access. + * While trustee is in charge, it's identical to L. + * * F: wq->flush_mutex protected. * * W: workqueue_lock protected. @@ -74,6 +102,10 @@ enum { struct global_cwq; +/* + * The poor guys doing the actual heavy lifting. All on-duty workers + * are either serving the manager role, on idle list or on busy hash. + */ struct worker { /* on idle list while idle, on busy hash table while busy */ union { @@ -86,12 +118,17 @@ struct worker { struct list_head scheduled; /* L: scheduled works */ struct task_struct *task; /* I: worker task */ struct global_cwq *gcwq; /* I: the associated gcwq */ - unsigned int flags; /* L: flags */ + /* 64 bytes boundary on 64bit, 32 on 32bit */ + unsigned long last_active; /* L: last active timestamp */ + unsigned int flags; /* X: flags */ int id; /* I: worker id */ + struct work_struct rebind_work; /* L: rebind worker to cpu */ }; /* - * Global per-cpu workqueue. + * Global per-cpu workqueue. There's one and only one for each cpu + * and all works are queued and processed here regardless of their + * target workqueues. */ struct global_cwq { spinlock_t lock; /* the gcwq lock */ @@ -103,15 +140,19 @@ struct global_cwq { int nr_idle; /* L: currently idle ones */ /* workers are chained either in the idle_list or busy_hash */ - struct list_head idle_list; /* L: list of idle workers */ + struct list_head idle_list; /* X: list of idle workers */ struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE]; /* L: hash of busy workers */ + struct timer_list idle_timer; /* L: worker idle timeout */ + struct timer_list mayday_timer; /* L: SOS timer for dworkers */ + struct ida worker_ida; /* L: for worker IDs */ struct task_struct *trustee; /* L: for gcwq shutdown */ unsigned int trustee_state; /* L: trustee state */ wait_queue_head_t trustee_wait; /* trustee wait */ + struct worker *first_idle; /* L: first idle worker */ } ____cacheline_aligned_in_smp; /* @@ -121,7 +162,6 @@ struct global_cwq { */ struct cpu_workqueue_struct { struct global_cwq *gcwq; /* I: the associated gcwq */ - struct worker *worker; struct workqueue_struct *wq; /* I: the owning workqueue */ int work_color; /* L: current color */ int flush_color; /* L: flushing color */ @@ -160,6 +200,9 @@ struct workqueue_struct { unsigned long single_cpu; /* cpu for single cpu wq */ + cpumask_var_t mayday_mask; /* cpus requesting rescue */ + struct worker *rescuer; /* I: rescue worker */ + int saved_max_active; /* I: saved cwq max_active */ const char *name; /* I: workqueue name */ #ifdef CONFIG_LOCKDEP @@ -286,7 +329,13 @@ static DEFINE_SPINLOCK(workqueue_lock); static LIST_HEAD(workqueues); static bool workqueue_freezing; /* W: have wqs started freezing? */ +/* + * The almighty global cpu workqueues. nr_running is the only field + * which is expected to be used frequently by other cpus via + * try_to_wake_up(). 
Put it in a separate cacheline. + */ static DEFINE_PER_CPU(struct global_cwq, global_cwq); +static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running); static int worker_thread(void *__worker); @@ -295,6 +344,11 @@ static struct global_cwq *get_gcwq(unsigned int cpu) return &per_cpu(global_cwq, cpu); } +static atomic_t *get_gcwq_nr_running(unsigned int cpu) +{ + return &per_cpu(gcwq_nr_running, cpu); +} + static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, struct workqueue_struct *wq) { @@ -385,6 +439,63 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) return get_gcwq(cpu); } +/* + * Policy functions. These define the policies on how the global + * worker pool is managed. Unless noted otherwise, these functions + * assume that they're being called with gcwq->lock held. + */ + +/* + * Need to wake up a worker? Called from anything but currently + * running workers. + */ +static bool need_more_worker(struct global_cwq *gcwq) +{ + atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); + + return !list_empty(&gcwq->worklist) && !atomic_read(nr_running); +} + +/* Can I start working? Called from busy but !running workers. */ +static bool may_start_working(struct global_cwq *gcwq) +{ + return gcwq->nr_idle; +} + +/* Do I need to keep working? Called from currently running workers. */ +static bool keep_working(struct global_cwq *gcwq) +{ + atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); + + return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1; +} + +/* Do we need a new worker? Called from manager. */ +static bool need_to_create_worker(struct global_cwq *gcwq) +{ + return need_more_worker(gcwq) && !may_start_working(gcwq); +} + +/* Do I need to be the manager? */ +static bool need_to_manage_workers(struct global_cwq *gcwq) +{ + return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS; +} + +/* Do we have too many workers and should some go away? */ +static bool too_many_workers(struct global_cwq *gcwq) +{ + bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS; + int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */ + int nr_busy = gcwq->nr_workers - nr_idle; + + return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; +} + +/* + * Wake up functions. + */ + /* Return the first worker. Safe with preemption disabled */ static struct worker *first_worker(struct global_cwq *gcwq) { @@ -412,12 +523,77 @@ static void wake_up_worker(struct global_cwq *gcwq) } /** - * worker_set_flags - set worker flags + * wq_worker_waking_up - a worker is waking up + * @task: task waking up + * @cpu: CPU @task is waking up to + * + * This function is called during try_to_wake_up() when a worker is + * being awoken. + * + * CONTEXT: + * spin_lock_irq(rq->lock) + */ +void wq_worker_waking_up(struct task_struct *task, unsigned int cpu) +{ + struct worker *worker = kthread_data(task); + + if (likely(!(worker->flags & WORKER_NOT_RUNNING))) + atomic_inc(get_gcwq_nr_running(cpu)); +} + +/** + * wq_worker_sleeping - a worker is going to sleep + * @task: task going to sleep + * @cpu: CPU in question, must be the current CPU number + * + * This function is called during schedule() when a busy worker is + * going to sleep. Worker on the same cpu can be woken up by + * returning pointer to its task. + * + * CONTEXT: + * spin_lock_irq(rq->lock) + * + * RETURNS: + * Worker task on @cpu to wake up, %NULL if none. 
+ */ +struct task_struct *wq_worker_sleeping(struct task_struct *task, + unsigned int cpu) +{ + struct worker *worker = kthread_data(task), *to_wakeup = NULL; + struct global_cwq *gcwq = get_gcwq(cpu); + atomic_t *nr_running = get_gcwq_nr_running(cpu); + + if (unlikely(worker->flags & WORKER_NOT_RUNNING)) + return NULL; + + /* this can only happen on the local cpu */ + BUG_ON(cpu != raw_smp_processor_id()); + + /* + * The counterpart of the following dec_and_test, implied mb, + * worklist not empty test sequence is in insert_work(). + * Please read comment there. + * + * NOT_RUNNING is clear. This means that trustee is not in + * charge and we're running on the local cpu w/ rq lock held + * and preemption disabled, which in turn means that none else + * could be manipulating idle_list, so dereferencing idle_list + * without gcwq lock is safe. + */ + if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist)) + to_wakeup = first_worker(gcwq); + return to_wakeup ? to_wakeup->task : NULL; +} + +/** + * worker_set_flags - set worker flags and adjust nr_running accordingly * @worker: worker to set flags for * @flags: flags to set * @wakeup: wakeup an idle worker if necessary * - * Set @flags in @worker->flags. + * Set @flags in @worker->flags and adjust nr_running accordingly. If + * nr_running becomes zero and @wakeup is %true, an idle worker is + * woken up. * * LOCKING: * spin_lock_irq(gcwq->lock). @@ -425,22 +601,49 @@ static void wake_up_worker(struct global_cwq *gcwq) static inline void worker_set_flags(struct worker *worker, unsigned int flags, bool wakeup) { + struct global_cwq *gcwq = worker->gcwq; + + /* + * If transitioning into NOT_RUNNING, adjust nr_running and + * wake up an idle worker as necessary if requested by + * @wakeup. + */ + if ((flags & WORKER_NOT_RUNNING) && + !(worker->flags & WORKER_NOT_RUNNING)) { + atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); + + if (wakeup) { + if (atomic_dec_and_test(nr_running) && + !list_empty(&gcwq->worklist)) + wake_up_worker(gcwq); + } else + atomic_dec(nr_running); + } + worker->flags |= flags; } /** - * worker_clr_flags - clear worker flags + * worker_clr_flags - clear worker flags and adjust nr_running accordingly * @worker: worker to set flags for * @flags: flags to clear * - * Clear @flags in @worker->flags. + * Clear @flags in @worker->flags and adjust nr_running accordingly. * * LOCKING: * spin_lock_irq(gcwq->lock). */ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) { + struct global_cwq *gcwq = worker->gcwq; + unsigned int oflags = worker->flags; + worker->flags &= ~flags; + + /* if transitioning out of NOT_RUNNING, increment nr_running */ + if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) + if (!(worker->flags & WORKER_NOT_RUNNING)) + atomic_inc(get_gcwq_nr_running(gcwq->cpu)); } /** @@ -540,6 +743,8 @@ static void insert_work(struct cpu_workqueue_struct *cwq, struct work_struct *work, struct list_head *head, unsigned int extra_flags) { + struct global_cwq *gcwq = cwq->gcwq; + /* we own @work, set data and link */ set_work_cwq(work, cwq, extra_flags); @@ -550,7 +755,16 @@ static void insert_work(struct cpu_workqueue_struct *cwq, smp_wmb(); list_add_tail(&work->entry, head); - wake_up_worker(cwq->gcwq); + + /* + * Ensure either worker_sched_deactivated() sees the above + * list_add_tail() or we see zero nr_running to avoid workers + * lying around lazily while there are works to be processed. 
+ */ + smp_mb(); + + if (!atomic_read(get_gcwq_nr_running(gcwq->cpu))) + wake_up_worker(gcwq); } /** @@ -810,11 +1024,16 @@ static void worker_enter_idle(struct worker *worker) worker_set_flags(worker, WORKER_IDLE, false); gcwq->nr_idle++; + worker->last_active = jiffies; /* idle_list is LIFO */ list_add(&worker->entry, &gcwq->idle_list); - if (unlikely(worker->flags & WORKER_ROGUE)) + if (likely(!(worker->flags & WORKER_ROGUE))) { + if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer)) + mod_timer(&gcwq->idle_timer, + jiffies + IDLE_WORKER_TIMEOUT); + } else wake_up_all(&gcwq->trustee_wait); } @@ -837,6 +1056,81 @@ static void worker_leave_idle(struct worker *worker) list_del_init(&worker->entry); } +/** + * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq + * @worker: self + * + * Works which are scheduled while the cpu is online must at least be + * scheduled to a worker which is bound to the cpu so that if they are + * flushed from cpu callbacks while cpu is going down, they are + * guaranteed to execute on the cpu. + * + * This function is to be used by rogue workers and rescuers to bind + * themselves to the target cpu and may race with cpu going down or + * coming online. kthread_bind() can't be used because it may put the + * worker to already dead cpu and set_cpus_allowed_ptr() can't be used + * verbatim as it's best effort and blocking and gcwq may be + * [dis]associated in the meantime. + * + * This function tries set_cpus_allowed() and locks gcwq and verifies + * the binding against GCWQ_DISASSOCIATED which is set during + * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters + * idle state or fetches works without dropping lock, it can guarantee + * the scheduling requirement described in the first paragraph. + * + * CONTEXT: + * Might sleep. Called without any lock but returns with gcwq->lock + * held. + * + * RETURNS: + * %true if the associated gcwq is online (@worker is successfully + * bound), %false if offline. + */ +static bool worker_maybe_bind_and_lock(struct worker *worker) +{ + struct global_cwq *gcwq = worker->gcwq; + struct task_struct *task = worker->task; + + while (true) { + /* + * The following call may fail, succeed or succeed + * without actually migrating the task to the cpu if + * it races with cpu hotunplug operation. Verify + * against GCWQ_DISASSOCIATED. + */ + set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu)); + + spin_lock_irq(&gcwq->lock); + if (gcwq->flags & GCWQ_DISASSOCIATED) + return false; + if (task_cpu(task) == gcwq->cpu && + cpumask_equal(¤t->cpus_allowed, + get_cpu_mask(gcwq->cpu))) + return true; + spin_unlock_irq(&gcwq->lock); + + /* CPU has come up inbetween, retry migration */ + cpu_relax(); + } +} + +/* + * Function for worker->rebind_work used to rebind rogue busy workers + * to the associated cpu which is coming back online. This is + * scheduled by cpu up but can race with other cpu hotplug operations + * and may be executed twice without intervening cpu down. 
+ */ +static void worker_rebind_fn(struct work_struct *work) +{ + struct worker *worker = container_of(work, struct worker, rebind_work); + struct global_cwq *gcwq = worker->gcwq; + + if (worker_maybe_bind_and_lock(worker)) + worker_clr_flags(worker, WORKER_REBIND); + + spin_unlock_irq(&gcwq->lock); +} + static struct worker *alloc_worker(void) { struct worker *worker; @@ -845,6 +1139,9 @@ static struct worker *alloc_worker(void) if (worker) { INIT_LIST_HEAD(&worker->entry); INIT_LIST_HEAD(&worker->scheduled); + INIT_WORK(&worker->rebind_work, worker_rebind_fn); + /* on creation a worker is in !idle && prep state */ + worker->flags = WORKER_PREP; } return worker; } @@ -963,6 +1260,220 @@ static void destroy_worker(struct worker *worker) ida_remove(&gcwq->worker_ida, id); } +static void idle_worker_timeout(unsigned long __gcwq) +{ + struct global_cwq *gcwq = (void *)__gcwq; + + spin_lock_irq(&gcwq->lock); + + if (too_many_workers(gcwq)) { + struct worker *worker; + unsigned long expires; + + /* idle_list is kept in LIFO order, check the last one */ + worker = list_entry(gcwq->idle_list.prev, struct worker, entry); + expires = worker->last_active + IDLE_WORKER_TIMEOUT; + + if (time_before(jiffies, expires)) + mod_timer(&gcwq->idle_timer, expires); + else { + /* it's been idle for too long, wake up manager */ + gcwq->flags |= GCWQ_MANAGE_WORKERS; + wake_up_worker(gcwq); + } + } + + spin_unlock_irq(&gcwq->lock); +} + +static bool send_mayday(struct work_struct *work) +{ + struct cpu_workqueue_struct *cwq = get_work_cwq(work); + struct workqueue_struct *wq = cwq->wq; + + if (!(wq->flags & WQ_RESCUER)) + return false; + + /* mayday mayday mayday */ + if (!cpumask_test_and_set_cpu(cwq->gcwq->cpu, wq->mayday_mask)) + wake_up_process(wq->rescuer->task); + return true; +} + +static void gcwq_mayday_timeout(unsigned long __gcwq) +{ + struct global_cwq *gcwq = (void *)__gcwq; + struct work_struct *work; + + spin_lock_irq(&gcwq->lock); + + if (need_to_create_worker(gcwq)) { + /* + * We've been trying to create a new worker but + * haven't been successful. We might be hitting an + * allocation deadlock. Send distress signals to + * rescuers. + */ + list_for_each_entry(work, &gcwq->worklist, entry) + send_mayday(work); + } + + spin_unlock_irq(&gcwq->lock); + + mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL); +} + +/** + * maybe_create_worker - create a new worker if necessary + * @gcwq: gcwq to create a new worker for + * + * Create a new worker for @gcwq if necessary. @gcwq is guaranteed to + * have at least one idle worker on return from this function. If + * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is + * sent to all rescuers with works scheduled on @gcwq to resolve + * possible allocation deadlock. + * + * On return, need_to_create_worker() is guaranteed to be false and + * may_start_working() true. + * + * LOCKING: + * spin_lock_irq(gcwq->lock) which may be released and regrabbed + * multiple times. Does GFP_KERNEL allocations. Called only from + * manager. + * + * RETURNS: + * false if no action was taken and gcwq->lock stayed locked, true + * otherwise. 
+ */ +static bool maybe_create_worker(struct global_cwq *gcwq) +{ + if (!need_to_create_worker(gcwq)) + return false; +restart: + /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ + mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); + + while (true) { + struct worker *worker; + + spin_unlock_irq(&gcwq->lock); + + worker = create_worker(gcwq, true); + if (worker) { + del_timer_sync(&gcwq->mayday_timer); + spin_lock_irq(&gcwq->lock); + start_worker(worker); + BUG_ON(need_to_create_worker(gcwq)); + return true; + } + + if (!need_to_create_worker(gcwq)) + break; + + spin_unlock_irq(&gcwq->lock); + __set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(CREATE_COOLDOWN); + spin_lock_irq(&gcwq->lock); + if (!need_to_create_worker(gcwq)) + break; + } + + spin_unlock_irq(&gcwq->lock); + del_timer_sync(&gcwq->mayday_timer); + spin_lock_irq(&gcwq->lock); + if (need_to_create_worker(gcwq)) + goto restart; + return true; +} + +/** + * maybe_destroy_worker - destroy workers which have been idle for a while + * @gcwq: gcwq to destroy workers for + * + * Destroy @gcwq workers which have been idle for longer than + * IDLE_WORKER_TIMEOUT. + * + * LOCKING: + * spin_lock_irq(gcwq->lock) which may be released and regrabbed + * multiple times. Called only from manager. + * + * RETURNS: + * false if no action was taken and gcwq->lock stayed locked, true + * otherwise. + */ +static bool maybe_destroy_workers(struct global_cwq *gcwq) +{ + bool ret = false; + + while (too_many_workers(gcwq)) { + struct worker *worker; + unsigned long expires; + + worker = list_entry(gcwq->idle_list.prev, struct worker, entry); + expires = worker->last_active + IDLE_WORKER_TIMEOUT; + + if (time_before(jiffies, expires)) { + mod_timer(&gcwq->idle_timer, expires); + break; + } + + destroy_worker(worker); + ret = true; + } + + return ret; +} + +/** + * manage_workers - manage worker pool + * @worker: self + * + * Assume the manager role and manage gcwq worker pool @worker belongs + * to. At any given time, there can be only zero or one manager per + * gcwq. The exclusion is handled automatically by this function. + * + * The caller can safely start processing works on false return. On + * true return, it's guaranteed that need_to_create_worker() is false + * and may_start_working() is true. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock) which may be released and regrabbed + * multiple times. Does GFP_KERNEL allocations. + * + * RETURNS: + * false if no action was taken and gcwq->lock stayed locked, true if + * some action was taken. + */ +static bool manage_workers(struct worker *worker) +{ + struct global_cwq *gcwq = worker->gcwq; + bool ret = false; + + if (gcwq->flags & GCWQ_MANAGING_WORKERS) + return ret; + + gcwq->flags &= ~GCWQ_MANAGE_WORKERS; + gcwq->flags |= GCWQ_MANAGING_WORKERS; + + /* + * Destroy and then create so that may_start_working() is true + * on return. + */ + ret |= maybe_destroy_workers(gcwq); + ret |= maybe_create_worker(gcwq); + + gcwq->flags &= ~GCWQ_MANAGING_WORKERS; + + /* + * The trustee might be waiting to take over the manager + * position, tell it we're done. + */ + if (unlikely(gcwq->trustee)) + wake_up_all(&gcwq->trustee_wait); + + return ret; +} + /** * move_linked_works - move linked works to a list * @work: start of series of works to be scheduled @@ -1169,24 +1680,39 @@ static void process_scheduled_works(struct worker *worker) * worker_thread - the worker thread function * @__worker: self * - * The cwq worker thread function. 
+ * The gcwq worker thread function. There's a single dynamic pool of + * these per each cpu. These workers process all works regardless of + * their specific target workqueue. The only exception is works which + * belong to workqueues with a rescuer which will be explained in + * rescuer_thread(). */ static int worker_thread(void *__worker) { struct worker *worker = __worker; struct global_cwq *gcwq = worker->gcwq; + /* tell the scheduler that this is a workqueue worker */ + worker->task->flags |= PF_WQ_WORKER; woke_up: spin_lock_irq(&gcwq->lock); /* DIE can be set only while we're idle, checking here is enough */ if (worker->flags & WORKER_DIE) { spin_unlock_irq(&gcwq->lock); + worker->task->flags &= ~PF_WQ_WORKER; return 0; } worker_leave_idle(worker); recheck: + /* no more worker necessary? */ + if (!need_more_worker(gcwq)) + goto sleep; + + /* do we need to manage? */ + if (unlikely(!may_start_working(gcwq)) && manage_workers(worker)) + goto recheck; + /* * ->scheduled list can only be filled while a worker is * preparing to process a work or actually processing it. @@ -1194,27 +1720,18 @@ recheck: */ BUG_ON(!list_empty(&worker->scheduled)); - while (!list_empty(&gcwq->worklist)) { + /* + * When control reaches this point, we're guaranteed to have + * at least one idle worker or that someone else has already + * assumed the manager role. + */ + worker_clr_flags(worker, WORKER_PREP); + + do { struct work_struct *work = list_first_entry(&gcwq->worklist, struct work_struct, entry); - /* - * The following is a rather inefficient way to close - * race window against cpu hotplug operations. Will - * be replaced soon. - */ - if (unlikely(!(worker->flags & WORKER_ROGUE) && - !cpumask_equal(&worker->task->cpus_allowed, - get_cpu_mask(gcwq->cpu)))) { - spin_unlock_irq(&gcwq->lock); - set_cpus_allowed_ptr(worker->task, - get_cpu_mask(gcwq->cpu)); - cpu_relax(); - spin_lock_irq(&gcwq->lock); - goto recheck; - } - if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { /* optimization path, not strictly necessary */ process_one_work(worker, work); @@ -1224,13 +1741,19 @@ recheck: move_linked_works(work, &worker->scheduled, NULL); process_scheduled_works(worker); } - } + } while (keep_working(gcwq)); + + worker_set_flags(worker, WORKER_PREP, false); + if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker)) + goto recheck; +sleep: /* - * gcwq->lock is held and there's no work to process, sleep. - * Workers are woken up only while holding gcwq->lock, so - * setting the current state before releasing gcwq->lock is - * enough to prevent losing any event. + * gcwq->lock is held and there's no work to process and no + * need to manage, sleep. Workers are woken up only while + * holding gcwq->lock or from local cpu, so setting the + * current state before releasing gcwq->lock is enough to + * prevent losing any event. */ worker_enter_idle(worker); __set_current_state(TASK_INTERRUPTIBLE); @@ -1239,6 +1762,68 @@ recheck: goto woke_up; } +/** + * rescuer_thread - the rescuer thread function + * @__wq: the associated workqueue + * + * Workqueue rescuer thread function. There's one rescuer for each + * workqueue which has WQ_RESCUER set. + * + * Regular work processing on a gcwq may block trying to create a new + * worker which uses GFP_KERNEL allocation which has slight chance of + * developing into deadlock if some works currently on the same queue + * need to be processed to satisfy the GFP_KERNEL allocation. This is + * the problem rescuer solves. 
+ * + * When such condition is possible, the gcwq summons rescuers of all + * workqueues which have works queued on the gcwq and let them process + * those works so that forward progress can be guaranteed. + * + * This should happen rarely. + */ +static int rescuer_thread(void *__wq) +{ + struct workqueue_struct *wq = __wq; + struct worker *rescuer = wq->rescuer; + struct list_head *scheduled = &rescuer->scheduled; + unsigned int cpu; + + set_user_nice(current, RESCUER_NICE_LEVEL); +repeat: + set_current_state(TASK_INTERRUPTIBLE); + + if (kthread_should_stop()) + return 0; + + for_each_cpu(cpu, wq->mayday_mask) { + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + struct global_cwq *gcwq = cwq->gcwq; + struct work_struct *work, *n; + + __set_current_state(TASK_RUNNING); + cpumask_clear_cpu(cpu, wq->mayday_mask); + + /* migrate to the target cpu if possible */ + rescuer->gcwq = gcwq; + worker_maybe_bind_and_lock(rescuer); + + /* + * Slurp in all works issued via this workqueue and + * process'em. + */ + BUG_ON(!list_empty(&rescuer->scheduled)); + list_for_each_entry_safe(work, n, &gcwq->worklist, entry) + if (get_work_cwq(work) == cwq) + move_linked_works(work, scheduled, &n); + + process_scheduled_works(rescuer); + spin_unlock_irq(&gcwq->lock); + } + + schedule(); + goto repeat; +} + struct wq_barrier { struct work_struct work; struct completion done; @@ -1998,7 +2583,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name, const char *lock_name) { struct workqueue_struct *wq; - bool failed = false; unsigned int cpu; max_active = clamp_val(max_active, 1, INT_MAX); @@ -2023,13 +2607,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name, lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); INIT_LIST_HEAD(&wq->list); - cpu_maps_update_begin(); - /* - * We must initialize cwqs for each possible cpu even if we - * are going to call destroy_workqueue() finally. Otherwise - * cpu_up() can hit the uninitialized cwq once we drop the - * lock. - */ for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct global_cwq *gcwq = get_gcwq(cpu); @@ -2040,14 +2617,25 @@ struct workqueue_struct *__create_workqueue_key(const char *name, cwq->flush_color = -1; cwq->max_active = max_active; INIT_LIST_HEAD(&cwq->delayed_works); + } - if (failed) - continue; - cwq->worker = create_worker(gcwq, cpu_online(cpu)); - if (cwq->worker) - start_worker(cwq->worker); - else - failed = true; + if (flags & WQ_RESCUER) { + struct worker *rescuer; + + if (!alloc_cpumask_var(&wq->mayday_mask, GFP_KERNEL)) + goto err; + + wq->rescuer = rescuer = alloc_worker(); + if (!rescuer) + goto err; + + rescuer->task = kthread_create(rescuer_thread, wq, "%s", name); + if (IS_ERR(rescuer->task)) + goto err; + + wq->rescuer = rescuer; + rescuer->task->flags |= PF_THREAD_BOUND; + wake_up_process(rescuer->task); } /* @@ -2065,16 +2653,12 @@ struct workqueue_struct *__create_workqueue_key(const char *name, spin_unlock(&workqueue_lock); - cpu_maps_update_done(); - - if (failed) { - destroy_workqueue(wq); - wq = NULL; - } return wq; err: if (wq) { free_cwqs(wq->cpu_wq); + free_cpumask_var(wq->mayday_mask); + kfree(wq->rescuer); kfree(wq); } return NULL; @@ -2097,42 +2681,26 @@ void destroy_workqueue(struct workqueue_struct *wq) * wq list is used to freeze wq, remove from list after * flushing is complete in case freeze races us. 
*/ - cpu_maps_update_begin(); spin_lock(&workqueue_lock); list_del(&wq->list); spin_unlock(&workqueue_lock); - cpu_maps_update_done(); + /* sanity check */ for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - struct global_cwq *gcwq = cwq->gcwq; int i; - if (cwq->worker) { - retry: - spin_lock_irq(&gcwq->lock); - /* - * Worker can only be destroyed while idle. - * Wait till it becomes idle. This is ugly - * and prone to starvation. It will go away - * once dynamic worker pool is implemented. - */ - if (!(cwq->worker->flags & WORKER_IDLE)) { - spin_unlock_irq(&gcwq->lock); - msleep(100); - goto retry; - } - destroy_worker(cwq->worker); - cwq->worker = NULL; - spin_unlock_irq(&gcwq->lock); - } - for (i = 0; i < WORK_NR_COLORS; i++) BUG_ON(cwq->nr_in_flight[i]); BUG_ON(cwq->nr_active); BUG_ON(!list_empty(&cwq->delayed_works)); } + if (wq->flags & WQ_RESCUER) { + kthread_stop(wq->rescuer->task); + free_cpumask_var(wq->mayday_mask); + } + free_cwqs(wq->cpu_wq); kfree(wq); } @@ -2141,10 +2709,18 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); /* * CPU hotplug. * - * CPU hotplug is implemented by allowing cwqs to be detached from - * CPU, running with unbound workers and allowing them to be - * reattached later if the cpu comes back online. A separate thread - * is created to govern cwqs in such state and is called the trustee. + * There are two challenges in supporting CPU hotplug. Firstly, there + * are a lot of assumptions on strong associations among work, cwq and + * gcwq which make migrating pending and scheduled works very + * difficult to implement without impacting hot paths. Secondly, + * gcwqs serve mix of short, long and very long running works making + * blocked draining impractical. + * + * This is solved by allowing a gcwq to be detached from CPU, running + * it with unbound (rogue) workers and allowing it to be reattached + * later if the cpu comes back online. A separate thread is created + * to govern a gcwq in such state and is called the trustee of the + * gcwq. * * Trustee states and their descriptions. * @@ -2152,11 +2728,12 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); * new trustee is started with this state. * * IN_CHARGE Once started, trustee will enter this state after - * making all existing workers rogue. DOWN_PREPARE waits - * for trustee to enter this state. After reaching - * IN_CHARGE, trustee tries to execute the pending - * worklist until it's empty and the state is set to - * BUTCHER, or the state is set to RELEASE. + * assuming the manager role and making all existing + * workers rogue. DOWN_PREPARE waits for trustee to + * enter this state. After reaching IN_CHARGE, trustee + * tries to execute the pending worklist until it's empty + * and the state is set to BUTCHER, or the state is set + * to RELEASE. * * BUTCHER Command state which is set by the cpu callback after * the cpu has went down. Once this state is set trustee @@ -2167,7 +2744,9 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); * RELEASE Command state which is set by the cpu callback if the * cpu down has been canceled or it has come online * again. After recognizing this state, trustee stops - * trying to drain or butcher and transits to DONE. + * trying to drain or butcher and clears ROGUE, rebinds + * all remaining workers back to the cpu and releases + * manager role. * * DONE Trustee will enter this state after BUTCHER or RELEASE * is complete. 
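[ The trustee transitions described above can be summarized with a rough model
  (illustration only, not part of the patch; the demo_* names are made up).
  The CPU hotplug callback is what requests BUTCHER and RELEASE, the trustee
  drives the rest:

	#include <linux/cpu.h>

	enum demo_state { DEMO_START, DEMO_IN_CHARGE, DEMO_BUTCHER,
			  DEMO_RELEASE, DEMO_DONE };

	static enum demo_state demo_trustee_step(enum demo_state s,
						 unsigned long action)
	{
		switch (s) {
		case DEMO_START:	/* assume manager role, mark workers rogue */
			return DEMO_IN_CHARGE;
		case DEMO_IN_CHARGE:
			if (action == CPU_POST_DEAD)	/* cpu is really gone */
				return DEMO_BUTCHER;
			if (action == CPU_DOWN_FAILED || action == CPU_ONLINE)
				return DEMO_RELEASE;	/* down canceled / back online */
			return DEMO_IN_CHARGE;		/* keep draining the worklist */
		case DEMO_BUTCHER:	/* destroy whatever workers are left */
		case DEMO_RELEASE:	/* rebind workers, drop the manager role */
		default:
			return DEMO_DONE;
		}
	}
]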
@@ -2233,17 +2812,24 @@ static int __cpuinit trustee_thread(void *__gcwq) { struct global_cwq *gcwq = __gcwq; struct worker *worker; + struct work_struct *work; struct hlist_node *pos; + long rc; int i; BUG_ON(gcwq->cpu != smp_processor_id()); spin_lock_irq(&gcwq->lock); /* - * Make all workers rogue. Trustee must be bound to the - * target cpu and can't be cancelled. + * Claim the manager position and make all workers rogue. + * Trustee must be bound to the target cpu and can't be + * cancelled. */ BUG_ON(gcwq->cpu != smp_processor_id()); + rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS)); + BUG_ON(rc < 0); + + gcwq->flags |= GCWQ_MANAGING_WORKERS; list_for_each_entry(worker, &gcwq->idle_list, entry) worker_set_flags(worker, WORKER_ROGUE, false); @@ -2251,6 +2837,28 @@ static int __cpuinit trustee_thread(void *__gcwq) for_each_busy_worker(worker, i, pos, gcwq) worker_set_flags(worker, WORKER_ROGUE, false); + /* + * Call schedule() so that we cross rq->lock and thus can + * guarantee sched callbacks see the rogue flag. This is + * necessary as scheduler callbacks may be invoked from other + * cpus. + */ + spin_unlock_irq(&gcwq->lock); + schedule(); + spin_lock_irq(&gcwq->lock); + + /* + * Sched callbacks are disabled now. gcwq->nr_running should + * be zero and will stay that way, making need_more_worker() + * and keep_working() always return true as long as the + * worklist is not empty. + */ + WARN_ON_ONCE(atomic_read(get_gcwq_nr_running(gcwq->cpu)) != 0); + + spin_unlock_irq(&gcwq->lock); + del_timer_sync(&gcwq->idle_timer); + spin_lock_irq(&gcwq->lock); + /* * We're now in charge. Notify and proceed to drain. We need * to keep the gcwq running during the whole CPU down @@ -2263,18 +2871,90 @@ static int __cpuinit trustee_thread(void *__gcwq) /* * The original cpu is in the process of dying and may go away * anytime now. When that happens, we and all workers would - * be migrated to other cpus. Try draining any left work. - * Note that if the gcwq is frozen, there may be frozen works - * in freezeable cwqs. Don't declare completion while frozen. + * be migrated to other cpus. Try draining any left work. We + * want to get it over with ASAP - spam rescuers, wake up as + * many idlers as necessary and create new ones till the + * worklist is empty. Note that if the gcwq is frozen, there + * may be frozen works in freezeable cwqs. Don't declare + * completion while frozen. */ while (gcwq->nr_workers != gcwq->nr_idle || gcwq->flags & GCWQ_FREEZING || gcwq->trustee_state == TRUSTEE_IN_CHARGE) { + int nr_works = 0; + + list_for_each_entry(work, &gcwq->worklist, entry) { + send_mayday(work); + nr_works++; + } + + list_for_each_entry(worker, &gcwq->idle_list, entry) { + if (!nr_works--) + break; + wake_up_process(worker->task); + } + + if (need_to_create_worker(gcwq)) { + spin_unlock_irq(&gcwq->lock); + worker = create_worker(gcwq, false); + spin_lock_irq(&gcwq->lock); + if (worker) { + worker_set_flags(worker, WORKER_ROGUE, false); + start_worker(worker); + } + } + /* give a breather */ if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0) break; } + /* + * Either all works have been scheduled and cpu is down, or + * cpu down has already been canceled. Wait for and butcher + * all workers till we're canceled. 
+ */ + do { + rc = trustee_wait_event(!list_empty(&gcwq->idle_list)); + while (!list_empty(&gcwq->idle_list)) + destroy_worker(list_first_entry(&gcwq->idle_list, + struct worker, entry)); + } while (gcwq->nr_workers && rc >= 0); + + /* + * At this point, either draining has completed and no worker + * is left, or cpu down has been canceled or the cpu is being + * brought back up. There shouldn't be any idle one left. + * Tell the remaining busy ones to rebind once it finishes the + * currently scheduled works by scheduling the rebind_work. + */ + WARN_ON(!list_empty(&gcwq->idle_list)); + + for_each_busy_worker(worker, i, pos, gcwq) { + struct work_struct *rebind_work = &worker->rebind_work; + + /* + * Rebind_work may race with future cpu hotplug + * operations. Use a separate flag to mark that + * rebinding is scheduled. + */ + worker_set_flags(worker, WORKER_REBIND, false); + worker_clr_flags(worker, WORKER_ROGUE); + + /* queue rebind_work, wq doesn't matter, use the default one */ + if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, + work_data_bits(rebind_work))) + continue; + + debug_work_activate(rebind_work); + insert_work(get_cwq(gcwq->cpu, keventd_wq), rebind_work, + worker->scheduled.next, + work_color_to_flags(WORK_NO_COLOR)); + } + + /* relinquish manager role */ + gcwq->flags &= ~GCWQ_MANAGING_WORKERS; + /* notify completion */ gcwq->trustee = NULL; gcwq->trustee_state = TRUSTEE_DONE; @@ -2313,10 +2993,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, unsigned int cpu = (unsigned long)hcpu; struct global_cwq *gcwq = get_gcwq(cpu); struct task_struct *new_trustee = NULL; - struct worker *worker; - struct hlist_node *pos; + struct worker *uninitialized_var(new_worker); unsigned long flags; - int i; action &= ~CPU_TASKS_FROZEN; @@ -2327,6 +3005,15 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, if (IS_ERR(new_trustee)) return notifier_from_errno(PTR_ERR(new_trustee)); kthread_bind(new_trustee, cpu); + /* fall through */ + case CPU_UP_PREPARE: + BUG_ON(gcwq->first_idle); + new_worker = create_worker(gcwq, false); + if (!new_worker) { + if (new_trustee) + kthread_stop(new_trustee); + return NOTIFY_BAD; + } } /* some are called w/ irq disabled, don't disturb irq status */ @@ -2340,26 +3027,50 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, gcwq->trustee_state = TRUSTEE_START; wake_up_process(gcwq->trustee); wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE); + /* fall through */ + case CPU_UP_PREPARE: + BUG_ON(gcwq->first_idle); + gcwq->first_idle = new_worker; + break; + + case CPU_DYING: + /* + * Before this, the trustee and all workers except for + * the ones which are still executing works from + * before the last CPU down must be on the cpu. After + * this, they'll all be diasporas. + */ + gcwq->flags |= GCWQ_DISASSOCIATED; break; case CPU_POST_DEAD: gcwq->trustee_state = TRUSTEE_BUTCHER; + /* fall through */ + case CPU_UP_CANCELED: + destroy_worker(gcwq->first_idle); + gcwq->first_idle = NULL; break; case CPU_DOWN_FAILED: case CPU_ONLINE: + gcwq->flags &= ~GCWQ_DISASSOCIATED; if (gcwq->trustee_state != TRUSTEE_DONE) { gcwq->trustee_state = TRUSTEE_RELEASE; wake_up_process(gcwq->trustee); wait_trustee_state(gcwq, TRUSTEE_DONE); } - /* clear ROGUE from all workers */ - list_for_each_entry(worker, &gcwq->idle_list, entry) - worker_clr_flags(worker, WORKER_ROGUE); - - for_each_busy_worker(worker, i, pos, gcwq) - worker_clr_flags(worker, WORKER_ROGUE); + /* + * Trustee is done and there might be no worker left. 
+ * Put the first_idle in and request a real manager to + * take a look. + */ + spin_unlock_irq(&gcwq->lock); + kthread_bind(gcwq->first_idle->task, cpu); + spin_lock_irq(&gcwq->lock); + gcwq->flags |= GCWQ_MANAGE_WORKERS; + start_worker(gcwq->first_idle); + gcwq->first_idle = NULL; break; } @@ -2548,10 +3259,10 @@ void thaw_workqueues(void) if (wq->single_cpu == gcwq->cpu && !cwq->nr_active && list_empty(&cwq->delayed_works)) cwq_unbind_single_cpu(cwq); - - wake_up_process(cwq->worker->task); } + wake_up_worker(gcwq); + spin_unlock_irq(&gcwq->lock); } @@ -2588,12 +3299,31 @@ void __init init_workqueues(void) for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) INIT_HLIST_HEAD(&gcwq->busy_hash[i]); + init_timer_deferrable(&gcwq->idle_timer); + gcwq->idle_timer.function = idle_worker_timeout; + gcwq->idle_timer.data = (unsigned long)gcwq; + + setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout, + (unsigned long)gcwq); + ida_init(&gcwq->worker_ida); gcwq->trustee_state = TRUSTEE_DONE; init_waitqueue_head(&gcwq->trustee_wait); } + /* create the initial worker */ + for_each_online_cpu(cpu) { + struct global_cwq *gcwq = get_gcwq(cpu); + struct worker *worker; + + worker = create_worker(gcwq, true); + BUG_ON(!worker); + spin_lock_irq(&gcwq->lock); + start_worker(worker); + spin_unlock_irq(&gcwq->lock); + } + keventd_wq = create_workqueue("events"); BUG_ON(!keventd_wq); } diff --git a/kernel/workqueue_sched.h b/kernel/workqueue_sched.h index af040babb742..2d10fc98dc79 100644 --- a/kernel/workqueue_sched.h +++ b/kernel/workqueue_sched.h @@ -4,13 +4,6 @@ * Scheduler hooks for concurrency managed workqueue. Only to be * included from sched.c and workqueue.c. */ -static inline void wq_worker_waking_up(struct task_struct *task, - unsigned int cpu) -{ -} - -static inline struct task_struct *wq_worker_sleeping(struct task_struct *task, - unsigned int cpu) -{ - return NULL; -} +void wq_worker_waking_up(struct task_struct *task, unsigned int cpu); +struct task_struct *wq_worker_sleeping(struct task_struct *task, + unsigned int cpu); -- cgit v1.2.3-59-g8ed1b From b71ab8c2025caef8db719aa41af0ed735dc543cd Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:14 +0200 Subject: workqueue: increase max_active of keventd and kill current_is_keventd() Define WQ_MAX_ACTIVE and create keventd with max_active set to half of it which means that keventd now can process upto WQ_MAX_ACTIVE / 2 - 1 works concurrently. Unless some combination can result in dependency loop longer than max_active, deadlock won't happen and thus it's unnecessary to check whether current_is_keventd() before trying to schedule a work. Kill current_is_keventd(). (Lockdep annotations are broken. We need lock_map_acquire_read_norecurse()) Signed-off-by: Tejun Heo Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Christoph Lameter Cc: Tony Luck Cc: Andi Kleen Cc: Oleg Nesterov --- arch/ia64/kernel/smpboot.c | 2 +- arch/x86/kernel/smpboot.c | 2 +- include/linux/workqueue.h | 4 ++- kernel/workqueue.c | 63 ++++++++++------------------------------------ 4 files changed, 18 insertions(+), 53 deletions(-) diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 6a1380e90f87..99dcc85193c9 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -519,7 +519,7 @@ do_boot_cpu (int sapicid, int cpu) /* * We can't use kernel_thread since we must avoid to reschedule the child. 
*/ - if (!keventd_up() || current_is_keventd()) + if (!keventd_up()) c_idle.work.func(&c_idle.work); else { schedule_work(&c_idle.work); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c4f33b2e77d6..4d90f376e985 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -735,7 +735,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) goto do_rest; } - if (!keventd_up() || current_is_keventd()) + if (!keventd_up()) c_idle.work.func(&c_idle.work); else { schedule_work(&c_idle.work); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index b8f4ec45c40a..33e24e734d50 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -227,6 +227,9 @@ enum { WQ_SINGLE_CPU = 1 << 1, /* only single cpu at a time */ WQ_NON_REENTRANT = 1 << 2, /* guarantee non-reentrance */ WQ_RESCUER = 1 << 3, /* has an rescue worker */ + + WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ + WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, }; extern struct workqueue_struct * @@ -280,7 +283,6 @@ extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay) extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); extern int schedule_on_each_cpu(work_func_t func); -extern int current_is_keventd(void); extern int keventd_up(void); extern void init_workqueues(void); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0ad46523b423..4190e84cf995 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2398,7 +2398,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on); int schedule_on_each_cpu(work_func_t func) { int cpu; - int orig = -1; struct work_struct *works; works = alloc_percpu(struct work_struct); @@ -2407,23 +2406,12 @@ int schedule_on_each_cpu(work_func_t func) get_online_cpus(); - /* - * When running in keventd don't schedule a work item on - * itself. Can just call directly because the work queue is - * already bound. This also is faster. - */ - if (current_is_keventd()) - orig = raw_smp_processor_id(); - for_each_online_cpu(cpu) { struct work_struct *work = per_cpu_ptr(works, cpu); INIT_WORK(work, func); - if (cpu != orig) - schedule_work_on(cpu, work); + schedule_work_on(cpu, work); } - if (orig >= 0) - func(per_cpu_ptr(works, orig)); for_each_online_cpu(cpu) flush_work(per_cpu_ptr(works, cpu)); @@ -2494,41 +2482,6 @@ int keventd_up(void) return keventd_wq != NULL; } -int current_is_keventd(void) -{ - bool found = false; - unsigned int cpu; - - /* - * There no longer is one-to-one relation between worker and - * work queue and a worker task might be unbound from its cpu - * if the cpu was offlined. Match all busy workers. This - * function will go away once dynamic pool is implemented. 
- */ - for_each_possible_cpu(cpu) { - struct global_cwq *gcwq = get_gcwq(cpu); - struct worker *worker; - struct hlist_node *pos; - unsigned long flags; - int i; - - spin_lock_irqsave(&gcwq->lock, flags); - - for_each_busy_worker(worker, i, pos, gcwq) { - if (worker->task == current) { - found = true; - break; - } - } - - spin_unlock_irqrestore(&gcwq->lock, flags); - if (found) - break; - } - - return found; -} - static struct cpu_workqueue_struct *alloc_cwqs(void) { /* @@ -2576,6 +2529,16 @@ static void free_cwqs(struct cpu_workqueue_struct *cwqs) #endif } +static int wq_clamp_max_active(int max_active, const char *name) +{ + if (max_active < 1 || max_active > WQ_MAX_ACTIVE) + printk(KERN_WARNING "workqueue: max_active %d requested for %s " + "is out of range, clamping between %d and %d\n", + max_active, name, 1, WQ_MAX_ACTIVE); + + return clamp_val(max_active, 1, WQ_MAX_ACTIVE); +} + struct workqueue_struct *__create_workqueue_key(const char *name, unsigned int flags, int max_active, @@ -2585,7 +2548,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, struct workqueue_struct *wq; unsigned int cpu; - max_active = clamp_val(max_active, 1, INT_MAX); + max_active = wq_clamp_max_active(max_active, name); wq = kzalloc(sizeof(*wq), GFP_KERNEL); if (!wq) @@ -3324,6 +3287,6 @@ void __init init_workqueues(void) spin_unlock_irq(&gcwq->lock); } - keventd_wq = create_workqueue("events"); + keventd_wq = __create_workqueue("events", 0, WQ_DFL_ACTIVE); BUG_ON(!keventd_wq); } -- cgit v1.2.3-59-g8ed1b From d320c03830b17af64e4547075003b1eeb274bc6c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:14 +0200 Subject: workqueue: s/__create_workqueue()/alloc_workqueue()/, and add system workqueues This patch makes changes to make new workqueue features available to its users. * Now that workqueue is more featureful, there should be a public workqueue creation function which takes paramters to control them. Rename __create_workqueue() to alloc_workqueue() and make 0 max_active mean WQ_DFL_ACTIVE. In the long run, all create_workqueue_*() will be converted over to alloc_workqueue(). * To further unify access interface, rename keventd_wq to system_wq and export it. * Add system_long_wq and system_nrt_wq. The former is to host long running works separately (so that flush_scheduled_work() dosen't take so long) and the latter guarantees any queued work item is never executed in parallel by multiple CPUs. These will be used by future patches to update workqueue users. Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 40 +++++++++++++++++++++++++++++----------- kernel/workqueue.c | 42 +++++++++++++++++++++++++----------------- 2 files changed, 54 insertions(+), 28 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 33e24e734d50..48b7422f25ae 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -232,12 +232,31 @@ enum { WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, }; +/* + * System-wide workqueues which are always present. + * + * system_wq is the one used by schedule[_delayed]_work[_on](). + * Multi-CPU multi-threaded. There are users which expect relatively + * short queue flush time. Don't queue works which can run for too + * long. + * + * system_long_wq is similar to system_wq but may host long running + * works. Queue flushing might take relatively long. + * + * system_nrt_wq is non-reentrant and guarantees that any given work + * item is never executed in parallel by multiple CPUs. 
Queue + * flushing might take relatively long. + */ +extern struct workqueue_struct *system_wq; +extern struct workqueue_struct *system_long_wq; +extern struct workqueue_struct *system_nrt_wq; + extern struct workqueue_struct * -__create_workqueue_key(const char *name, unsigned int flags, int max_active, - struct lock_class_key *key, const char *lock_name); +__alloc_workqueue_key(const char *name, unsigned int flags, int max_active, + struct lock_class_key *key, const char *lock_name); #ifdef CONFIG_LOCKDEP -#define __create_workqueue(name, flags, max_active) \ +#define alloc_workqueue(name, flags, max_active) \ ({ \ static struct lock_class_key __key; \ const char *__lock_name; \ @@ -247,21 +266,20 @@ __create_workqueue_key(const char *name, unsigned int flags, int max_active, else \ __lock_name = #name; \ \ - __create_workqueue_key((name), (flags), (max_active), \ - &__key, __lock_name); \ + __alloc_workqueue_key((name), (flags), (max_active), \ + &__key, __lock_name); \ }) #else -#define __create_workqueue(name, flags, max_active) \ - __create_workqueue_key((name), (flags), (max_active), NULL, NULL) +#define alloc_workqueue(name, flags, max_active) \ + __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL) #endif #define create_workqueue(name) \ - __create_workqueue((name), WQ_RESCUER, 1) + alloc_workqueue((name), WQ_RESCUER, 1) #define create_freezeable_workqueue(name) \ - __create_workqueue((name), \ - WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1) + alloc_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1) #define create_singlethread_workqueue(name) \ - __create_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1) + alloc_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4190e84cf995..16ce617974d2 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -210,6 +210,13 @@ struct workqueue_struct { #endif }; +struct workqueue_struct *system_wq __read_mostly; +struct workqueue_struct *system_long_wq __read_mostly; +struct workqueue_struct *system_nrt_wq __read_mostly; +EXPORT_SYMBOL_GPL(system_wq); +EXPORT_SYMBOL_GPL(system_long_wq); +EXPORT_SYMBOL_GPL(system_nrt_wq); + #define for_each_busy_worker(worker, i, pos, gcwq) \ for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) @@ -2306,8 +2313,6 @@ int cancel_delayed_work_sync(struct delayed_work *dwork) } EXPORT_SYMBOL(cancel_delayed_work_sync); -static struct workqueue_struct *keventd_wq __read_mostly; - /** * schedule_work - put work task in global workqueue * @work: job to be done @@ -2321,7 +2326,7 @@ static struct workqueue_struct *keventd_wq __read_mostly; */ int schedule_work(struct work_struct *work) { - return queue_work(keventd_wq, work); + return queue_work(system_wq, work); } EXPORT_SYMBOL(schedule_work); @@ -2334,7 +2339,7 @@ EXPORT_SYMBOL(schedule_work); */ int schedule_work_on(int cpu, struct work_struct *work) { - return queue_work_on(cpu, keventd_wq, work); + return queue_work_on(cpu, system_wq, work); } EXPORT_SYMBOL(schedule_work_on); @@ -2349,7 +2354,7 @@ EXPORT_SYMBOL(schedule_work_on); int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) { - return queue_delayed_work(keventd_wq, dwork, delay); + return queue_delayed_work(system_wq, dwork, delay); } EXPORT_SYMBOL(schedule_delayed_work); @@ -2382,7 +2387,7 @@ EXPORT_SYMBOL(flush_delayed_work); int schedule_delayed_work_on(int cpu, struct delayed_work 
*dwork, unsigned long delay) { - return queue_delayed_work_on(cpu, keventd_wq, dwork, delay); + return queue_delayed_work_on(cpu, system_wq, dwork, delay); } EXPORT_SYMBOL(schedule_delayed_work_on); @@ -2447,7 +2452,7 @@ int schedule_on_each_cpu(work_func_t func) */ void flush_scheduled_work(void) { - flush_workqueue(keventd_wq); + flush_workqueue(system_wq); } EXPORT_SYMBOL(flush_scheduled_work); @@ -2479,7 +2484,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context); int keventd_up(void) { - return keventd_wq != NULL; + return system_wq != NULL; } static struct cpu_workqueue_struct *alloc_cwqs(void) @@ -2539,15 +2544,16 @@ static int wq_clamp_max_active(int max_active, const char *name) return clamp_val(max_active, 1, WQ_MAX_ACTIVE); } -struct workqueue_struct *__create_workqueue_key(const char *name, - unsigned int flags, - int max_active, - struct lock_class_key *key, - const char *lock_name) +struct workqueue_struct *__alloc_workqueue_key(const char *name, + unsigned int flags, + int max_active, + struct lock_class_key *key, + const char *lock_name) { struct workqueue_struct *wq; unsigned int cpu; + max_active = max_active ?: WQ_DFL_ACTIVE; max_active = wq_clamp_max_active(max_active, name); wq = kzalloc(sizeof(*wq), GFP_KERNEL); @@ -2626,7 +2632,7 @@ err: } return NULL; } -EXPORT_SYMBOL_GPL(__create_workqueue_key); +EXPORT_SYMBOL_GPL(__alloc_workqueue_key); /** * destroy_workqueue - safely terminate a workqueue @@ -2910,7 +2916,7 @@ static int __cpuinit trustee_thread(void *__gcwq) continue; debug_work_activate(rebind_work); - insert_work(get_cwq(gcwq->cpu, keventd_wq), rebind_work, + insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work, worker->scheduled.next, work_color_to_flags(WORK_NO_COLOR)); } @@ -3287,6 +3293,8 @@ void __init init_workqueues(void) spin_unlock_irq(&gcwq->lock); } - keventd_wq = __create_workqueue("events", 0, WQ_DFL_ACTIVE); - BUG_ON(!keventd_wq); + system_wq = alloc_workqueue("events", 0, 0); + system_long_wq = alloc_workqueue("events_long", 0, 0); + system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); + BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq); } -- cgit v1.2.3-59-g8ed1b From dcd989cb73ab0f7b722d64ab6516f101d9f43f88 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:14 +0200 Subject: workqueue: implement several utility APIs Implement the following utility APIs. workqueue_set_max_active() : adjust max_active of a wq workqueue_congested() : test whether a wq is contested work_cpu() : determine the last / current cpu of a work work_busy() : query whether a work is busy * Anton Blanchard fixed missing ret initialization in work_busy(). 
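One way a caller might use the new helpers (hypothetical driver code, not part
of this patch; per the docbook comments below, the test results are
unsynchronized and only advisory):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	static void foo_check_queue(struct workqueue_struct *wq,
				    struct work_struct *work)
	{
		unsigned int cpu = work_cpu(work);	/* NR_CPUS if never queued */
		unsigned int busy = work_busy(work);

		if (busy & WORK_BUSY_RUNNING)
			pr_info("foo: work still running, last cpu %u\n", cpu);

		/* delayed works piling up on this cpu?  give the queue more room */
		if (cpu < nr_cpu_ids && workqueue_congested(cpu, wq))
			workqueue_set_max_active(wq, 16);
	}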
Signed-off-by: Tejun Heo Cc: Anton Blanchard --- include/linux/workqueue.h | 11 ++++- kernel/workqueue.c | 108 +++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 117 insertions(+), 2 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 48b7422f25ae..0a7f79729380 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -61,6 +61,10 @@ enum { WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, WORK_STRUCT_NO_CPU = NR_CPUS << WORK_STRUCT_FLAG_BITS, + + /* bit mask for work_busy() return values */ + WORK_BUSY_PENDING = 1 << 0, + WORK_BUSY_RUNNING = 1 << 1, }; struct work_struct { @@ -307,9 +311,14 @@ extern void init_workqueues(void); int execute_in_process_context(work_func_t fn, struct execute_work *); extern int flush_work(struct work_struct *work); - extern int cancel_work_sync(struct work_struct *work); +extern void workqueue_set_max_active(struct workqueue_struct *wq, + int max_active); +extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq); +extern unsigned int work_cpu(struct work_struct *work); +extern unsigned int work_busy(struct work_struct *work); + /* * Kill off a pending schedule_delayed_work(). Note that the work callback * function may still be running on return from cancel_delayed_work(), unless diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 16ce617974d2..c1aa65c2ff38 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -203,7 +203,7 @@ struct workqueue_struct { cpumask_var_t mayday_mask; /* cpus requesting rescue */ struct worker *rescuer; /* I: rescue worker */ - int saved_max_active; /* I: saved cwq max_active */ + int saved_max_active; /* W: saved cwq max_active */ const char *name; /* I: workqueue name */ #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; @@ -2675,6 +2675,112 @@ void destroy_workqueue(struct workqueue_struct *wq) } EXPORT_SYMBOL_GPL(destroy_workqueue); +/** + * workqueue_set_max_active - adjust max_active of a workqueue + * @wq: target workqueue + * @max_active: new max_active value. + * + * Set max_active of @wq to @max_active. + * + * CONTEXT: + * Don't call from IRQ context. + */ +void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) +{ + unsigned int cpu; + + max_active = wq_clamp_max_active(max_active, wq->name); + + spin_lock(&workqueue_lock); + + wq->saved_max_active = max_active; + + for_each_possible_cpu(cpu) { + struct global_cwq *gcwq = get_gcwq(cpu); + + spin_lock_irq(&gcwq->lock); + + if (!(wq->flags & WQ_FREEZEABLE) || + !(gcwq->flags & GCWQ_FREEZING)) + get_cwq(gcwq->cpu, wq)->max_active = max_active; + + spin_unlock_irq(&gcwq->lock); + } + + spin_unlock(&workqueue_lock); +} +EXPORT_SYMBOL_GPL(workqueue_set_max_active); + +/** + * workqueue_congested - test whether a workqueue is congested + * @cpu: CPU in question + * @wq: target workqueue + * + * Test whether @wq's cpu workqueue for @cpu is congested. There is + * no synchronization around this function and the test result is + * unreliable and only useful as advisory hints or for debugging. + * + * RETURNS: + * %true if congested, %false otherwise. 
+ */ +bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) +{ + struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + + return !list_empty(&cwq->delayed_works); +} +EXPORT_SYMBOL_GPL(workqueue_congested); + +/** + * work_cpu - return the last known associated cpu for @work + * @work: the work of interest + * + * RETURNS: + * CPU number if @work was ever queued. NR_CPUS otherwise. + */ +unsigned int work_cpu(struct work_struct *work) +{ + struct global_cwq *gcwq = get_work_gcwq(work); + + return gcwq ? gcwq->cpu : NR_CPUS; +} +EXPORT_SYMBOL_GPL(work_cpu); + +/** + * work_busy - test whether a work is currently pending or running + * @work: the work to be tested + * + * Test whether @work is currently pending or running. There is no + * synchronization around this function and the test result is + * unreliable and only useful as advisory hints or for debugging. + * Especially for reentrant wqs, the pending state might hide the + * running state. + * + * RETURNS: + * OR'd bitmask of WORK_BUSY_* bits. + */ +unsigned int work_busy(struct work_struct *work) +{ + struct global_cwq *gcwq = get_work_gcwq(work); + unsigned long flags; + unsigned int ret = 0; + + if (!gcwq) + return false; + + spin_lock_irqsave(&gcwq->lock, flags); + + if (work_pending(work)) + ret |= WORK_BUSY_PENDING; + if (find_worker_executing_work(gcwq, work)) + ret |= WORK_BUSY_RUNNING; + + spin_unlock_irqrestore(&gcwq->lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(work_busy); + /* * CPU hotplug. * -- cgit v1.2.3-59-g8ed1b From 649027d73a6309ac34dc2886362e662bd73456dc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:14 +0200 Subject: workqueue: implement high priority workqueue This patch implements high priority workqueue which can be specified with WQ_HIGHPRI flag on creation. A high priority workqueue has the following properties. * A work queued to it is queued at the head of the worklist of the respective gcwq after other highpri works, while normal works are always appended at the end. * As long as there are highpri works on gcwq->worklist, [__]need_more_worker() remains %true and process_one_work() wakes up another worker before it start executing a work. The above two properties guarantee that works queued to high priority workqueues are dispatched to workers and start execution as soon as possible regardless of the state of other works. Signed-off-by: Tejun Heo Cc: Andi Kleen Cc: Andrew Morton --- include/linux/workqueue.h | 1 + kernel/workqueue.c | 70 +++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 65 insertions(+), 6 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0a7f79729380..006dcf7e808a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -231,6 +231,7 @@ enum { WQ_SINGLE_CPU = 1 << 1, /* only single cpu at a time */ WQ_NON_REENTRANT = 1 << 2, /* guarantee non-reentrance */ WQ_RESCUER = 1 << 3, /* has an rescue worker */ + WQ_HIGHPRI = 1 << 4, /* high priority */ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? 
*/ WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c1aa65c2ff38..5775717288d5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -43,6 +43,7 @@ enum { GCWQ_MANAGING_WORKERS = 1 << 1, /* managing workers */ GCWQ_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ GCWQ_FREEZING = 1 << 3, /* freeze in progress */ + GCWQ_HIGHPRI_PENDING = 1 << 4, /* highpri works on queue */ /* worker flags */ WORKER_STARTED = 1 << 0, /* started */ @@ -452,15 +453,19 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) * assume that they're being called with gcwq->lock held. */ +static bool __need_more_worker(struct global_cwq *gcwq) +{ + return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) || + gcwq->flags & GCWQ_HIGHPRI_PENDING; +} + /* * Need to wake up a worker? Called from anything but currently * running workers. */ static bool need_more_worker(struct global_cwq *gcwq) { - atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu); - - return !list_empty(&gcwq->worklist) && !atomic_read(nr_running); + return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq); } /* Can I start working? Called from busy but !running workers. */ @@ -733,6 +738,43 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq, work); } +/** + * gcwq_determine_ins_pos - find insertion position + * @gcwq: gcwq of interest + * @cwq: cwq a work is being queued for + * + * A work for @cwq is about to be queued on @gcwq, determine insertion + * position for the work. If @cwq is for HIGHPRI wq, the work is + * queued at the head of the queue but in FIFO order with respect to + * other HIGHPRI works; otherwise, at the end of the queue. This + * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that + * there are HIGHPRI works pending. + * + * CONTEXT: + * spin_lock_irq(gcwq->lock). + * + * RETURNS: + * Pointer to inserstion position. 
+ */ +static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq, + struct cpu_workqueue_struct *cwq) +{ + struct work_struct *twork; + + if (likely(!(cwq->wq->flags & WQ_HIGHPRI))) + return &gcwq->worklist; + + list_for_each_entry(twork, &gcwq->worklist, entry) { + struct cpu_workqueue_struct *tcwq = get_work_cwq(twork); + + if (!(tcwq->wq->flags & WQ_HIGHPRI)) + break; + } + + gcwq->flags |= GCWQ_HIGHPRI_PENDING; + return &twork->entry; +} + /** * insert_work - insert a work into gcwq * @cwq: cwq @work belongs to @@ -770,7 +812,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq, */ smp_mb(); - if (!atomic_read(get_gcwq_nr_running(gcwq->cpu))) + if (__need_more_worker(gcwq)) wake_up_worker(gcwq); } @@ -887,7 +929,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, if (likely(cwq->nr_active < cwq->max_active)) { cwq->nr_active++; - worklist = &gcwq->worklist; + worklist = gcwq_determine_ins_pos(gcwq, cwq); } else worklist = &cwq->delayed_works; @@ -1526,8 +1568,9 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) { struct work_struct *work = list_first_entry(&cwq->delayed_works, struct work_struct, entry); + struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); - move_linked_works(work, &cwq->gcwq->worklist, NULL); + move_linked_works(work, pos, NULL); cwq->nr_active++; } @@ -1634,6 +1677,21 @@ static void process_one_work(struct worker *worker, struct work_struct *work) set_work_cpu(work, gcwq->cpu); list_del_init(&work->entry); + /* + * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI, + * wake up another worker; otherwise, clear HIGHPRI_PENDING. + */ + if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) { + struct work_struct *nwork = list_first_entry(&gcwq->worklist, + struct work_struct, entry); + + if (!list_empty(&gcwq->worklist) && + get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI) + wake_up_worker(gcwq); + else + gcwq->flags &= ~GCWQ_HIGHPRI_PENDING; + } + spin_unlock_irq(&gcwq->lock); work_clear_pending(work); -- cgit v1.2.3-59-g8ed1b From fb0e7beb5c1b6fb4da786ba709d7138373d5fb22 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 29 Jun 2010 10:07:15 +0200 Subject: workqueue: implement cpu intensive workqueue This patch implements cpu intensive workqueue which can be specified with WQ_CPU_INTENSIVE flag on creation. Works queued to a cpu intensive workqueue don't participate in concurrency management. IOW, it doesn't contribute to gcwq->nr_running and thus doesn't delay excution of other works. Note that although cpu intensive works won't delay other works, they can be delayed by other works. Combine with WQ_HIGHPRI to avoid being delayed by other works too. As the name suggests this is useful when using workqueue for cpu intensive works. Workers executing cpu intensive works are not considered for workqueue concurrency management and left for the scheduler to manage. Signed-off-by: Tejun Heo Cc: Andrew Morton --- include/linux/workqueue.h | 1 + kernel/workqueue.c | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 006dcf7e808a..3f36d37ac5ba 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -232,6 +232,7 @@ enum { WQ_NON_REENTRANT = 1 << 2, /* guarantee non-reentrance */ WQ_RESCUER = 1 << 3, /* has an rescue worker */ WQ_HIGHPRI = 1 << 4, /* high priority */ + WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? 
*/ WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 5775717288d5..6fa847c5c5e9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -52,8 +52,10 @@ enum { WORKER_PREP = 1 << 3, /* preparing to run works */ WORKER_ROGUE = 1 << 4, /* not bound to any cpu */ WORKER_REBIND = 1 << 5, /* mom is home, come back */ + WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ - WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND, + WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND | + WORKER_CPU_INTENSIVE, /* gcwq->trustee_state */ TRUSTEE_START = 0, /* start */ @@ -1641,6 +1643,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) struct cpu_workqueue_struct *cwq = get_work_cwq(work); struct global_cwq *gcwq = cwq->gcwq; struct hlist_head *bwh = busy_worker_head(gcwq, work); + bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; work_func_t f = work->func; int work_color; struct worker *collision; @@ -1692,6 +1695,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work) gcwq->flags &= ~GCWQ_HIGHPRI_PENDING; } + /* + * CPU intensive works don't participate in concurrency + * management. They're the scheduler's responsibility. + */ + if (unlikely(cpu_intensive)) + worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); + spin_unlock_irq(&gcwq->lock); work_clear_pending(work); @@ -1713,6 +1723,10 @@ static void process_one_work(struct worker *worker, struct work_struct *work) spin_lock_irq(&gcwq->lock); + /* clear cpu intensive status */ + if (unlikely(cpu_intensive)) + worker_clr_flags(worker, WORKER_CPU_INTENSIVE); + /* we're done with it, release */ hlist_del_init(&worker->hentry); worker->current_work = NULL; -- cgit v1.2.3-59-g8ed1b From cb444766996395d4370bcc17ec895dd4e13ceb72 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:50 +0200 Subject: workqueue: use worker_set/clr_flags() only from worker itself worker_set/clr_flags() assume that if none of NOT_RUNNING flags is set the worker must be contributing to nr_running which is only true if the worker is actually running. As when called from self, it is guaranteed that the worker is running, those functions can be safely used from the worker itself and they aren't necessary from other places anyway. Make the following changes to fix the bug. * Make worker_set/clr_flags() whine if not called from self. * Convert all places which called those functions from other tasks to manipulate flags directly. * Make trustee_thread() directly clear nr_running after setting WORKER_ROGUE on all workers. This is the only place where nr_running manipulation is necessary outside of workers themselves. * While at it, add sanity check for nr_running in worker_enter_idle(). Signed-off-by: Tejun Heo --- kernel/workqueue.c | 47 ++++++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 6fa847c5c5e9..558733801ac0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -601,7 +601,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, /** * worker_set_flags - set worker flags and adjust nr_running accordingly - * @worker: worker to set flags for + * @worker: self * @flags: flags to set * @wakeup: wakeup an idle worker if necessary * @@ -609,14 +609,16 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, * nr_running becomes zero and @wakeup is %true, an idle worker is * woken up. 
* - * LOCKING: - * spin_lock_irq(gcwq->lock). + * CONTEXT: + * spin_lock_irq(gcwq->lock) */ static inline void worker_set_flags(struct worker *worker, unsigned int flags, bool wakeup) { struct global_cwq *gcwq = worker->gcwq; + WARN_ON_ONCE(worker->task != current); + /* * If transitioning into NOT_RUNNING, adjust nr_running and * wake up an idle worker as necessary if requested by @@ -639,19 +641,21 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags, /** * worker_clr_flags - clear worker flags and adjust nr_running accordingly - * @worker: worker to set flags for + * @worker: self * @flags: flags to clear * * Clear @flags in @worker->flags and adjust nr_running accordingly. * - * LOCKING: - * spin_lock_irq(gcwq->lock). + * CONTEXT: + * spin_lock_irq(gcwq->lock) */ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) { struct global_cwq *gcwq = worker->gcwq; unsigned int oflags = worker->flags; + WARN_ON_ONCE(worker->task != current); + worker->flags &= ~flags; /* if transitioning out of NOT_RUNNING, increment nr_running */ @@ -1073,7 +1077,8 @@ static void worker_enter_idle(struct worker *worker) BUG_ON(!list_empty(&worker->entry) && (worker->hentry.next || worker->hentry.pprev)); - worker_set_flags(worker, WORKER_IDLE, false); + /* can't use worker_set_flags(), also called from start_worker() */ + worker->flags |= WORKER_IDLE; gcwq->nr_idle++; worker->last_active = jiffies; @@ -1086,6 +1091,10 @@ static void worker_enter_idle(struct worker *worker) jiffies + IDLE_WORKER_TIMEOUT); } else wake_up_all(&gcwq->trustee_wait); + + /* sanity check nr_running */ + WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle && + atomic_read(get_gcwq_nr_running(gcwq->cpu))); } /** @@ -1270,7 +1279,7 @@ fail: */ static void start_worker(struct worker *worker) { - worker_set_flags(worker, WORKER_STARTED, false); + worker->flags |= WORKER_STARTED; worker->gcwq->nr_workers++; worker_enter_idle(worker); wake_up_process(worker->task); @@ -1300,7 +1309,7 @@ static void destroy_worker(struct worker *worker) gcwq->nr_idle--; list_del_init(&worker->entry); - worker_set_flags(worker, WORKER_DIE, false); + worker->flags |= WORKER_DIE; spin_unlock_irq(&gcwq->lock); @@ -2979,10 +2988,10 @@ static int __cpuinit trustee_thread(void *__gcwq) gcwq->flags |= GCWQ_MANAGING_WORKERS; list_for_each_entry(worker, &gcwq->idle_list, entry) - worker_set_flags(worker, WORKER_ROGUE, false); + worker->flags |= WORKER_ROGUE; for_each_busy_worker(worker, i, pos, gcwq) - worker_set_flags(worker, WORKER_ROGUE, false); + worker->flags |= WORKER_ROGUE; /* * Call schedule() so that we cross rq->lock and thus can @@ -2995,12 +3004,12 @@ static int __cpuinit trustee_thread(void *__gcwq) spin_lock_irq(&gcwq->lock); /* - * Sched callbacks are disabled now. gcwq->nr_running should - * be zero and will stay that way, making need_more_worker() - * and keep_working() always return true as long as the - * worklist is not empty. + * Sched callbacks are disabled now. Zap nr_running. After + * this, nr_running stays zero and need_more_worker() and + * keep_working() are always true as long as the worklist is + * not empty. 
*/ - WARN_ON_ONCE(atomic_read(get_gcwq_nr_running(gcwq->cpu)) != 0); + atomic_set(get_gcwq_nr_running(gcwq->cpu), 0); spin_unlock_irq(&gcwq->lock); del_timer_sync(&gcwq->idle_timer); @@ -3046,7 +3055,7 @@ static int __cpuinit trustee_thread(void *__gcwq) worker = create_worker(gcwq, false); spin_lock_irq(&gcwq->lock); if (worker) { - worker_set_flags(worker, WORKER_ROGUE, false); + worker->flags |= WORKER_ROGUE; start_worker(worker); } } @@ -3085,8 +3094,8 @@ static int __cpuinit trustee_thread(void *__gcwq) * operations. Use a separate flag to mark that * rebinding is scheduled. */ - worker_set_flags(worker, WORKER_REBIND, false); - worker_clr_flags(worker, WORKER_ROGUE); + worker->flags |= WORKER_REBIND; + worker->flags &= ~WORKER_ROGUE; /* queue rebind_work, wq doesn't matter, use the default one */ if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, -- cgit v1.2.3-59-g8ed1b From 4ce48b37bfedc2bc11e61eae76784887e88b922c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:51 +0200 Subject: workqueue: fix race condition in flush_workqueue() When one flusher is cascading to the next flusher, it first sets wq->first_flusher to the next one and sets up the next flush cycle. If there's nothing to do for the next cycle, it clears wq->flush_flusher and proceeds to the one after that. If the woken up flusher checks wq->first_flusher before it gets cleared, it will incorrectly assume the role of the first flusher, which triggers BUG_ON() sanity check. Fix it by checking wq->first_flusher again after grabbing the mutex. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 558733801ac0..b59c946433f4 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2138,6 +2138,10 @@ void flush_workqueue(struct workqueue_struct *wq) mutex_lock(&wq->flush_mutex); + /* we might have raced, check again with mutex held */ + if (wq->first_flusher != &this_flusher) + goto out_unlock; + wq->first_flusher = NULL; BUG_ON(!list_empty(&this_flusher.list)); -- cgit v1.2.3-59-g8ed1b From a1e453d2799760ecf2e09ecd45b80edbe7ff540e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:51 +0200 Subject: workqueue: fix incorrect cpu number BUG_ON() in get_work_gcwq() get_work_gcwq() was incorrectly triggering BUG_ON() if cpu number is equal to or higher than num_possible_cpus() instead of nr_cpu_ids. Fix it. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b59c946433f4..0c485a538099 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -445,7 +445,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) if (cpu == NR_CPUS) return NULL; - BUG_ON(cpu >= num_possible_cpus()); + BUG_ON(cpu >= nr_cpu_ids); return get_gcwq(cpu); } -- cgit v1.2.3-59-g8ed1b From d313dd85ad846bc768d58e9ceb28588f917f4c9a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:51 +0200 Subject: workqueue: fix worker management invocation without pending works When there's no pending work to do, worker_thread() goes back to sleep after waking up without checking whether worker management is necessary. This means that idle worker exit requests can be ignored if the gcwq stays empty. Fix it by making worker_thread() always check whether worker management is necessary before going to sleep. 
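The resulting worker_thread() flow is roughly the following (simplified flow
outline for illustration only; it omits locking and WORKER_PREP handling and
is not the literal code):

	recheck:
		if (!need_more_worker(gcwq))
			goto sleep;
		if (!may_start_working(gcwq) && manage_workers(worker))
			goto recheck;
		/* ... process works while keep_working(gcwq) ... */
	sleep:
		/* manage even when there was no work to process */
		if (need_to_manage_workers(gcwq) && manage_workers(worker))
			goto recheck;
		worker_enter_idle(worker);
		schedule();
		/* woken up -- start over */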
Signed-off-by: Tejun Heo --- kernel/workqueue.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0c485a538099..2eb9fbddf5c6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1832,10 +1832,10 @@ recheck: } while (keep_working(gcwq)); worker_set_flags(worker, WORKER_PREP, false); - +sleep: if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker)) goto recheck; -sleep: + /* * gcwq->lock is held and there's no work to process and no * need to manage, sleep. Workers are woken up only while -- cgit v1.2.3-59-g8ed1b From ad72cf9885c536e3adae03f8337557ac9dd1e4bb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:52 +0200 Subject: libata: take advantage of cmwq and remove concurrency limitations libata has two concurrency related limitations. a. ata_wq which is used for polling PIO has single thread per CPU. If there are multiple devices doing polling PIO on the same CPU, they can't be executed simultaneously. b. ata_aux_wq which is used for SCSI probing has single thread. In cases where SCSI probing is stalled for extended period of time which is possible for ATAPI devices, this will stall all probing. #a is solved by increasing maximum concurrency of ata_wq. Please note that polling PIO might be used under allocation path and thus needs to be served by a separate wq with a rescuer. #b is solved by using the default wq instead and achieving exclusion via per-port mutex. Signed-off-by: Tejun Heo Acked-by: Jeff Garzik --- drivers/ata/libata-core.c | 20 +++++--------------- drivers/ata/libata-eh.c | 4 ++-- drivers/ata/libata-scsi.c | 10 ++++++---- drivers/ata/libata-sff.c | 9 +-------- drivers/ata/libata.h | 1 - include/linux/libata.h | 1 + 6 files changed, 15 insertions(+), 30 deletions(-) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ddf8e4862787..4f78741692dc 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -98,8 +98,6 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev); unsigned int ata_print_id = 1; -struct workqueue_struct *ata_aux_wq; - struct ata_force_param { const char *name; unsigned int cbl; @@ -5611,6 +5609,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host) ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; #endif + mutex_init(&ap->scsi_scan_mutex); INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); INIT_LIST_HEAD(&ap->eh_done_q); @@ -6549,29 +6548,20 @@ static int __init ata_init(void) ata_parse_force_param(); - ata_aux_wq = create_singlethread_workqueue("ata_aux"); - if (!ata_aux_wq) - goto fail; - rc = ata_sff_init(); - if (rc) - goto fail; + if (rc) { + kfree(ata_force_tbl); + return rc; + } printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); return 0; - -fail: - kfree(ata_force_tbl); - if (ata_aux_wq) - destroy_workqueue(ata_aux_wq); - return rc; } static void __exit ata_exit(void) { ata_sff_exit(); kfree(ata_force_tbl); - destroy_workqueue(ata_aux_wq); } subsys_initcall(ata_init); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index f77a67303f8b..4d2af824dd23 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -727,7 +727,7 @@ void ata_scsi_error(struct Scsi_Host *host) if (ap->pflags & ATA_PFLAG_LOADING) ap->pflags &= ~ATA_PFLAG_LOADING; else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) - queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0); + schedule_delayed_work(&ap->hotplug_task, 
0); if (ap->pflags & ATA_PFLAG_RECOVERED) ata_port_printk(ap, KERN_INFO, "EH complete\n"); @@ -2944,7 +2944,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, ehc->i.flags |= ATA_EHI_SETMODE; /* schedule the scsi_rescan_device() here */ - queue_work(ata_aux_wq, &(ap->scsi_rescan_task)); + schedule_work(&(ap->scsi_rescan_task)); } else if (dev->class == ATA_DEV_UNKNOWN && ehc->tries[dev->devno] && ata_class_enabled(ehc->classes[dev->devno])) { diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index a54273d2c3c6..d75c9c479d1a 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3435,7 +3435,7 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync) " switching to async\n"); } - queue_delayed_work(ata_aux_wq, &ap->hotplug_task, + queue_delayed_work(system_long_wq, &ap->hotplug_task, round_jiffies_relative(HZ)); } @@ -3582,6 +3582,7 @@ void ata_scsi_hotplug(struct work_struct *work) } DPRINTK("ENTER\n"); + mutex_lock(&ap->scsi_scan_mutex); /* Unplug detached devices. We cannot use link iterator here * because PMP links have to be scanned even if PMP is @@ -3595,6 +3596,7 @@ void ata_scsi_hotplug(struct work_struct *work) /* scan for new ones */ ata_scsi_scan_host(ap, 0); + mutex_unlock(&ap->scsi_scan_mutex); DPRINTK("EXIT\n"); } @@ -3673,9 +3675,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, * @work: Pointer to ATA port to perform scsi_rescan_device() * * After ATA pass thru (SAT) commands are executed successfully, - * libata need to propagate the changes to SCSI layer. This - * function must be executed from ata_aux_wq such that sdev - * attach/detach don't race with rescan. + * libata need to propagate the changes to SCSI layer. * * LOCKING: * Kernel thread context (may sleep). @@ -3688,6 +3688,7 @@ void ata_scsi_dev_rescan(struct work_struct *work) struct ata_device *dev; unsigned long flags; + mutex_lock(&ap->scsi_scan_mutex); spin_lock_irqsave(ap->lock, flags); ata_for_each_link(link, ap, EDGE) { @@ -3707,6 +3708,7 @@ void ata_scsi_dev_rescan(struct work_struct *work) } spin_unlock_irqrestore(ap->lock, flags); + mutex_unlock(&ap->scsi_scan_mutex); } /** diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index efa4a18cfb9d..674c1436491f 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -3318,14 +3318,7 @@ void ata_sff_port_init(struct ata_port *ap) int __init ata_sff_init(void) { - /* - * FIXME: In UP case, there is only one workqueue thread and if you - * have more than one PIO device, latency is bloody awful, with - * occasional multi-second "hiccups" as one PIO device waits for - * another. It's an ugly wart that users DO occasionally complain - * about; luckily most users have at most one PIO polled device. 
- */ - ata_sff_wq = create_workqueue("ata_sff"); + ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE); if (!ata_sff_wq) return -ENOMEM; diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 4b84ed60324a..9ce1ecc63e39 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -54,7 +54,6 @@ enum { }; extern unsigned int ata_print_id; -extern struct workqueue_struct *ata_aux_wq; extern int atapi_passthru16; extern int libata_fua; extern int libata_noacpi; diff --git a/include/linux/libata.h b/include/linux/libata.h index b85f3ff34d7d..f010f18a0f86 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -751,6 +751,7 @@ struct ata_port { struct ata_host *host; struct device *dev; + struct mutex scsi_scan_mutex; struct delayed_work hotplug_task; struct work_struct scsi_rescan_task; -- cgit v1.2.3-59-g8ed1b From bdbc5dd7de5d07d6c9d3536e598956165a031d4c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:51 +0200 Subject: workqueue: prepare for WQ_UNBOUND implementation In preparation of WQ_UNBOUND addition, make the following changes. * Add WORK_CPU_* constants for pseudo cpu id numbers used (currently only WORK_CPU_NONE) and use them instead of NR_CPUS. This is to allow another pseudo cpu id for unbound cpu. * Reorder WQ_* flags. * Make workqueue_struct->cpu_wq a union which contains a percpu pointer, regular pointer and an unsigned long value and use kzalloc/kfree() in UP allocation path. This will be used to implement unbound workqueues which will use only one cwq on SMPs. * Move alloc_cwqs() allocation after initialization of wq fields, so that alloc_cwqs() has access to wq->flags. * Trivial relocation of wq local variables in freeze functions. These changes don't cause any functional change. Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 10 ++++-- kernel/workqueue.c | 83 ++++++++++++++++++++++++----------------------- 2 files changed, 50 insertions(+), 43 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 3f36d37ac5ba..139069a6286c 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -50,6 +50,10 @@ enum { WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, WORK_NO_COLOR = WORK_NR_COLORS, + /* special cpu IDs */ + WORK_CPU_NONE = NR_CPUS, + WORK_CPU_LAST = WORK_CPU_NONE, + /* * Reserve 6 bits off of cwq pointer w/ debugobjects turned * off. 
This makes cwqs aligned to 64 bytes which isn't too @@ -60,7 +64,7 @@ enum { WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, - WORK_STRUCT_NO_CPU = NR_CPUS << WORK_STRUCT_FLAG_BITS, + WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS, /* bit mask for work_busy() return values */ WORK_BUSY_PENDING = 1 << 0, @@ -227,9 +231,9 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) enum { - WQ_FREEZEABLE = 1 << 0, /* freeze during suspend */ + WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ WQ_SINGLE_CPU = 1 << 1, /* only single cpu at a time */ - WQ_NON_REENTRANT = 1 << 2, /* guarantee non-reentrance */ + WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ WQ_RESCUER = 1 << 3, /* has an rescue worker */ WQ_HIGHPRI = 1 << 4, /* high priority */ WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2eb9fbddf5c6..a105ddf55f79 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -190,7 +190,11 @@ struct wq_flusher { */ struct workqueue_struct { unsigned int flags; /* I: WQ_* flags */ - struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */ + union { + struct cpu_workqueue_struct __percpu *pcpu; + struct cpu_workqueue_struct *single; + unsigned long v; + } cpu_wq; /* I: cwq's */ struct list_head list; /* W: list of all workqueues */ struct mutex flush_mutex; /* protects wq flushing */ @@ -362,7 +366,11 @@ static atomic_t *get_gcwq_nr_running(unsigned int cpu) static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, struct workqueue_struct *wq) { - return per_cpu_ptr(wq->cpu_wq, cpu); +#ifndef CONFIG_SMP + return wq->cpu_wq.single; +#else + return per_cpu_ptr(wq->cpu_wq.pcpu, cpu); +#endif } static unsigned int work_color_to_flags(int color) @@ -442,7 +450,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) return ((struct cpu_workqueue_struct *)data)->gcwq; cpu = data >> WORK_STRUCT_FLAG_BITS; - if (cpu == NR_CPUS) + if (cpu == WORK_CPU_NONE) return NULL; BUG_ON(cpu >= nr_cpu_ids); @@ -846,7 +854,7 @@ static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq) */ if (likely(!(gcwq->flags & GCWQ_FREEZING))) { smp_wmb(); /* paired with cmpxchg() in __queue_work() */ - wq->single_cpu = NR_CPUS; + wq->single_cpu = WORK_CPU_NONE; } } @@ -904,7 +912,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, */ retry: cpu = wq->single_cpu; - arbitrate = cpu == NR_CPUS; + arbitrate = cpu == WORK_CPU_NONE; if (arbitrate) cpu = req_cpu; @@ -918,7 +926,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, * visible on the new cpu after this point. */ if (arbitrate) - cmpxchg(&wq->single_cpu, NR_CPUS, cpu); + cmpxchg(&wq->single_cpu, WORK_CPU_NONE, cpu); if (unlikely(wq->single_cpu != cpu)) { spin_unlock_irqrestore(&gcwq->lock, flags); @@ -2572,7 +2580,7 @@ int keventd_up(void) return system_wq != NULL; } -static struct cpu_workqueue_struct *alloc_cwqs(void) +static int alloc_cwqs(struct workqueue_struct *wq) { /* * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS. 
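For illustration, the UP branch of the reworked alloc_cwqs() relies on an over-allocate-and-align idiom. As a standalone sketch (generic helper names, not part of the patch), the trick looks like this:

	static void *alloc_aligned(size_t size, size_t align)
	{
		/* over-allocate so the object can be aligned by hand */
		void *raw = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
		void *obj;

		if (!raw)
			return NULL;
		obj = PTR_ALIGN(raw, align);
		/* stash the original pointer behind the object for the free path */
		*(void **)((char *)obj + size) = raw;
		return obj;
	}

	static void free_aligned(void *obj, size_t size)
	{
		if (obj)
			kfree(*(void **)((char *)obj + size));
	}

The hunks below apply the same idea directly to wq->cpu_wq.single.
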
@@ -2582,40 +2590,36 @@ static struct cpu_workqueue_struct *alloc_cwqs(void) const size_t size = sizeof(struct cpu_workqueue_struct); const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, __alignof__(unsigned long long)); - struct cpu_workqueue_struct *cwqs; #ifndef CONFIG_SMP void *ptr; /* - * On UP, percpu allocator doesn't honor alignment parameter - * and simply uses arch-dependent default. Allocate enough - * room to align cwq and put an extra pointer at the end - * pointing back to the originally allocated pointer which - * will be used for free. - * - * FIXME: This really belongs to UP percpu code. Update UP - * percpu code to honor alignment and remove this ugliness. + * Allocate enough room to align cwq and put an extra pointer + * at the end pointing back to the originally allocated + * pointer which will be used for free. */ - ptr = __alloc_percpu(size + align + sizeof(void *), 1); - cwqs = PTR_ALIGN(ptr, align); - *(void **)per_cpu_ptr(cwqs + 1, 0) = ptr; + ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); + if (ptr) { + wq->cpu_wq.single = PTR_ALIGN(ptr, align); + *(void **)(wq->cpu_wq.single + 1) = ptr; + } #else - /* On SMP, percpu allocator can do it itself */ - cwqs = __alloc_percpu(size, align); + /* On SMP, percpu allocator can align itself */ + wq->cpu_wq.pcpu = __alloc_percpu(size, align); #endif /* just in case, make sure it's actually aligned */ - BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align)); - return cwqs; + BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); + return wq->cpu_wq.v ? 0 : -ENOMEM; } -static void free_cwqs(struct cpu_workqueue_struct *cwqs) +static void free_cwqs(struct workqueue_struct *wq) { #ifndef CONFIG_SMP /* on UP, the pointer to free is stored right after the cwq */ - if (cwqs) - free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0)); + if (wq->cpu_wq.single) + kfree(*(void **)(wq->cpu_wq.single + 1)); #else - free_percpu(cwqs); + free_percpu(wq->cpu_wq.pcpu); #endif } @@ -2645,22 +2649,21 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, if (!wq) goto err; - wq->cpu_wq = alloc_cwqs(); - if (!wq->cpu_wq) - goto err; - wq->flags = flags; wq->saved_max_active = max_active; mutex_init(&wq->flush_mutex); atomic_set(&wq->nr_cwqs_to_flush, 0); INIT_LIST_HEAD(&wq->flusher_queue); INIT_LIST_HEAD(&wq->flusher_overflow); - wq->single_cpu = NR_CPUS; + wq->single_cpu = WORK_CPU_NONE; wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); INIT_LIST_HEAD(&wq->list); + if (alloc_cwqs(wq) < 0) + goto err; + for_each_possible_cpu(cpu) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct global_cwq *gcwq = get_gcwq(cpu); @@ -2710,7 +2713,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, return wq; err: if (wq) { - free_cwqs(wq->cpu_wq); + free_cwqs(wq); free_cpumask_var(wq->mayday_mask); kfree(wq->rescuer); kfree(wq); @@ -2755,7 +2758,7 @@ void destroy_workqueue(struct workqueue_struct *wq) free_cpumask_var(wq->mayday_mask); } - free_cwqs(wq->cpu_wq); + free_cwqs(wq); kfree(wq); } EXPORT_SYMBOL_GPL(destroy_workqueue); @@ -2821,13 +2824,13 @@ EXPORT_SYMBOL_GPL(workqueue_congested); * @work: the work of interest * * RETURNS: - * CPU number if @work was ever queued. NR_CPUS otherwise. + * CPU number if @work was ever queued. WORK_CPU_NONE otherwise. */ unsigned int work_cpu(struct work_struct *work) { struct global_cwq *gcwq = get_work_gcwq(work); - return gcwq ? gcwq->cpu : NR_CPUS; + return gcwq ? 
gcwq->cpu : WORK_CPU_NONE; } EXPORT_SYMBOL_GPL(work_cpu); @@ -3300,7 +3303,6 @@ EXPORT_SYMBOL_GPL(work_on_cpu); */ void freeze_workqueues_begin(void) { - struct workqueue_struct *wq; unsigned int cpu; spin_lock(&workqueue_lock); @@ -3310,6 +3312,7 @@ void freeze_workqueues_begin(void) for_each_possible_cpu(cpu) { struct global_cwq *gcwq = get_gcwq(cpu); + struct workqueue_struct *wq; spin_lock_irq(&gcwq->lock); @@ -3344,7 +3347,6 @@ void freeze_workqueues_begin(void) */ bool freeze_workqueues_busy(void) { - struct workqueue_struct *wq; unsigned int cpu; bool busy = false; @@ -3353,6 +3355,7 @@ bool freeze_workqueues_busy(void) BUG_ON(!workqueue_freezing); for_each_possible_cpu(cpu) { + struct workqueue_struct *wq; /* * nr_active is monotonically decreasing. It's safe * to peek without lock. @@ -3386,7 +3389,6 @@ out_unlock: */ void thaw_workqueues(void) { - struct workqueue_struct *wq; unsigned int cpu; spin_lock(&workqueue_lock); @@ -3396,6 +3398,7 @@ void thaw_workqueues(void) for_each_possible_cpu(cpu) { struct global_cwq *gcwq = get_gcwq(cpu); + struct workqueue_struct *wq; spin_lock_irq(&gcwq->lock); @@ -3443,7 +3446,7 @@ void __init init_workqueues(void) * sure cpu number won't overflow into kernel pointer area so * that they can be distinguished. */ - BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET); + BUILD_BUG_ON(WORK_CPU_LAST << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET); hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); -- cgit v1.2.3-59-g8ed1b From f34217977d717385a3e9fd7018ac39fade3964c0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:51 +0200 Subject: workqueue: implement unbound workqueue This patch implements unbound workqueue which can be specified with WQ_UNBOUND flag on creation. An unbound workqueue has the following properties. * It uses a dedicated gcwq with a pseudo CPU number WORK_CPU_UNBOUND. This gcwq is always online and disassociated. * Workers are not bound to any CPU and not concurrency managed. Works are dispatched to workers as soon as possible and the only applied limitation is @max_active. IOW, all unbound workqeueues are implicitly high priority. Unbound workqueues can be used as simple execution context provider. Contexts unbound to any cpu are served as soon as possible. Signed-off-by: Tejun Heo Cc: Arjan van de Ven Cc: David Howells --- include/linux/workqueue.h | 15 +++- kernel/workqueue.c | 218 +++++++++++++++++++++++++++++++++------------- 2 files changed, 173 insertions(+), 60 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 139069a6286c..67ce734747f6 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -51,7 +51,8 @@ enum { WORK_NO_COLOR = WORK_NR_COLORS, /* special cpu IDs */ - WORK_CPU_NONE = NR_CPUS, + WORK_CPU_UNBOUND = NR_CPUS, + WORK_CPU_NONE = NR_CPUS + 1, WORK_CPU_LAST = WORK_CPU_NONE, /* @@ -237,11 +238,17 @@ enum { WQ_RESCUER = 1 << 3, /* has an rescue worker */ WQ_HIGHPRI = 1 << 4, /* high priority */ WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ + WQ_UNBOUND = 1 << 6, /* not bound to any cpu */ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ + WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, }; +/* unbound wq's aren't per-cpu, scale max_active according to #cpus */ +#define WQ_UNBOUND_MAX_ACTIVE \ + max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU) + /* * System-wide workqueues which are always present. 
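As a minimal usage sketch of an unbound workqueue acting as a plain execution context provider (hypothetical names, not part of the patch):

	struct workqueue_struct *my_wq;

	my_wq = alloc_workqueue("my_unbound", WQ_UNBOUND, 0);	/* 0 selects the default max_active */
	if (!my_wq)
		return -ENOMEM;

	queue_work(my_wq, &my_work);	/* served by the first available unbound worker */
	flush_workqueue(my_wq);
	destroy_workqueue(my_wq);

Callers that do not need a dedicated queue can simply do queue_work(system_unbound_wq, &my_work) instead.
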
* @@ -256,10 +263,16 @@ enum { * system_nrt_wq is non-reentrant and guarantees that any given work * item is never executed in parallel by multiple CPUs. Queue * flushing might take relatively long. + * + * system_unbound_wq is unbound workqueue. Workers are not bound to + * any specific CPU, not concurrency managed, and all queued works are + * executed immediately as long as max_active limit is not reached and + * resources are available. */ extern struct workqueue_struct *system_wq; extern struct workqueue_struct *system_long_wq; extern struct workqueue_struct *system_nrt_wq; +extern struct workqueue_struct *system_unbound_wq; extern struct workqueue_struct * __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a105ddf55f79..4608563cdd63 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -53,9 +53,10 @@ enum { WORKER_ROGUE = 1 << 4, /* not bound to any cpu */ WORKER_REBIND = 1 << 5, /* mom is home, come back */ WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ + WORKER_UNBOUND = 1 << 7, /* worker is unbound */ WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND | - WORKER_CPU_INTENSIVE, + WORKER_CPU_INTENSIVE | WORKER_UNBOUND, /* gcwq->trustee_state */ TRUSTEE_START = 0, /* start */ @@ -96,7 +97,7 @@ enum { * X: During normal operation, modification requires gcwq->lock and * should be done only from local cpu. Either disabling preemption * on local cpu or grabbing gcwq->lock is enough for read access. - * While trustee is in charge, it's identical to L. + * If GCWQ_DISASSOCIATED is set, it's identical to L. * * F: wq->flush_mutex protected. * @@ -220,14 +221,52 @@ struct workqueue_struct { struct workqueue_struct *system_wq __read_mostly; struct workqueue_struct *system_long_wq __read_mostly; struct workqueue_struct *system_nrt_wq __read_mostly; +struct workqueue_struct *system_unbound_wq __read_mostly; EXPORT_SYMBOL_GPL(system_wq); EXPORT_SYMBOL_GPL(system_long_wq); EXPORT_SYMBOL_GPL(system_nrt_wq); +EXPORT_SYMBOL_GPL(system_unbound_wq); #define for_each_busy_worker(worker, i, pos, gcwq) \ for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \ hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry) +static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask, + unsigned int sw) +{ + if (cpu < nr_cpu_ids) { + if (sw & 1) { + cpu = cpumask_next(cpu, mask); + if (cpu < nr_cpu_ids) + return cpu; + } + if (sw & 2) + return WORK_CPU_UNBOUND; + } + return WORK_CPU_NONE; +} + +static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, + struct workqueue_struct *wq) +{ + return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2); +} + +#define for_each_gcwq_cpu(cpu) \ + for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \ + (cpu) < WORK_CPU_NONE; \ + (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3)) + +#define for_each_online_gcwq_cpu(cpu) \ + for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \ + (cpu) < WORK_CPU_NONE; \ + (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3)) + +#define for_each_cwq_cpu(cpu, wq) \ + for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \ + (cpu) < WORK_CPU_NONE; \ + (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq))) + #ifdef CONFIG_DEBUG_OBJECTS_WORK static struct debug_obj_descr work_debug_descr; @@ -351,26 +390,46 @@ static bool workqueue_freezing; /* W: have wqs started freezing? 
*/ static DEFINE_PER_CPU(struct global_cwq, global_cwq); static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running); +/* + * Global cpu workqueue and nr_running counter for unbound gcwq. The + * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its + * workers have WORKER_UNBOUND set. + */ +static struct global_cwq unbound_global_cwq; +static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0); /* always 0 */ + static int worker_thread(void *__worker); static struct global_cwq *get_gcwq(unsigned int cpu) { - return &per_cpu(global_cwq, cpu); + if (cpu != WORK_CPU_UNBOUND) + return &per_cpu(global_cwq, cpu); + else + return &unbound_global_cwq; } static atomic_t *get_gcwq_nr_running(unsigned int cpu) { - return &per_cpu(gcwq_nr_running, cpu); + if (cpu != WORK_CPU_UNBOUND) + return &per_cpu(gcwq_nr_running, cpu); + else + return &unbound_gcwq_nr_running; } static struct cpu_workqueue_struct *get_cwq(unsigned int cpu, struct workqueue_struct *wq) { -#ifndef CONFIG_SMP - return wq->cpu_wq.single; + if (!(wq->flags & WQ_UNBOUND)) { + if (likely(cpu < nr_cpu_ids)) { +#ifdef CONFIG_SMP + return per_cpu_ptr(wq->cpu_wq.pcpu, cpu); #else - return per_cpu_ptr(wq->cpu_wq.pcpu, cpu); + return wq->cpu_wq.single; #endif + } + } else if (likely(cpu == WORK_CPU_UNBOUND)) + return wq->cpu_wq.single; + return NULL; } static unsigned int work_color_to_flags(int color) @@ -453,7 +512,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work) if (cpu == WORK_CPU_NONE) return NULL; - BUG_ON(cpu >= nr_cpu_ids); + BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND); return get_gcwq(cpu); } @@ -869,11 +928,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, debug_work_activate(work); + if (unlikely(cpu == WORK_CPU_UNBOUND)) + cpu = raw_smp_processor_id(); + /* * Determine gcwq to use. SINGLE_CPU is inherently * NON_REENTRANT, so test it first. */ - if (!(wq->flags & WQ_SINGLE_CPU)) { + if (!(wq->flags & (WQ_SINGLE_CPU | WQ_UNBOUND))) { struct global_cwq *last_gcwq; /* @@ -900,7 +962,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, } } else spin_lock_irqsave(&gcwq->lock, flags); - } else { + } else if (!(wq->flags & WQ_UNBOUND)) { unsigned int req_cpu = cpu; /* @@ -932,6 +994,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, spin_unlock_irqrestore(&gcwq->lock, flags); goto retry; } + } else { + gcwq = get_gcwq(WORK_CPU_UNBOUND); + spin_lock_irqsave(&gcwq->lock, flags); } /* gcwq determined, get cwq and queue */ @@ -1166,7 +1231,8 @@ static bool worker_maybe_bind_and_lock(struct worker *worker) * it races with cpu hotunplug operation. Verify * against GCWQ_DISASSOCIATED. 
*/ - set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu)); + if (!(gcwq->flags & GCWQ_DISASSOCIATED)) + set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu)); spin_lock_irq(&gcwq->lock); if (gcwq->flags & GCWQ_DISASSOCIATED) @@ -1231,8 +1297,9 @@ static struct worker *alloc_worker(void) */ static struct worker *create_worker(struct global_cwq *gcwq, bool bind) { - int id = -1; + bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND; struct worker *worker = NULL; + int id = -1; spin_lock_irq(&gcwq->lock); while (ida_get_new(&gcwq->worker_ida, &id)) { @@ -1250,8 +1317,12 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind) worker->gcwq = gcwq; worker->id = id; - worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d", - gcwq->cpu, id); + if (!on_unbound_cpu) + worker->task = kthread_create(worker_thread, worker, + "kworker/%u:%d", gcwq->cpu, id); + else + worker->task = kthread_create(worker_thread, worker, + "kworker/u:%d", id); if (IS_ERR(worker->task)) goto fail; @@ -1260,10 +1331,13 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind) * online later on. Make sure every worker has * PF_THREAD_BOUND set. */ - if (bind) + if (bind && !on_unbound_cpu) kthread_bind(worker->task, gcwq->cpu); - else + else { worker->task->flags |= PF_THREAD_BOUND; + if (on_unbound_cpu) + worker->flags |= WORKER_UNBOUND; + } return worker; fail: @@ -1358,12 +1432,17 @@ static bool send_mayday(struct work_struct *work) { struct cpu_workqueue_struct *cwq = get_work_cwq(work); struct workqueue_struct *wq = cwq->wq; + unsigned int cpu; if (!(wq->flags & WQ_RESCUER)) return false; /* mayday mayday mayday */ - if (!cpumask_test_and_set_cpu(cwq->gcwq->cpu, wq->mayday_mask)) + cpu = cwq->gcwq->cpu; + /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ + if (cpu == WORK_CPU_UNBOUND) + cpu = 0; + if (!cpumask_test_and_set_cpu(cpu, wq->mayday_mask)) wake_up_process(wq->rescuer->task); return true; } @@ -1882,6 +1961,7 @@ static int rescuer_thread(void *__wq) struct workqueue_struct *wq = __wq; struct worker *rescuer = wq->rescuer; struct list_head *scheduled = &rescuer->scheduled; + bool is_unbound = wq->flags & WQ_UNBOUND; unsigned int cpu; set_user_nice(current, RESCUER_NICE_LEVEL); @@ -1891,8 +1971,13 @@ repeat: if (kthread_should_stop()) return 0; + /* + * See whether any cpu is asking for help. Unbounded + * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND. + */ for_each_cpu(cpu, wq->mayday_mask) { - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); + unsigned int tcpu = is_unbound ? 
WORK_CPU_UNBOUND : cpu; + struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); struct global_cwq *gcwq = cwq->gcwq; struct work_struct *work, *n; @@ -2034,7 +2119,7 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, atomic_set(&wq->nr_cwqs_to_flush, 1); } - for_each_possible_cpu(cpu) { + for_each_cwq_cpu(cpu, wq) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct global_cwq *gcwq = cwq->gcwq; @@ -2344,7 +2429,7 @@ static void wait_on_work(struct work_struct *work) lock_map_acquire(&work->lockdep_map); lock_map_release(&work->lockdep_map); - for_each_possible_cpu(cpu) + for_each_gcwq_cpu(cpu) wait_on_cpu_work(get_gcwq(cpu), work); } @@ -2590,23 +2675,25 @@ static int alloc_cwqs(struct workqueue_struct *wq) const size_t size = sizeof(struct cpu_workqueue_struct); const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, __alignof__(unsigned long long)); -#ifndef CONFIG_SMP - void *ptr; - /* - * Allocate enough room to align cwq and put an extra pointer - * at the end pointing back to the originally allocated - * pointer which will be used for free. - */ - ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); - if (ptr) { - wq->cpu_wq.single = PTR_ALIGN(ptr, align); - *(void **)(wq->cpu_wq.single + 1) = ptr; + if (CONFIG_SMP && !(wq->flags & WQ_UNBOUND)) { + /* on SMP, percpu allocator can align itself */ + wq->cpu_wq.pcpu = __alloc_percpu(size, align); + } else { + void *ptr; + + /* + * Allocate enough room to align cwq and put an extra + * pointer at the end pointing back to the originally + * allocated pointer which will be used for free. + */ + ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL); + if (ptr) { + wq->cpu_wq.single = PTR_ALIGN(ptr, align); + *(void **)(wq->cpu_wq.single + 1) = ptr; + } } -#else - /* On SMP, percpu allocator can align itself */ - wq->cpu_wq.pcpu = __alloc_percpu(size, align); -#endif + /* just in case, make sure it's actually aligned */ BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align)); return wq->cpu_wq.v ? 0 : -ENOMEM; @@ -2614,23 +2701,25 @@ static int alloc_cwqs(struct workqueue_struct *wq) static void free_cwqs(struct workqueue_struct *wq) { -#ifndef CONFIG_SMP - /* on UP, the pointer to free is stored right after the cwq */ - if (wq->cpu_wq.single) + if (CONFIG_SMP && !(wq->flags & WQ_UNBOUND)) + free_percpu(wq->cpu_wq.pcpu); + else if (wq->cpu_wq.single) { + /* the pointer to free is stored right after the cwq */ kfree(*(void **)(wq->cpu_wq.single + 1)); -#else - free_percpu(wq->cpu_wq.pcpu); -#endif + } } -static int wq_clamp_max_active(int max_active, const char *name) +static int wq_clamp_max_active(int max_active, unsigned int flags, + const char *name) { - if (max_active < 1 || max_active > WQ_MAX_ACTIVE) + int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; + + if (max_active < 1 || max_active > lim) printk(KERN_WARNING "workqueue: max_active %d requested for %s " "is out of range, clamping between %d and %d\n", - max_active, name, 1, WQ_MAX_ACTIVE); + max_active, name, 1, lim); - return clamp_val(max_active, 1, WQ_MAX_ACTIVE); + return clamp_val(max_active, 1, lim); } struct workqueue_struct *__alloc_workqueue_key(const char *name, @@ -2642,8 +2731,15 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, struct workqueue_struct *wq; unsigned int cpu; + /* + * Unbound workqueues aren't concurrency managed and should be + * dispatched to workers immediately. 
+ */ + if (flags & WQ_UNBOUND) + flags |= WQ_HIGHPRI; + max_active = max_active ?: WQ_DFL_ACTIVE; - max_active = wq_clamp_max_active(max_active, name); + max_active = wq_clamp_max_active(max_active, flags, name); wq = kzalloc(sizeof(*wq), GFP_KERNEL); if (!wq) @@ -2664,7 +2760,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, if (alloc_cwqs(wq) < 0) goto err; - for_each_possible_cpu(cpu) { + for_each_cwq_cpu(cpu, wq) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct global_cwq *gcwq = get_gcwq(cpu); @@ -2703,7 +2799,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, spin_lock(&workqueue_lock); if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) - for_each_possible_cpu(cpu) + for_each_cwq_cpu(cpu, wq) get_cwq(cpu, wq)->max_active = 0; list_add(&wq->list, &workqueues); @@ -2743,7 +2839,7 @@ void destroy_workqueue(struct workqueue_struct *wq) spin_unlock(&workqueue_lock); /* sanity check */ - for_each_possible_cpu(cpu) { + for_each_cwq_cpu(cpu, wq) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); int i; @@ -2777,13 +2873,13 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) { unsigned int cpu; - max_active = wq_clamp_max_active(max_active, wq->name); + max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); spin_lock(&workqueue_lock); wq->saved_max_active = max_active; - for_each_possible_cpu(cpu) { + for_each_cwq_cpu(cpu, wq) { struct global_cwq *gcwq = get_gcwq(cpu); spin_lock_irq(&gcwq->lock); @@ -3310,7 +3406,7 @@ void freeze_workqueues_begin(void) BUG_ON(workqueue_freezing); workqueue_freezing = true; - for_each_possible_cpu(cpu) { + for_each_gcwq_cpu(cpu) { struct global_cwq *gcwq = get_gcwq(cpu); struct workqueue_struct *wq; @@ -3322,7 +3418,7 @@ void freeze_workqueues_begin(void) list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - if (wq->flags & WQ_FREEZEABLE) + if (cwq && wq->flags & WQ_FREEZEABLE) cwq->max_active = 0; } @@ -3354,7 +3450,7 @@ bool freeze_workqueues_busy(void) BUG_ON(!workqueue_freezing); - for_each_possible_cpu(cpu) { + for_each_gcwq_cpu(cpu) { struct workqueue_struct *wq; /* * nr_active is monotonically decreasing. 
It's safe @@ -3363,7 +3459,7 @@ bool freeze_workqueues_busy(void) list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - if (!(wq->flags & WQ_FREEZEABLE)) + if (!cwq || !(wq->flags & WQ_FREEZEABLE)) continue; BUG_ON(cwq->nr_active < 0); @@ -3396,7 +3492,7 @@ void thaw_workqueues(void) if (!workqueue_freezing) goto out_unlock; - for_each_possible_cpu(cpu) { + for_each_gcwq_cpu(cpu) { struct global_cwq *gcwq = get_gcwq(cpu); struct workqueue_struct *wq; @@ -3408,7 +3504,7 @@ void thaw_workqueues(void) list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - if (!(wq->flags & WQ_FREEZEABLE)) + if (!cwq || !(wq->flags & WQ_FREEZEABLE)) continue; /* restore max_active and repopulate worklist */ @@ -3451,12 +3547,14 @@ void __init init_workqueues(void) hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); /* initialize gcwqs */ - for_each_possible_cpu(cpu) { + for_each_gcwq_cpu(cpu) { struct global_cwq *gcwq = get_gcwq(cpu); spin_lock_init(&gcwq->lock); INIT_LIST_HEAD(&gcwq->worklist); gcwq->cpu = cpu; + if (cpu == WORK_CPU_UNBOUND) + gcwq->flags |= GCWQ_DISASSOCIATED; INIT_LIST_HEAD(&gcwq->idle_list); for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) @@ -3476,7 +3574,7 @@ void __init init_workqueues(void) } /* create the initial worker */ - for_each_online_cpu(cpu) { + for_each_online_gcwq_cpu(cpu) { struct global_cwq *gcwq = get_gcwq(cpu); struct worker *worker; @@ -3490,5 +3588,7 @@ void __init init_workqueues(void) system_wq = alloc_workqueue("events", 0, 0); system_long_wq = alloc_workqueue("events_long", 0, 0); system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); + system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, + WQ_UNBOUND_MAX_ACTIVE); BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq); } -- cgit v1.2.3-59-g8ed1b From c7fc77f78f16d138ca997ce096a62f46e2e9420a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:51 +0200 Subject: workqueue: remove WQ_SINGLE_CPU and use WQ_UNBOUND instead WQ_SINGLE_CPU combined with @max_active of 1 is used to achieve full ordering among works queued to a workqueue. The same can be achieved using WQ_UNBOUND as unbound workqueues always use the gcwq for WORK_CPU_UNBOUND. As @max_active is always one and benefits from cpu locality isn't accessible anyway, serving them with unbound workqueues should be fine. Drop WQ_SINGLE_CPU support and use WQ_UNBOUND instead. Note that most single thread workqueue users will be converted to use multithread or non-reentrant instead and only the ones which require strict ordering will keep using WQ_UNBOUND + @max_active of 1. 
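As an illustration of the resulting usage (hypothetical names, not part of this patch), a strictly ordered queue is now obtained as follows; this is exactly what create_singlethread_workqueue() expands to after the change:

	struct workqueue_struct *ordered_wq;

	ordered_wq = alloc_workqueue("my_ordered", WQ_UNBOUND | WQ_RESCUER, 1);
	if (!ordered_wq)
		return -ENOMEM;

	queue_work(ordered_wq, &work_a);
	queue_work(ordered_wq, &work_b);	/* starts only after work_a has finished */

All items land on the single WORK_CPU_UNBOUND gcwq and, with @max_active of one, execute one at a time in queueing order.
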
Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 7 ++-- kernel/workqueue.c | 100 +++++++++------------------------------------- 2 files changed, 21 insertions(+), 86 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 67ce734747f6..d74a529ed13e 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -233,12 +233,11 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } enum { WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ - WQ_SINGLE_CPU = 1 << 1, /* only single cpu at a time */ + WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ WQ_RESCUER = 1 << 3, /* has an rescue worker */ WQ_HIGHPRI = 1 << 4, /* high priority */ WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ - WQ_UNBOUND = 1 << 6, /* not bound to any cpu */ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ @@ -300,9 +299,9 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, #define create_workqueue(name) \ alloc_workqueue((name), WQ_RESCUER, 1) #define create_freezeable_workqueue(name) \ - alloc_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1) + alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1) #define create_singlethread_workqueue(name) \ - alloc_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1) + alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4608563cdd63..20d6237d7498 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -206,8 +206,6 @@ struct workqueue_struct { struct list_head flusher_queue; /* F: flush waiters */ struct list_head flusher_overflow; /* F: flush overflow list */ - unsigned long single_cpu; /* cpu for single cpu wq */ - cpumask_var_t mayday_mask; /* cpus requesting rescue */ struct worker *rescuer; /* I: rescue worker */ @@ -889,34 +887,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq, wake_up_worker(gcwq); } -/** - * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing - * @cwq: cwq to unbind - * - * Try to unbind @cwq from single cpu workqueue processing. If - * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed. - * - * CONTEXT: - * spin_lock_irq(gcwq->lock). - */ -static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq) -{ - struct workqueue_struct *wq = cwq->wq; - struct global_cwq *gcwq = cwq->gcwq; - - BUG_ON(wq->single_cpu != gcwq->cpu); - /* - * Unbind from workqueue if @cwq is not frozen. If frozen, - * thaw_workqueues() will either restart processing on this - * cpu or unbind if empty. This keeps works queued while - * frozen fully ordered and flushable. - */ - if (likely(!(gcwq->flags & GCWQ_FREEZING))) { - smp_wmb(); /* paired with cmpxchg() in __queue_work() */ - wq->single_cpu = WORK_CPU_NONE; - } -} - static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct work_struct *work) { @@ -924,20 +894,16 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, struct cpu_workqueue_struct *cwq; struct list_head *worklist; unsigned long flags; - bool arbitrate; debug_work_activate(work); - if (unlikely(cpu == WORK_CPU_UNBOUND)) - cpu = raw_smp_processor_id(); - - /* - * Determine gcwq to use. SINGLE_CPU is inherently - * NON_REENTRANT, so test it first. 
- */ - if (!(wq->flags & (WQ_SINGLE_CPU | WQ_UNBOUND))) { + /* determine gcwq to use */ + if (!(wq->flags & WQ_UNBOUND)) { struct global_cwq *last_gcwq; + if (unlikely(cpu == WORK_CPU_UNBOUND)) + cpu = raw_smp_processor_id(); + /* * It's multi cpu. If @wq is non-reentrant and @work * was previously on a different cpu, it might still @@ -962,38 +928,6 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, } } else spin_lock_irqsave(&gcwq->lock, flags); - } else if (!(wq->flags & WQ_UNBOUND)) { - unsigned int req_cpu = cpu; - - /* - * It's a bit more complex for single cpu workqueues. - * We first need to determine which cpu is going to be - * used. If no cpu is currently serving this - * workqueue, arbitrate using atomic accesses to - * wq->single_cpu; otherwise, use the current one. - */ - retry: - cpu = wq->single_cpu; - arbitrate = cpu == WORK_CPU_NONE; - if (arbitrate) - cpu = req_cpu; - - gcwq = get_gcwq(cpu); - spin_lock_irqsave(&gcwq->lock, flags); - - /* - * The following cmpxchg() is a full barrier paired - * with smp_wmb() in cwq_unbind_single_cpu() and - * guarantees that all changes to wq->st_* fields are - * visible on the new cpu after this point. - */ - if (arbitrate) - cmpxchg(&wq->single_cpu, WORK_CPU_NONE, cpu); - - if (unlikely(wq->single_cpu != cpu)) { - spin_unlock_irqrestore(&gcwq->lock, flags); - goto retry; - } } else { gcwq = get_gcwq(WORK_CPU_UNBOUND); spin_lock_irqsave(&gcwq->lock, flags); @@ -1105,19 +1039,30 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work = &dwork->work; if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { - struct global_cwq *gcwq = get_work_gcwq(work); - unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id(); + unsigned int lcpu; BUG_ON(timer_pending(timer)); BUG_ON(!list_empty(&work->entry)); timer_stats_timer_set_start_info(&dwork->timer); + /* * This stores cwq for the moment, for the timer_fn. * Note that the work's gcwq is preserved to allow * reentrance detection for delayed works. */ + if (!(wq->flags & WQ_UNBOUND)) { + struct global_cwq *gcwq = get_work_gcwq(work); + + if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND) + lcpu = gcwq->cpu; + else + lcpu = raw_smp_processor_id(); + } else + lcpu = WORK_CPU_UNBOUND; + set_work_cwq(work, get_cwq(lcpu, wq), 0); + timer->expires = jiffies + delay; timer->data = (unsigned long)dwork; timer->function = delayed_work_timer_fn; @@ -1696,9 +1641,6 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) /* one down, submit a delayed one */ if (cwq->nr_active < cwq->max_active) cwq_activate_first_delayed(cwq); - } else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) { - /* this was the last work, unbind from single cpu */ - cwq_unbind_single_cpu(cwq); } /* is flush in progress and are we at the flushing tip? 
*/ @@ -2751,7 +2693,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, atomic_set(&wq->nr_cwqs_to_flush, 0); INIT_LIST_HEAD(&wq->flusher_queue); INIT_LIST_HEAD(&wq->flusher_overflow); - wq->single_cpu = WORK_CPU_NONE; wq->name = name; lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); @@ -3513,11 +3454,6 @@ void thaw_workqueues(void) while (!list_empty(&cwq->delayed_works) && cwq->nr_active < cwq->max_active) cwq_activate_first_delayed(cwq); - - /* perform delayed unbind from single cpu if empty */ - if (wq->single_cpu == gcwq->cpu && - !cwq->nr_active && list_empty(&cwq->delayed_works)) - cwq_unbind_single_cpu(cwq); } wake_up_worker(gcwq); -- cgit v1.2.3-59-g8ed1b From 083b804c4d3e1e3d0eace56bdbc0f674946d2847 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 2 Jul 2010 10:03:52 +0200 Subject: async: use workqueue for worker pool Replace private worker pool with system_unbound_wq. Signed-off-by: Tejun Heo Acked-by: Arjan van de Ven --- kernel/async.c | 141 +++++++++------------------------------------------------ 1 file changed, 22 insertions(+), 119 deletions(-) diff --git a/kernel/async.c b/kernel/async.c index 15319d6c18fe..cd9dbb913c77 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -49,40 +49,33 @@ asynchronous and synchronous parts of the kernel. */ #include -#include #include #include #include -#include -#include -#include #include +#include #include static async_cookie_t next_cookie = 1; -#define MAX_THREADS 256 #define MAX_WORK 32768 static LIST_HEAD(async_pending); static LIST_HEAD(async_running); static DEFINE_SPINLOCK(async_lock); -static int async_enabled = 0; - struct async_entry { - struct list_head list; - async_cookie_t cookie; - async_func_ptr *func; - void *data; - struct list_head *running; + struct list_head list; + struct work_struct work; + async_cookie_t cookie; + async_func_ptr *func; + void *data; + struct list_head *running; }; static DECLARE_WAIT_QUEUE_HEAD(async_done); -static DECLARE_WAIT_QUEUE_HEAD(async_new); static atomic_t entry_count; -static atomic_t thread_count; extern int initcall_debug; @@ -117,27 +110,23 @@ static async_cookie_t lowest_in_progress(struct list_head *running) spin_unlock_irqrestore(&async_lock, flags); return ret; } + /* * pick the first pending entry and run it */ -static void run_one_entry(void) +static void async_run_entry_fn(struct work_struct *work) { + struct async_entry *entry = + container_of(work, struct async_entry, work); unsigned long flags; - struct async_entry *entry; ktime_t calltime, delta, rettime; - /* 1) pick one task from the pending queue */ - + /* 1) move self to the running queue */ spin_lock_irqsave(&async_lock, flags); - if (list_empty(&async_pending)) - goto out; - entry = list_first_entry(&async_pending, struct async_entry, list); - - /* 2) move it to the running queue */ list_move_tail(&entry->list, entry->running); spin_unlock_irqrestore(&async_lock, flags); - /* 3) run it (and print duration)*/ + /* 2) run (and print duration) */ if (initcall_debug && system_state == SYSTEM_BOOTING) { printk("calling %lli_%pF @ %i\n", (long long)entry->cookie, entry->func, task_pid_nr(current)); @@ -153,31 +142,25 @@ static void run_one_entry(void) (long long)ktime_to_ns(delta) >> 10); } - /* 4) remove it from the running queue */ + /* 3) remove self from the running queue */ spin_lock_irqsave(&async_lock, flags); list_del(&entry->list); - /* 5) free the entry */ + /* 4) free the entry */ kfree(entry); atomic_dec(&entry_count); spin_unlock_irqrestore(&async_lock, flags); - /* 6) wake up any 
waiters. */ + /* 5) wake up any waiters */ wake_up(&async_done); - return; - -out: - spin_unlock_irqrestore(&async_lock, flags); } - static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running) { struct async_entry *entry; unsigned long flags; async_cookie_t newcookie; - /* allow irq-off callers */ entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC); @@ -186,7 +169,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l * If we're out of memory or if there's too much work * pending already, we execute synchronously. */ - if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) { + if (!entry || atomic_read(&entry_count) > MAX_WORK) { kfree(entry); spin_lock_irqsave(&async_lock, flags); newcookie = next_cookie++; @@ -196,6 +179,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l ptr(data, newcookie); return newcookie; } + INIT_WORK(&entry->work, async_run_entry_fn); entry->func = ptr; entry->data = data; entry->running = running; @@ -205,7 +189,10 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l list_add_tail(&entry->list, &async_pending); atomic_inc(&entry_count); spin_unlock_irqrestore(&async_lock, flags); - wake_up(&async_new); + + /* schedule for execution */ + queue_work(system_unbound_wq, &entry->work); + return newcookie; } @@ -312,87 +299,3 @@ void async_synchronize_cookie(async_cookie_t cookie) async_synchronize_cookie_domain(cookie, &async_running); } EXPORT_SYMBOL_GPL(async_synchronize_cookie); - - -static int async_thread(void *unused) -{ - DECLARE_WAITQUEUE(wq, current); - add_wait_queue(&async_new, &wq); - - while (!kthread_should_stop()) { - int ret = HZ; - set_current_state(TASK_INTERRUPTIBLE); - /* - * check the list head without lock.. false positives - * are dealt with inside run_one_entry() while holding - * the lock. - */ - rmb(); - if (!list_empty(&async_pending)) - run_one_entry(); - else - ret = schedule_timeout(HZ); - - if (ret == 0) { - /* - * we timed out, this means we as thread are redundant. - * we sign off and die, but we to avoid any races there - * is a last-straw check to see if work snuck in. - */ - atomic_dec(&thread_count); - wmb(); /* manager must see our departure first */ - if (list_empty(&async_pending)) - break; - /* - * woops work came in between us timing out and us - * signing off; we need to stay alive and keep working. 
- */ - atomic_inc(&thread_count); - } - } - remove_wait_queue(&async_new, &wq); - - return 0; -} - -static int async_manager_thread(void *unused) -{ - DECLARE_WAITQUEUE(wq, current); - add_wait_queue(&async_new, &wq); - - while (!kthread_should_stop()) { - int tc, ec; - - set_current_state(TASK_INTERRUPTIBLE); - - tc = atomic_read(&thread_count); - rmb(); - ec = atomic_read(&entry_count); - - while (tc < ec && tc < MAX_THREADS) { - if (IS_ERR(kthread_run(async_thread, NULL, "async/%i", - tc))) { - msleep(100); - continue; - } - atomic_inc(&thread_count); - tc++; - } - - schedule(); - } - remove_wait_queue(&async_new, &wq); - - return 0; -} - -static int __init async_init(void) -{ - async_enabled = - !IS_ERR(kthread_run(async_manager_thread, NULL, "async/mgr")); - - WARN_ON(!async_enabled); - return 0; -} - -core_initcall(async_init); -- cgit v1.2.3-59-g8ed1b From 9f9c23644b07e7a51f6f39064e61c150be712932 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Jul 2010 11:31:20 +0200 Subject: workqueue: fix locking in retry path of maybe_create_worker() maybe_create_worker() mismanaged locking when worker creation fails and it has to retry. Fix locking and simplify lock manipulation. Signed-off-by: Tejun Heo Reported-by: Yong Zhang --- kernel/workqueue.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 20d6237d7498..aca94726e20a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1442,14 +1442,14 @@ static bool maybe_create_worker(struct global_cwq *gcwq) if (!need_to_create_worker(gcwq)) return false; restart: + spin_unlock_irq(&gcwq->lock); + /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); while (true) { struct worker *worker; - spin_unlock_irq(&gcwq->lock); - worker = create_worker(gcwq, true); if (worker) { del_timer_sync(&gcwq->mayday_timer); @@ -1462,15 +1462,13 @@ restart: if (!need_to_create_worker(gcwq)) break; - spin_unlock_irq(&gcwq->lock); __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(CREATE_COOLDOWN); - spin_lock_irq(&gcwq->lock); + if (!need_to_create_worker(gcwq)) break; } - spin_unlock_irq(&gcwq->lock); del_timer_sync(&gcwq->mayday_timer); spin_lock_irq(&gcwq->lock); if (need_to_create_worker(gcwq)) -- cgit v1.2.3-59-g8ed1b From 931ac77ef65d2d90ee1def63d2041402ec7c53ab Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jul 2010 11:07:48 +0200 Subject: workqueue: fix build problem on !CONFIG_SMP Commit f3421797 (workqueue: implement unbound workqueue) incorrectly tested CONFIG_SMP as part of a C expression in alloc/free_cwqs(). As CONFIG_SMP is not defined in UP, this breaks build. Fix it by using Found during linux-next build test. 
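For reference, a short sketch of the failure mode being fixed (the wq fields match the hunks below): on UP builds CONFIG_SMP is simply not defined, so writing it inside a C expression leaves an undeclared identifier behind; the choice has to be made by the preprocessor and handed to the code as a plain bool:

	/* broke UP builds: CONFIG_SMP does not exist as a C symbol there */
	/*	if (CONFIG_SMP && !(wq->flags & WQ_UNBOUND)) ...	*/

	/* works in both configurations: decide at preprocessing time */
#ifdef CONFIG_SMP
	bool percpu = !(wq->flags & WQ_UNBOUND);
#else
	bool percpu = false;
#endif

	if (percpu)
		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
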
Signed-off-by: Tejun Heo Reported-by: Stephen Rothwell --- kernel/workqueue.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index aca94726e20a..79a11e40f311 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2615,11 +2615,15 @@ static int alloc_cwqs(struct workqueue_struct *wq) const size_t size = sizeof(struct cpu_workqueue_struct); const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS, __alignof__(unsigned long long)); +#ifdef CONFIG_SMP + bool percpu = !(wq->flags & WQ_UNBOUND); +#else + bool percpu = false; +#endif - if (CONFIG_SMP && !(wq->flags & WQ_UNBOUND)) { - /* on SMP, percpu allocator can align itself */ + if (percpu) wq->cpu_wq.pcpu = __alloc_percpu(size, align); - } else { + else { void *ptr; /* @@ -2641,7 +2645,13 @@ static int alloc_cwqs(struct workqueue_struct *wq) static void free_cwqs(struct workqueue_struct *wq) { - if (CONFIG_SMP && !(wq->flags & WQ_UNBOUND)) +#ifdef CONFIG_SMP + bool percpu = !(wq->flags & WQ_UNBOUND); +#else + bool percpu = false; +#endif + + if (percpu) free_percpu(wq->cpu_wq.pcpu); else if (wq->cpu_wq.single) { /* the pointer to free is stored right after the cwq */ -- cgit v1.2.3-59-g8ed1b From f2e005aaff4878a8ea93d5fb033a21389b72579a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jul 2010 15:59:09 +0200 Subject: workqueue: fix mayday_mask handling on UP All cpumasks are assumed to have cpu 0 permanently set on UP, so it can't be used to signify whether there's something to be done for the CPU. workqueue was using cpumask to track which CPU requested rescuer assistance and this led rescuer thread to think there always are pending mayday requests on UP, which resulted in infinite busy loops. This patch fixes the problem by introducing mayday_mask_t and associated helpers which wrap cpumask on SMP and emulates its behavior using bitops and unsigned long on UP. Signed-off-by: Tejun Heo Cc: Rusty Russell --- kernel/workqueue.c | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 79a11e40f311..c11edc9c9365 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -185,6 +185,27 @@ struct wq_flusher { struct completion done; /* flush completion */ }; +/* + * All cpumasks are assumed to be always set on UP and thus can't be + * used to determine whether there's something to be done. 
+ */ +#ifdef CONFIG_SMP +typedef cpumask_var_t mayday_mask_t; +#define mayday_test_and_set_cpu(cpu, mask) \ + cpumask_test_and_set_cpu((cpu), (mask)) +#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) +#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) +#define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp)) +#define free_mayday_mask(mask) free_cpumask_var((mask)) +#else +typedef unsigned long mayday_mask_t; +#define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask)) +#define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask)) +#define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask)) +#define alloc_mayday_mask(maskp, gfp) true +#define free_mayday_mask(mask) do { } while (0) +#endif + /* * The externally visible workqueue abstraction is an array of * per-CPU workqueues: @@ -206,7 +227,7 @@ struct workqueue_struct { struct list_head flusher_queue; /* F: flush waiters */ struct list_head flusher_overflow; /* F: flush overflow list */ - cpumask_var_t mayday_mask; /* cpus requesting rescue */ + mayday_mask_t mayday_mask; /* cpus requesting rescue */ struct worker *rescuer; /* I: rescue worker */ int saved_max_active; /* W: saved cwq max_active */ @@ -1387,7 +1408,7 @@ static bool send_mayday(struct work_struct *work) /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ if (cpu == WORK_CPU_UNBOUND) cpu = 0; - if (!cpumask_test_and_set_cpu(cpu, wq->mayday_mask)) + if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask)) wake_up_process(wq->rescuer->task); return true; } @@ -1915,14 +1936,14 @@ repeat: * See whether any cpu is asking for help. Unbounded * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND. */ - for_each_cpu(cpu, wq->mayday_mask) { + for_each_mayday_cpu(cpu, wq->mayday_mask) { unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); struct global_cwq *gcwq = cwq->gcwq; struct work_struct *work, *n; __set_current_state(TASK_RUNNING); - cpumask_clear_cpu(cpu, wq->mayday_mask); + mayday_clear_cpu(cpu, wq->mayday_mask); /* migrate to the target cpu if possible */ rescuer->gcwq = gcwq; @@ -2724,7 +2745,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, if (flags & WQ_RESCUER) { struct worker *rescuer; - if (!alloc_cpumask_var(&wq->mayday_mask, GFP_KERNEL)) + if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL)) goto err; wq->rescuer = rescuer = alloc_worker(); @@ -2759,7 +2780,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, err: if (wq) { free_cwqs(wq); - free_cpumask_var(wq->mayday_mask); + free_mayday_mask(wq->mayday_mask); kfree(wq->rescuer); kfree(wq); } @@ -2800,7 +2821,7 @@ void destroy_workqueue(struct workqueue_struct *wq) if (wq->flags & WQ_RESCUER) { kthread_stop(wq->rescuer->task); - free_cpumask_var(wq->mayday_mask); + free_mayday_mask(wq->mayday_mask); } free_cwqs(wq); -- cgit v1.2.3-59-g8ed1b From e120153ddf8620fd0a194d301e9c5a8b28483bb5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 22 Jul 2010 14:14:25 +0200 Subject: workqueue: fix how cpu number is stored in work->data Once a work starts execution, its data contains the cpu number it was on instead of pointing to cwq. This is added by commit 7a22ad75 (workqueue: carry cpu number in work data once execution starts) to reliably determine the work was last on even if the workqueue itself was destroyed inbetween. Whether data points to a cwq or contains a cpu number was distinguished by comparing the value against PAGE_OFFSET. 
The assumption was that a cpu number should be below PAGE_OFFSET while a pointer to cwq should be above it. However, on architectures which use separate address spaces for user and kernel spaces, this doesn't hold as PAGE_OFFSET is zero. Fix it by using an explicit flag, WORK_STRUCT_CWQ, to mark what the data field contains. If the flag is set, it's pointing to a cwq; otherwise, it contains a cpu number. Reported on s390 and microblaze during linux-next testing. Signed-off-by: Tejun Heo Reported-by: Sachin Sant Reported-by: Michal Simek Reported-by: Martin Schwidefsky Tested-by: Martin Schwidefsky Tested-by: Michal Simek --- include/linux/workqueue.h | 14 ++++++++------ kernel/workqueue.c | 36 +++++++++++++----------------------- 2 files changed, 21 insertions(+), 29 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index d74a529ed13e..5f76001c4e6d 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -25,17 +25,19 @@ typedef void (*work_func_t)(struct work_struct *work); enum { WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ - WORK_STRUCT_LINKED_BIT = 1, /* next work is linked to this one */ + WORK_STRUCT_CWQ_BIT = 1, /* data points to cwq */ + WORK_STRUCT_LINKED_BIT = 2, /* next work is linked to this one */ #ifdef CONFIG_DEBUG_OBJECTS_WORK - WORK_STRUCT_STATIC_BIT = 2, /* static initializer (debugobjects) */ - WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */ + WORK_STRUCT_STATIC_BIT = 3, /* static initializer (debugobjects) */ + WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ #else - WORK_STRUCT_COLOR_SHIFT = 2, /* color for workqueue flushing */ + WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */ #endif WORK_STRUCT_COLOR_BITS = 4, WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, + WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, #ifdef CONFIG_DEBUG_OBJECTS_WORK WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, @@ -56,8 +58,8 @@ enum { WORK_CPU_LAST = WORK_CPU_NONE, /* - * Reserve 6 bits off of cwq pointer w/ debugobjects turned - * off. This makes cwqs aligned to 64 bytes which isn't too + * Reserve 7 bits off of cwq pointer w/ debugobjects turned + * off. This makes cwqs aligned to 128 bytes which isn't too * excessive while allowing 15 workqueue flush colors. */ WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c11edc9c9365..e5cb7faac58e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -468,10 +468,9 @@ static int work_next_color(int color) } /* - * Work data points to the cwq while a work is on queue. Once - * execution starts, it points to the cpu the work was last on. This - * can be distinguished by comparing the data value against - * PAGE_OFFSET. + * A work's data points to the cwq with WORK_STRUCT_CWQ set while the + * work is on queue. Once execution starts, WORK_STRUCT_CWQ is + * cleared and the work data contains the cpu number it was last on. * * set_work_{cwq|cpu}() and clear_work_data() can be used to set the * cwq, cpu or clear work->data. 
These functions should only be @@ -494,7 +493,7 @@ static void set_work_cwq(struct work_struct *work, unsigned long extra_flags) { set_work_data(work, (unsigned long)cwq, - WORK_STRUCT_PENDING | extra_flags); + WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags); } static void set_work_cpu(struct work_struct *work, unsigned int cpu) @@ -507,25 +506,24 @@ static void clear_work_data(struct work_struct *work) set_work_data(work, WORK_STRUCT_NO_CPU, 0); } -static inline unsigned long get_work_data(struct work_struct *work) -{ - return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK; -} - static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work) { - unsigned long data = get_work_data(work); + unsigned long data = atomic_long_read(&work->data); - return data >= PAGE_OFFSET ? (void *)data : NULL; + if (data & WORK_STRUCT_CWQ) + return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); + else + return NULL; } static struct global_cwq *get_work_gcwq(struct work_struct *work) { - unsigned long data = get_work_data(work); + unsigned long data = atomic_long_read(&work->data); unsigned int cpu; - if (data >= PAGE_OFFSET) - return ((struct cpu_workqueue_struct *)data)->gcwq; + if (data & WORK_STRUCT_CWQ) + return ((struct cpu_workqueue_struct *) + (data & WORK_STRUCT_WQ_DATA_MASK))->gcwq; cpu = data >> WORK_STRUCT_FLAG_BITS; if (cpu == WORK_CPU_NONE) @@ -3501,14 +3499,6 @@ void __init init_workqueues(void) unsigned int cpu; int i; - /* - * The pointer part of work->data is either pointing to the - * cwq or contains the cpu number the work ran last on. Make - * sure cpu number won't overflow into kernel pointer area so - * that they can be distinguished. - */ - BUILD_BUG_ON(WORK_CPU_LAST << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET); - hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); /* initialize gcwqs */ -- cgit v1.2.3-59-g8ed1b From 8b8edefa2fffbff97f9eec8b70e78ae23abad1a0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jul 2010 22:09:01 +0200 Subject: fscache: convert object to use workqueue instead of slow-work Make fscache object state transition callbacks use workqueue instead of slow-work. New dedicated unbound CPU workqueue fscache_object_wq is created. get/put callbacks are renamed and modified to take @object and called directly from the enqueue wrapper and the work function. While at it, make all open coded instances of get/put to use fscache_get/put_object(). * Unbound workqueue is used. * work_busy() output is printed instead of slow-work flags in object debugging outputs. They mean basically the same thing bit-for-bit. * sysctl fscache.object_max_active added to control concurrency. The default value is nr_cpus clamped between 4 and WQ_UNBOUND_MAX_ACTIVE. * slow_work_sleep_till_thread_needed() is replaced with fscache private implementation fscache_object_sleep_till_congested() which waits on fscache_object_wq congestion. * debugfs support is dropped for now. Tracing API based debug facility is planned to be added. 
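Two workqueue APIs carry most of the weight in this conversion; a rough usage sketch follows (hypothetical object pointer and back-off, not fscache's exact control flow, see the hunks below for the real thing):

	/* queue state transition work on the dedicated unbound workqueue */
	if (!workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq))
		queue_work(fscache_object_wq, &obj->work);
	else
		schedule_timeout_uninterruptible(1);	/* hypothetical back-off */

	/* concurrency limit is runtime tunable; the new
	 * fscache.object_max_active sysctl ends up calling this */
	workqueue_set_max_active(fscache_object_wq, fscache_object_max_active);
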
Signed-off-by: Tejun Heo Acked-by: David Howells --- Documentation/filesystems/caching/fscache.txt | 10 +-- fs/cachefiles/namei.c | 13 ++-- fs/fscache/internal.h | 7 ++ fs/fscache/main.c | 76 ++++++++++++++++++ fs/fscache/object-list.c | 11 ++- fs/fscache/object.c | 106 +++++++++++++------------- include/linux/fscache-cache.h | 9 ++- 7 files changed, 158 insertions(+), 74 deletions(-) diff --git a/Documentation/filesystems/caching/fscache.txt b/Documentation/filesystems/caching/fscache.txt index a91e2e2095b0..770267af5b3e 100644 --- a/Documentation/filesystems/caching/fscache.txt +++ b/Documentation/filesystems/caching/fscache.txt @@ -343,8 +343,8 @@ This will look something like: [root@andromeda ~]# head /proc/fs/fscache/objects OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS EM EV F S | NETFS_COOKIE_DEF TY FL NETFS_DATA OBJECT_KEY, AUX_DATA ======== ======== ==== ===== === === === == ===== == == = = | ================ == == ================ ================ - 17e4b 2 ACTV 0 0 0 0 0 0 7b 4 0 8 | NFS.fh DT 0 ffff88001dd82820 010006017edcf8bbc93b43298fdfbe71e50b57b13a172c0117f38472, e567634700000000000000000000000063f2404a000000000000000000000000c9030000000000000000000063f2404a - 1693a 2 ACTV 0 0 0 0 0 0 7b 4 0 8 | NFS.fh DT 0 ffff88002db23380 010006017edcf8bbc93b43298fdfbe71e50b57b1e0162c01a2df0ea6, 420ebc4a000000000000000000000000420ebc4a0000000000000000000000000e1801000000000000000000420ebc4a + 17e4b 2 ACTV 0 0 0 0 0 0 7b 4 0 0 | NFS.fh DT 0 ffff88001dd82820 010006017edcf8bbc93b43298fdfbe71e50b57b13a172c0117f38472, e567634700000000000000000000000063f2404a000000000000000000000000c9030000000000000000000063f2404a + 1693a 2 ACTV 0 0 0 0 0 0 7b 4 0 0 | NFS.fh DT 0 ffff88002db23380 010006017edcf8bbc93b43298fdfbe71e50b57b1e0162c01a2df0ea6, 420ebc4a000000000000000000000000420ebc4a0000000000000000000000000e1801000000000000000000420ebc4a where the first set of columns before the '|' describe the object: @@ -362,7 +362,7 @@ where the first set of columns before the '|' describe the object: EM Object's event mask EV Events raised on this object F Object flags - S Object slow-work work item flags + S Object work item busy state mask (1:pending 2:running) and the second set of columns describe the object's cookie, if present: @@ -395,8 +395,8 @@ and the following paired letters: w Show objects that don't have pending writes R Show objects that have outstanding reads r Show objects that don't have outstanding reads - S Show objects that have slow work queued - s Show objects that don't have slow work queued + S Show objects that have work queued + s Show objects that don't have work queued If neither side of a letter pair is given, then both are implied. 
For example: diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index f4a7840bf42c..42c7fafc8bfe 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -37,9 +37,9 @@ void __cachefiles_printk_object(struct cachefiles_object *object, printk(KERN_ERR "%sobject: OBJ%x\n", prefix, object->fscache.debug_id); - printk(KERN_ERR "%sobjstate=%s fl=%lx swfl=%lx ev=%lx[%lx]\n", + printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n", prefix, fscache_object_states[object->fscache.state], - object->fscache.flags, object->fscache.work.flags, + object->fscache.flags, work_busy(&object->fscache.work), object->fscache.events, object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK); printk(KERN_ERR "%sops=%u inp=%u exc=%u\n", @@ -212,7 +212,7 @@ wait_for_old_object: /* if the object we're waiting for is queued for processing, * then just put ourselves on the queue behind it */ - if (slow_work_is_queued(&xobject->fscache.work)) { + if (work_pending(&xobject->fscache.work)) { _debug("queue OBJ%x behind OBJ%x immediately", object->fscache.debug_id, xobject->fscache.debug_id); @@ -220,8 +220,7 @@ wait_for_old_object: } /* otherwise we sleep until either the object we're waiting for - * is done, or the slow-work facility wants the thread back to - * do other work */ + * is done, or the fscache_object is congested */ wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE); init_wait(&wait); requeue = false; @@ -229,8 +228,8 @@ wait_for_old_object: prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) break; - requeue = slow_work_sleep_till_thread_needed( - &object->fscache.work, &timeout); + + requeue = fscache_object_sleep_till_congested(&timeout); } while (timeout > 0 && !requeue); finish_wait(wq, &wait); diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index edd7434ab6e5..6e0b5fb25231 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -82,6 +82,13 @@ extern unsigned fscache_defer_lookup; extern unsigned fscache_defer_create; extern unsigned fscache_debug; extern struct kobject *fscache_root; +extern struct workqueue_struct *fscache_object_wq; +DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait); + +static inline bool fscache_object_congested(void) +{ + return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq); +} extern int fscache_wait_bit(void *); extern int fscache_wait_bit_interruptible(void *); diff --git a/fs/fscache/main.c b/fs/fscache/main.c index add6bdb53f04..bb8d4c35c7a2 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "internal.h" MODULE_DESCRIPTION("FS Cache Manager"); @@ -40,22 +41,89 @@ MODULE_PARM_DESC(fscache_debug, "FS-Cache debugging mask"); struct kobject *fscache_root; +struct workqueue_struct *fscache_object_wq; + +DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait); + +/* these values serve as lower bounds, will be adjusted in fscache_init() */ +static unsigned fscache_object_max_active = 4; + +#ifdef CONFIG_SYSCTL +static struct ctl_table_header *fscache_sysctl_header; + +static int fscache_max_active_sysctl(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + struct workqueue_struct **wqp = table->extra1; + unsigned int *datap = table->data; + int ret; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (ret == 0) + workqueue_set_max_active(*wqp, *datap); + return ret; +} + +ctl_table fscache_sysctls[] = { + { + .procname = 
"object_max_active", + .data = &fscache_object_max_active, + .maxlen = sizeof(unsigned), + .mode = 0644, + .proc_handler = fscache_max_active_sysctl, + .extra1 = &fscache_object_wq, + }, + {} +}; + +ctl_table fscache_sysctls_root[] = { + { + .procname = "fscache", + .mode = 0555, + .child = fscache_sysctls, + }, + {} +}; +#endif /* * initialise the fs caching module */ static int __init fscache_init(void) { + unsigned int nr_cpus = num_possible_cpus(); + unsigned int cpu; int ret; ret = slow_work_register_user(THIS_MODULE); if (ret < 0) goto error_slow_work; + fscache_object_max_active = + clamp_val(nr_cpus, + fscache_object_max_active, WQ_UNBOUND_MAX_ACTIVE); + + ret = -ENOMEM; + fscache_object_wq = alloc_workqueue("fscache_object", WQ_UNBOUND, + fscache_object_max_active); + if (!fscache_object_wq) + goto error_object_wq; + + for_each_possible_cpu(cpu) + init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu)); + ret = fscache_proc_init(); if (ret < 0) goto error_proc; +#ifdef CONFIG_SYSCTL + ret = -ENOMEM; + fscache_sysctl_header = register_sysctl_table(fscache_sysctls_root); + if (!fscache_sysctl_header) + goto error_sysctl; +#endif + fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar", sizeof(struct fscache_cookie), 0, @@ -78,8 +146,14 @@ static int __init fscache_init(void) error_kobj: kmem_cache_destroy(fscache_cookie_jar); error_cookie_jar: +#ifdef CONFIG_SYSCTL + unregister_sysctl_table(fscache_sysctl_header); +error_sysctl: +#endif fscache_proc_cleanup(); error_proc: + destroy_workqueue(fscache_object_wq); +error_object_wq: slow_work_unregister_user(THIS_MODULE); error_slow_work: return ret; @@ -96,7 +170,9 @@ static void __exit fscache_exit(void) kobject_put(fscache_root); kmem_cache_destroy(fscache_cookie_jar); + unregister_sysctl_table(fscache_sysctl_header); fscache_proc_cleanup(); + destroy_workqueue(fscache_object_wq); slow_work_unregister_user(THIS_MODULE); printk(KERN_NOTICE "FS-Cache: Unloaded\n"); } diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c index 4a8eb31c5338..ebe29c581380 100644 --- a/fs/fscache/object-list.c +++ b/fs/fscache/object-list.c @@ -34,8 +34,8 @@ struct fscache_objlist_data { #define FSCACHE_OBJLIST_CONFIG_NOREADS 0x00000200 /* show objects without active reads */ #define FSCACHE_OBJLIST_CONFIG_EVENTS 0x00000400 /* show objects with events */ #define FSCACHE_OBJLIST_CONFIG_NOEVENTS 0x00000800 /* show objects without no events */ -#define FSCACHE_OBJLIST_CONFIG_WORK 0x00001000 /* show objects with slow work */ -#define FSCACHE_OBJLIST_CONFIG_NOWORK 0x00002000 /* show objects without slow work */ +#define FSCACHE_OBJLIST_CONFIG_WORK 0x00001000 /* show objects with work */ +#define FSCACHE_OBJLIST_CONFIG_NOWORK 0x00002000 /* show objects without work */ u8 buf[512]; /* key and aux data buffer */ }; @@ -231,12 +231,11 @@ static int fscache_objlist_show(struct seq_file *m, void *v) READS, NOREADS); FILTER(obj->events & obj->event_mask, EVENTS, NOEVENTS); - FILTER(obj->work.flags & ~(1UL << SLOW_WORK_VERY_SLOW), - WORK, NOWORK); + FILTER(work_busy(&obj->work), WORK, NOWORK); } seq_printf(m, - "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1lx | ", + "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1x | ", obj->debug_id, obj->parent ? 
obj->parent->debug_id : -1, fscache_object_states_short[obj->state], @@ -249,7 +248,7 @@ static int fscache_objlist_show(struct seq_file *m, void *v) obj->event_mask & FSCACHE_OBJECT_EVENTS_MASK, obj->events, obj->flags, - obj->work.flags); + work_busy(&obj->work)); no_cookie = true; keylen = auxlen = 0; diff --git a/fs/fscache/object.c b/fs/fscache/object.c index 0b589a9b4ffc..b6b897c550ac 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -14,7 +14,6 @@ #define FSCACHE_DEBUG_LEVEL COOKIE #include -#include #include "internal.h" const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = { @@ -50,12 +49,8 @@ const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = { [FSCACHE_OBJECT_DEAD] = "DEAD", }; -static void fscache_object_slow_work_put_ref(struct slow_work *); -static int fscache_object_slow_work_get_ref(struct slow_work *); -static void fscache_object_slow_work_execute(struct slow_work *); -#ifdef CONFIG_SLOW_WORK_DEBUG -static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *); -#endif +static int fscache_get_object(struct fscache_object *); +static void fscache_put_object(struct fscache_object *); static void fscache_initialise_object(struct fscache_object *); static void fscache_lookup_object(struct fscache_object *); static void fscache_object_available(struct fscache_object *); @@ -64,17 +59,6 @@ static void fscache_withdraw_object(struct fscache_object *); static void fscache_enqueue_dependents(struct fscache_object *); static void fscache_dequeue_object(struct fscache_object *); -const struct slow_work_ops fscache_object_slow_work_ops = { - .owner = THIS_MODULE, - .get_ref = fscache_object_slow_work_get_ref, - .put_ref = fscache_object_slow_work_put_ref, - .execute = fscache_object_slow_work_execute, -#ifdef CONFIG_SLOW_WORK_DEBUG - .desc = fscache_object_slow_work_desc, -#endif -}; -EXPORT_SYMBOL(fscache_object_slow_work_ops); - /* * we need to notify the parent when an op completes that we had outstanding * upon it @@ -345,7 +329,7 @@ unsupported_event: /* * execute an object */ -static void fscache_object_slow_work_execute(struct slow_work *work) +void fscache_object_work_func(struct work_struct *work) { struct fscache_object *object = container_of(work, struct fscache_object, work); @@ -359,23 +343,9 @@ static void fscache_object_slow_work_execute(struct slow_work *work) if (object->events & object->event_mask) fscache_enqueue_object(object); clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); + fscache_put_object(object); } - -/* - * describe an object for slow-work debugging - */ -#ifdef CONFIG_SLOW_WORK_DEBUG -static void fscache_object_slow_work_desc(struct slow_work *work, - struct seq_file *m) -{ - struct fscache_object *object = - container_of(work, struct fscache_object, work); - - seq_printf(m, "FSC: OBJ%x: %s", - object->debug_id, - fscache_object_states_short[object->state]); -} -#endif +EXPORT_SYMBOL(fscache_object_work_func); /* * initialise an object @@ -393,7 +363,6 @@ static void fscache_initialise_object(struct fscache_object *object) _enter(""); ASSERT(object->cookie != NULL); ASSERT(object->cookie->parent != NULL); - ASSERT(list_empty(&object->work.link)); if (object->events & ((1 << FSCACHE_OBJECT_EV_ERROR) | (1 << FSCACHE_OBJECT_EV_RELEASE) | @@ -671,10 +640,8 @@ static void fscache_drop_object(struct fscache_object *object) object->parent = NULL; } - /* this just shifts the object release to the slow work processor */ - fscache_stat(&fscache_n_cop_put_object); - 
object->cache->ops->put_object(object); - fscache_stat_d(&fscache_n_cop_put_object); + /* this just shifts the object release to the work processor */ + fscache_put_object(object); _leave(""); } @@ -758,12 +725,10 @@ void fscache_withdrawing_object(struct fscache_cache *cache, } /* - * allow the slow work item processor to get a ref on an object + * get a ref on an object */ -static int fscache_object_slow_work_get_ref(struct slow_work *work) +static int fscache_get_object(struct fscache_object *object) { - struct fscache_object *object = - container_of(work, struct fscache_object, work); int ret; fscache_stat(&fscache_n_cop_grab_object); @@ -773,13 +738,10 @@ static int fscache_object_slow_work_get_ref(struct slow_work *work) } /* - * allow the slow work item processor to discard a ref on a work item + * discard a ref on a work item */ -static void fscache_object_slow_work_put_ref(struct slow_work *work) +static void fscache_put_object(struct fscache_object *object) { - struct fscache_object *object = - container_of(work, struct fscache_object, work); - fscache_stat(&fscache_n_cop_put_object); object->cache->ops->put_object(object); fscache_stat_d(&fscache_n_cop_put_object); @@ -792,8 +754,48 @@ void fscache_enqueue_object(struct fscache_object *object) { _enter("{OBJ%x}", object->debug_id); - slow_work_enqueue(&object->work); + if (fscache_get_object(object) >= 0) { + wait_queue_head_t *cong_wq = + &get_cpu_var(fscache_object_cong_wait); + + if (queue_work(fscache_object_wq, &object->work)) { + if (fscache_object_congested()) + wake_up(cong_wq); + } else + fscache_put_object(object); + + put_cpu_var(fscache_object_cong_wait); + } +} + +/** + * fscache_object_sleep_till_congested - Sleep until object wq is congested + * @timoutp: Scheduler sleep timeout + * + * Allow an object handler to sleep until the object workqueue is congested. + * + * The caller must set up a wake up event before calling this and must have set + * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own + * condition before calling this function as no test is made here. + * + * %true is returned if the object wq is congested, %false otherwise. 
+ */ +bool fscache_object_sleep_till_congested(signed long *timeoutp) +{ + wait_queue_head_t *cong_wq = &__get_cpu_var(fscache_object_cong_wait); + DEFINE_WAIT(wait); + + if (fscache_object_congested()) + return true; + + add_wait_queue_exclusive(cong_wq, &wait); + if (!fscache_object_congested()) + *timeoutp = schedule_timeout(*timeoutp); + finish_wait(cong_wq, &wait); + + return fscache_object_congested(); } +EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested); /* * enqueue the dependents of an object for metadata-type processing @@ -819,9 +821,7 @@ static void fscache_enqueue_dependents(struct fscache_object *object) /* sort onto appropriate lists */ fscache_enqueue_object(dep); - fscache_stat(&fscache_n_cop_put_object); - dep->cache->ops->put_object(dep); - fscache_stat_d(&fscache_n_cop_put_object); + fscache_put_object(dep); if (!list_empty(&object->dependents)) cond_resched_lock(&object->lock); diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index c57db27ac861..27c8df503152 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -21,6 +21,7 @@ #include #include #include +#include #define NR_MAXCACHES BITS_PER_LONG @@ -389,7 +390,7 @@ struct fscache_object { struct fscache_cache *cache; /* cache that supplied this object */ struct fscache_cookie *cookie; /* netfs's file/index object */ struct fscache_object *parent; /* parent object */ - struct slow_work work; /* attention scheduling record */ + struct work_struct work; /* attention scheduling record */ struct list_head dependents; /* FIFO of dependent objects */ struct list_head dep_link; /* link in parent's dependents list */ struct list_head pending_ops; /* unstarted operations on this object */ @@ -411,7 +412,7 @@ extern const char *fscache_object_states[]; (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ (obj)->state >= FSCACHE_OBJECT_DYING) -extern const struct slow_work_ops fscache_object_slow_work_ops; +extern void fscache_object_work_func(struct work_struct *work); /** * fscache_object_init - Initialise a cache object description @@ -433,7 +434,7 @@ void fscache_object_init(struct fscache_object *object, spin_lock_init(&object->lock); INIT_LIST_HEAD(&object->cache_link); INIT_HLIST_NODE(&object->cookie_link); - vslow_work_init(&object->work, &fscache_object_slow_work_ops); + INIT_WORK(&object->work, fscache_object_work_func); INIT_LIST_HEAD(&object->dependents); INIT_LIST_HEAD(&object->dep_link); INIT_LIST_HEAD(&object->pending_ops); @@ -534,6 +535,8 @@ extern void fscache_io_error(struct fscache_cache *cache); extern void fscache_mark_pages_cached(struct fscache_retrieval *op, struct pagevec *pagevec); +extern bool fscache_object_sleep_till_congested(signed long *timeoutp); + extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, const void *data, uint16_t datalen); -- cgit v1.2.3-59-g8ed1b From 8af7c12436803291c90295259db23d371a7ad9cc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jul 2010 22:09:01 +0200 Subject: fscache: convert operation to use workqueue instead of slow-work Make fscache operation to use only workqueue instead of combination of workqueue and slow-work. FSCACHE_OP_SLOW is dropped and FSCACHE_OP_FAST is renamed to FSCACHE_OP_ASYNC and uses newly added fscache_op_wq workqueue to execute op->processor(). fscache_operation_init_slow() is dropped and fscache_operation_init() now takes @processor argument directly. * Unbound workqueue is used. 
* fscache_retrieval_work() is no longer necessary as OP_ASYNC now does the equivalent thing. * sysctl fscache.operation_max_active added to control concurrency. The default value is nr_cpus clamped between 2 and WQ_UNBOUND_MAX_ACTIVE. * debugfs support is dropped for now. Tracing API based debug facility is planned to be added. Signed-off-by: Tejun Heo Acked-by: David Howells --- fs/cachefiles/rdwr.c | 4 +-- fs/fscache/internal.h | 1 + fs/fscache/main.c | 23 +++++++++++++++ fs/fscache/operation.c | 67 ++++++------------------------------------- fs/fscache/page.c | 36 ++++++----------------- include/linux/fscache-cache.h | 37 +++++++----------------- 6 files changed, 53 insertions(+), 115 deletions(-) diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 0f0d41fbb03f..0e3c0924cc3a 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -422,7 +422,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; op->op.flags &= FSCACHE_OP_KEEP_FLAGS; - op->op.flags |= FSCACHE_OP_FAST; + op->op.flags |= FSCACHE_OP_ASYNC; op->op.processor = cachefiles_read_copier; pagevec_init(&pagevec, 0); @@ -729,7 +729,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, pagevec_init(&pagevec, 0); op->op.flags &= FSCACHE_OP_KEEP_FLAGS; - op->op.flags |= FSCACHE_OP_FAST; + op->op.flags |= FSCACHE_OP_ASYNC; op->op.processor = cachefiles_read_copier; INIT_LIST_HEAD(&backpages); diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index 6e0b5fb25231..6a026441c5a6 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -83,6 +83,7 @@ extern unsigned fscache_defer_create; extern unsigned fscache_debug; extern struct kobject *fscache_root; extern struct workqueue_struct *fscache_object_wq; +extern struct workqueue_struct *fscache_op_wq; DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait); static inline bool fscache_object_congested(void) diff --git a/fs/fscache/main.c b/fs/fscache/main.c index bb8d4c35c7a2..44d13ddab2cc 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -42,11 +42,13 @@ MODULE_PARM_DESC(fscache_debug, struct kobject *fscache_root; struct workqueue_struct *fscache_object_wq; +struct workqueue_struct *fscache_op_wq; DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait); /* these values serve as lower bounds, will be adjusted in fscache_init() */ static unsigned fscache_object_max_active = 4; +static unsigned fscache_op_max_active = 2; #ifdef CONFIG_SYSCTL static struct ctl_table_header *fscache_sysctl_header; @@ -74,6 +76,14 @@ ctl_table fscache_sysctls[] = { .proc_handler = fscache_max_active_sysctl, .extra1 = &fscache_object_wq, }, + { + .procname = "operation_max_active", + .data = &fscache_op_max_active, + .maxlen = sizeof(unsigned), + .mode = 0644, + .proc_handler = fscache_max_active_sysctl, + .extra1 = &fscache_op_wq, + }, {} }; @@ -110,6 +120,16 @@ static int __init fscache_init(void) if (!fscache_object_wq) goto error_object_wq; + fscache_op_max_active = + clamp_val(fscache_object_max_active / 2, + fscache_op_max_active, WQ_UNBOUND_MAX_ACTIVE); + + ret = -ENOMEM; + fscache_op_wq = alloc_workqueue("fscache_operation", WQ_UNBOUND, + fscache_op_max_active); + if (!fscache_op_wq) + goto error_op_wq; + for_each_possible_cpu(cpu) init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu)); @@ -152,6 +172,8 @@ error_sysctl: #endif fscache_proc_cleanup(); error_proc: + destroy_workqueue(fscache_op_wq); +error_op_wq: destroy_workqueue(fscache_object_wq); error_object_wq: 
slow_work_unregister_user(THIS_MODULE); @@ -172,6 +194,7 @@ static void __exit fscache_exit(void) kmem_cache_destroy(fscache_cookie_jar); unregister_sysctl_table(fscache_sysctl_header); fscache_proc_cleanup(); + destroy_workqueue(fscache_op_wq); destroy_workqueue(fscache_object_wq); slow_work_unregister_user(THIS_MODULE); printk(KERN_NOTICE "FS-Cache: Unloaded\n"); diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index f17cecafae44..b9f34eaede09 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -42,16 +42,12 @@ void fscache_enqueue_operation(struct fscache_operation *op) fscache_stat(&fscache_n_op_enqueue); switch (op->flags & FSCACHE_OP_TYPE) { - case FSCACHE_OP_FAST: - _debug("queue fast"); + case FSCACHE_OP_ASYNC: + _debug("queue async"); atomic_inc(&op->usage); - if (!schedule_work(&op->fast_work)) + if (!queue_work(fscache_op_wq, &op->work)) fscache_put_operation(op); break; - case FSCACHE_OP_SLOW: - _debug("queue slow"); - slow_work_enqueue(&op->slow_work); - break; case FSCACHE_OP_MYTHREAD: _debug("queue for caller's attention"); break; @@ -455,36 +451,13 @@ void fscache_operation_gc(struct work_struct *work) } /* - * allow the slow work item processor to get a ref on an operation - */ -static int fscache_op_get_ref(struct slow_work *work) -{ - struct fscache_operation *op = - container_of(work, struct fscache_operation, slow_work); - - atomic_inc(&op->usage); - return 0; -} - -/* - * allow the slow work item processor to discard a ref on an operation - */ -static void fscache_op_put_ref(struct slow_work *work) -{ - struct fscache_operation *op = - container_of(work, struct fscache_operation, slow_work); - - fscache_put_operation(op); -} - -/* - * execute an operation using the slow thread pool to provide processing context - * - the caller holds a ref to this object, so we don't need to hold one + * execute an operation using fs_op_wq to provide processing context - + * the caller holds a ref to this object, so we don't need to hold one */ -static void fscache_op_execute(struct slow_work *work) +void fscache_op_work_func(struct work_struct *work) { struct fscache_operation *op = - container_of(work, struct fscache_operation, slow_work); + container_of(work, struct fscache_operation, work); unsigned long start; _enter("{OBJ%x OP%x,%d}", @@ -494,31 +467,7 @@ static void fscache_op_execute(struct slow_work *work) start = jiffies; op->processor(op); fscache_hist(fscache_ops_histogram, start); + fscache_put_operation(op); _leave(""); } - -/* - * describe an operation for slow-work debugging - */ -#ifdef CONFIG_SLOW_WORK_DEBUG -static void fscache_op_desc(struct slow_work *work, struct seq_file *m) -{ - struct fscache_operation *op = - container_of(work, struct fscache_operation, slow_work); - - seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx", - op->object->debug_id, op->debug_id, - op->name, op->state, op->flags); -} -#endif - -const struct slow_work_ops fscache_op_slow_work_ops = { - .owner = THIS_MODULE, - .get_ref = fscache_op_get_ref, - .put_ref = fscache_op_put_ref, - .execute = fscache_op_execute, -#ifdef CONFIG_SLOW_WORK_DEBUG - .desc = fscache_op_desc, -#endif -}; diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 723b889fd219..41c441c2058d 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -105,7 +105,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie, page_busy: /* we might want to wait here, but that could deadlock the allocator as - * the slow-work threads writing to the cache may all end up sleeping + * the work 
threads writing to the cache may all end up sleeping * on memory allocation */ fscache_stat(&fscache_n_store_vmscan_busy); return false; @@ -188,9 +188,8 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) return -ENOMEM; } - fscache_operation_init(op, NULL); - fscache_operation_init_slow(op, fscache_attr_changed_op); - op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE); + fscache_operation_init(op, fscache_attr_changed_op, NULL); + op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE); fscache_set_op_name(op, "Attr"); spin_lock(&cookie->lock); @@ -217,24 +216,6 @@ nobufs: } EXPORT_SYMBOL(__fscache_attr_changed); -/* - * handle secondary execution given to a retrieval op on behalf of the - * cache - */ -static void fscache_retrieval_work(struct work_struct *work) -{ - struct fscache_retrieval *op = - container_of(work, struct fscache_retrieval, op.fast_work); - unsigned long start; - - _enter("{OP%x}", op->op.debug_id); - - start = jiffies; - op->op.processor(&op->op); - fscache_hist(fscache_ops_histogram, start); - fscache_put_operation(&op->op); -} - /* * release a retrieval op reference */ @@ -269,13 +250,12 @@ static struct fscache_retrieval *fscache_alloc_retrieval( return NULL; } - fscache_operation_init(&op->op, fscache_release_retrieval_op); + fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op); op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING); op->mapping = mapping; op->end_io_func = end_io_func; op->context = context; op->start_time = jiffies; - INIT_WORK(&op->op.fast_work, fscache_retrieval_work); INIT_LIST_HEAD(&op->to_do); fscache_set_op_name(&op->op, "Retr"); return op; @@ -795,9 +775,9 @@ int __fscache_write_page(struct fscache_cookie *cookie, if (!op) goto nomem; - fscache_operation_init(&op->op, fscache_release_write_op); - fscache_operation_init_slow(&op->op, fscache_write_op); - op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING); + fscache_operation_init(&op->op, fscache_write_op, + fscache_release_write_op); + op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING); fscache_set_op_name(&op->op, "Write1"); ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); @@ -852,7 +832,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, fscache_stat(&fscache_n_store_ops); fscache_stat(&fscache_n_stores_ok); - /* the slow work queue now carries its own ref on the object */ + /* the work queue now carries its own ref on the object */ fscache_put_operation(&op->op); _leave(" = 0"); return 0; diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 27c8df503152..17ed9c1dbfbe 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -77,18 +77,14 @@ typedef void (*fscache_operation_release_t)(struct fscache_operation *op); typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); struct fscache_operation { - union { - struct work_struct fast_work; /* record for fast ops */ - struct slow_work slow_work; /* record for (very) slow ops */ - }; + struct work_struct work; /* record for async ops */ struct list_head pend_link; /* link in object->pending_ops */ struct fscache_object *object; /* object to be operated upon */ unsigned long flags; #define FSCACHE_OP_TYPE 0x000f /* operation type */ -#define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */ -#define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */ -#define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done be issuing thread, not pool */ +#define 
FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */ +#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */ #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ #define FSCACHE_OP_DEAD 6 /* op is now dead */ @@ -106,7 +102,8 @@ struct fscache_operation { /* operation releaser */ fscache_operation_release_t release; -#ifdef CONFIG_SLOW_WORK_DEBUG +#ifdef CONFIG_WORKQUEUE_DEBUGFS + struct work_struct put_work; /* work to delay operation put */ const char *name; /* operation name */ const char *state; /* operation state */ #define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0) @@ -118,7 +115,7 @@ struct fscache_operation { }; extern atomic_t fscache_op_debug_id; -extern const struct slow_work_ops fscache_op_slow_work_ops; +extern void fscache_op_work_func(struct work_struct *work); extern void fscache_enqueue_operation(struct fscache_operation *); extern void fscache_put_operation(struct fscache_operation *); @@ -129,33 +126,21 @@ extern void fscache_put_operation(struct fscache_operation *); * @release: The release function to assign * * Do basic initialisation of an operation. The caller must still set flags, - * object, either fast_work or slow_work if necessary, and processor if needed. + * object and processor if needed. */ static inline void fscache_operation_init(struct fscache_operation *op, - fscache_operation_release_t release) + fscache_operation_processor_t processor, + fscache_operation_release_t release) { + INIT_WORK(&op->work, fscache_op_work_func); atomic_set(&op->usage, 1); op->debug_id = atomic_inc_return(&fscache_op_debug_id); + op->processor = processor; op->release = release; INIT_LIST_HEAD(&op->pend_link); fscache_set_op_state(op, "Init"); } -/** - * fscache_operation_init_slow - Do additional initialisation of a slow op - * @op: The operation to initialise - * @processor: The processor function to assign - * - * Do additional initialisation of an operation as required for slow work. - */ -static inline -void fscache_operation_init_slow(struct fscache_operation *op, - fscache_operation_processor_t processor) -{ - op->processor = processor; - slow_work_init(&op->slow_work, &fscache_op_slow_work_ops); -} - /* * data read operation */ -- cgit v1.2.3-59-g8ed1b From d098adfb7d281258173a43151483e52e21761021 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jul 2010 22:09:01 +0200 Subject: fscache: drop references to slow-work fscache no longer uses slow-work. Drop references to it. Signed-off-by: Tejun Heo Acked-by: David Howells --- fs/fscache/Kconfig | 1 - fs/fscache/main.c | 7 ------- include/linux/fscache-cache.h | 1 - 3 files changed, 9 deletions(-) diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig index cc94bb9563f2..3f6dfa989881 100644 --- a/fs/fscache/Kconfig +++ b/fs/fscache/Kconfig @@ -1,7 +1,6 @@ config FSCACHE tristate "General filesystem local caching manager" - select SLOW_WORK help This option enables a generic filesystem caching manager that can be used by various network and other filesystems to cache data locally. 
diff --git a/fs/fscache/main.c b/fs/fscache/main.c index 44d13ddab2cc..500936d9fff2 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -106,10 +106,6 @@ static int __init fscache_init(void) unsigned int cpu; int ret; - ret = slow_work_register_user(THIS_MODULE); - if (ret < 0) - goto error_slow_work; - fscache_object_max_active = clamp_val(nr_cpus, fscache_object_max_active, WQ_UNBOUND_MAX_ACTIVE); @@ -176,8 +172,6 @@ error_proc: error_op_wq: destroy_workqueue(fscache_object_wq); error_object_wq: - slow_work_unregister_user(THIS_MODULE); -error_slow_work: return ret; } @@ -196,7 +190,6 @@ static void __exit fscache_exit(void) fscache_proc_cleanup(); destroy_workqueue(fscache_op_wq); destroy_workqueue(fscache_object_wq); - slow_work_unregister_user(THIS_MODULE); printk(KERN_NOTICE "FS-Cache: Unloaded\n"); } diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 17ed9c1dbfbe..b8581c09d19f 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -20,7 +20,6 @@ #include #include -#include #include #define NR_MAXCACHES BITS_PER_LONG -- cgit v1.2.3-59-g8ed1b From 9b646972467fb5fdc677f9e4251875db20bdbb64 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jul 2010 22:09:02 +0200 Subject: cifs: use workqueue instead of slow-work Workqueue can now handle high concurrency. Use system_nrt_wq instead of slow-work. * Updated is_valid_oplock_break() to not call cifs_oplock_break_put() as advised by Steve French. It might cause deadlock. Instead, reference is increased after queueing succeeded and cifs_oplock_break() briefly grabs GlobalSMBSeslock before putting the cfile to make sure it doesn't put before the matching get is finished. * Anton Blanchard reported that cifs conversion was using now gone system_single_wq. Use system_nrt_wq which provides non-reentrance guarantee which is enough and much better. 
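
For illustration, a minimal sketch of the reference counting around
queue_work() on system_nrt_wq. The example_* names are hypothetical and stand
in for cifsFileInfo and its get/put helpers; this sketch uses the simpler
get-before-queue variant (as fscache_enqueue_object() does earlier in this
series), whereas the cifs conversion below takes the reference only after
queue_work() succeeds and closes the resulting window by briefly taking
GlobalSMBSeslock in cifs_oplock_break():

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/slab.h>

	/* hypothetical refcounted item owning a work_struct */
	struct example_item {
		struct work_struct	work;
		atomic_t		count;
	};

	static void example_put(struct example_item *item)
	{
		if (atomic_dec_and_test(&item->count))
			kfree(item);
	}

	static void example_work_fn(struct work_struct *work)
	{
		struct example_item *item =
			container_of(work, struct example_item, work);

		/* ... oplock-break style processing goes here ... */

		example_put(item);		/* drop the queueing reference */
	}

	static void example_item_init(struct example_item *item)
	{
		INIT_WORK(&item->work, example_work_fn);
		atomic_set(&item->count, 1);	/* caller's reference */
	}

	static void example_queue(struct example_item *item)
	{
		/* pin the item, drop the pin again if it was already queued */
		atomic_inc(&item->count);
		if (!queue_work(system_nrt_wq, &item->work))
			example_put(item);
	}
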
Signed-off-by: Tejun Heo Acked-by: Steve French Cc: Anton Blanchard --- fs/cifs/Kconfig | 1 - fs/cifs/cifsfs.c | 5 ----- fs/cifs/cifsglob.h | 8 +++++--- fs/cifs/dir.c | 2 +- fs/cifs/file.c | 30 +++++++++++++----------------- fs/cifs/misc.c | 20 ++++++++++++-------- 6 files changed, 31 insertions(+), 35 deletions(-) diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 80f352596807..6994a0f54f02 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig @@ -2,7 +2,6 @@ config CIFS tristate "CIFS support (advanced network filesystem, SMBFS successor)" depends on INET select NLS - select SLOW_WORK help This is the client VFS module for the Common Internet File System (CIFS) protocol which is the successor to the Server Message Block diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 78c02eb4cb1f..4c075177c8b6 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -917,15 +917,10 @@ init_cifs(void) if (rc) goto out_unregister_key_type; #endif - rc = slow_work_register_user(THIS_MODULE); - if (rc) - goto out_unregister_resolver_key; return 0; - out_unregister_resolver_key: #ifdef CONFIG_CIFS_DFS_UPCALL - unregister_key_type(&key_type_dns_resolver); out_unregister_key_type: #endif #ifdef CONFIG_CIFS_UPCALL diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index a88479ceaad5..f5a1f9bb3a9f 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include "cifs_fs_sb.h" #include "cifsacl.h" /* @@ -363,7 +363,7 @@ struct cifsFileInfo { atomic_t count; /* reference count */ struct mutex fh_mutex; /* prevents reopen race after dead ses*/ struct cifs_search_info srch_inf; - struct slow_work oplock_break; /* slow_work job for oplock breaks */ + struct work_struct oplock_break; /* work for oplock breaks */ }; /* Take a reference on the file private data */ @@ -732,4 +732,6 @@ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */ GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */ GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/ -extern const struct slow_work_ops cifs_oplock_break_ops; +void cifs_oplock_break(struct work_struct *work); +void cifs_oplock_break_get(struct cifsFileInfo *cfile); +void cifs_oplock_break_put(struct cifsFileInfo *cfile); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 391816b461ca..b066e73c4153 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -162,7 +162,7 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, mutex_init(&pCifsFile->lock_mutex); INIT_LIST_HEAD(&pCifsFile->llist); atomic_set(&pCifsFile->count, 1); - slow_work_init(&pCifsFile->oplock_break, &cifs_oplock_break_ops); + INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break); write_lock(&GlobalSMBSeslock); list_add(&pCifsFile->tlist, &cifs_sb->tcon->openFileList); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 75541af4b3db..e767bfa7978b 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2295,8 +2295,7 @@ out: return rc; } -static void -cifs_oplock_break(struct slow_work *work) +void cifs_oplock_break(struct work_struct *work) { struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); @@ -2333,33 +2332,30 @@ cifs_oplock_break(struct slow_work *work) LOCKING_ANDX_OPLOCK_RELEASE, false); cFYI(1, "Oplock release rc = %d", rc); } + + /* + * We might have kicked in before is_valid_oplock_break() + * finished grabbing reference for us. Make sure it's done by + * waiting for GlobalSMSSeslock. 
+ */ + write_lock(&GlobalSMBSeslock); + write_unlock(&GlobalSMBSeslock); + + cifs_oplock_break_put(cfile); } -static int -cifs_oplock_break_get(struct slow_work *work) +void cifs_oplock_break_get(struct cifsFileInfo *cfile) { - struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, - oplock_break); mntget(cfile->mnt); cifsFileInfo_get(cfile); - return 0; } -static void -cifs_oplock_break_put(struct slow_work *work) +void cifs_oplock_break_put(struct cifsFileInfo *cfile) { - struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, - oplock_break); mntput(cfile->mnt); cifsFileInfo_put(cfile); } -const struct slow_work_ops cifs_oplock_break_ops = { - .get_ref = cifs_oplock_break_get, - .put_ref = cifs_oplock_break_put, - .execute = cifs_oplock_break, -}; - const struct address_space_operations cifs_addr_ops = { .readpage = cifs_readpage, .readpages = cifs_readpages, diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 1394aa37f26c..3ccadc1326d6 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -498,7 +498,6 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) struct cifsTconInfo *tcon; struct cifsInodeInfo *pCifsInode; struct cifsFileInfo *netfile; - int rc; cFYI(1, "Checking for oplock break or dnotify response"); if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) && @@ -583,13 +582,18 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) pCifsInode->clientCanCacheAll = false; if (pSMB->OplockLevel == 0) pCifsInode->clientCanCacheRead = false; - rc = slow_work_enqueue(&netfile->oplock_break); - if (rc) { - cERROR(1, "failed to enqueue oplock " - "break: %d\n", rc); - } else { - netfile->oplock_break_cancelled = false; - } + + /* + * cifs_oplock_break_put() can't be called + * from here. Get reference after queueing + * succeeded. cifs_oplock_break() will + * synchronize using GlobalSMSSeslock. + */ + if (queue_work(system_nrt_wq, + &netfile->oplock_break)) + cifs_oplock_break_get(netfile); + netfile->oplock_break_cancelled = false; + read_unlock(&GlobalSMBSeslock); read_unlock(&cifs_tcp_ses_lock); return true; -- cgit v1.2.3-59-g8ed1b From 991ea75cb1df7188d209274b3d51c105b4f18ffe Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jul 2010 22:09:02 +0200 Subject: drm: use workqueue instead of slow-work Workqueue can now handle high concurrency. Convert drm_crtc_helper to use system_nrt_wq instead of slow-work. The conversion is mostly straight forward. One difference is that drm_helper_hpd_irq_event() no longer blocks and can be called from any context. 
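
For illustration, a minimal sketch of the delayed-work polling pattern the
conversion relies on. The example_poll_* names and EXAMPLE_POLL_PERIOD are
hypothetical; the kick helper mirrors the new, non-blocking
drm_helper_hpd_irq_event() in the diff below:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	#define EXAMPLE_POLL_PERIOD	(10 * HZ)	/* hypothetical poll interval */

	static struct delayed_work example_poll_work;

	static void example_poll_execute(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);

		/* ... poll the outputs ... */

		/* re-arm for the next cycle; a real driver would check whether
		 * polling is still wanted before requeueing */
		queue_delayed_work(system_nrt_wq, dwork, EXAMPLE_POLL_PERIOD);
	}

	static void example_poll_init(void)
	{
		INIT_DELAYED_WORK(&example_poll_work, example_poll_execute);
		queue_delayed_work(system_nrt_wq, &example_poll_work,
				   EXAMPLE_POLL_PERIOD);
	}

	/* non-blocking kick: kill the timer and run the poll immediately */
	static void example_hpd_event(void)
	{
		cancel_delayed_work(&example_poll_work);
		queue_delayed_work(system_nrt_wq, &example_poll_work, 0);
	}

	static void example_poll_fini(void)
	{
		cancel_delayed_work_sync(&example_poll_work);
	}
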
Signed-off-by: Tejun Heo Acked-by: David Airlie Cc: dri-devel@lists.freedesktop.org --- drivers/gpu/drm/drm_crtc_helper.c | 29 ++++++++++------------------- include/drm/drm_crtc.h | 3 +-- 2 files changed, 11 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 9b2a54117c91..7fa33805f392 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -808,13 +808,11 @@ int drm_helper_resume_force_mode(struct drm_device *dev) } EXPORT_SYMBOL(drm_helper_resume_force_mode); -static struct slow_work_ops output_poll_ops; - #define DRM_OUTPUT_POLL_PERIOD (10*HZ) -static void output_poll_execute(struct slow_work *work) +static void output_poll_execute(struct work_struct *work) { - struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work); - struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work); + struct delayed_work *delayed_work = to_delayed_work(work); + struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work); struct drm_connector *connector; enum drm_connector_status old_status, status; bool repoll = false, changed = false; @@ -854,7 +852,7 @@ static void output_poll_execute(struct slow_work *work) } if (repoll) { - ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD); + ret = queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD); if (ret) DRM_ERROR("delayed enqueue failed %d\n", ret); } @@ -864,7 +862,7 @@ void drm_kms_helper_poll_disable(struct drm_device *dev) { if (!dev->mode_config.poll_enabled) return; - delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); + cancel_delayed_work_sync(&dev->mode_config.output_poll_work); } EXPORT_SYMBOL(drm_kms_helper_poll_disable); @@ -880,7 +878,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev) } if (poll) { - ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD); + ret = queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); if (ret) DRM_ERROR("delayed enqueue failed %d\n", ret); } @@ -889,9 +887,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_enable); void drm_kms_helper_poll_init(struct drm_device *dev) { - slow_work_register_user(THIS_MODULE); - delayed_slow_work_init(&dev->mode_config.output_poll_slow_work, - &output_poll_ops); + INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute); dev->mode_config.poll_enabled = true; drm_kms_helper_poll_enable(dev); @@ -901,7 +897,6 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init); void drm_kms_helper_poll_fini(struct drm_device *dev) { drm_kms_helper_poll_disable(dev); - slow_work_unregister_user(THIS_MODULE); } EXPORT_SYMBOL(drm_kms_helper_poll_fini); @@ -909,12 +904,8 @@ void drm_helper_hpd_irq_event(struct drm_device *dev) { if (!dev->mode_config.poll_enabled) return; - delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); - /* schedule a slow work asap */ - delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0); + /* kill timer and schedule immediate execution, this doesn't block */ + cancel_delayed_work(&dev->mode_config.output_poll_work); + queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); } EXPORT_SYMBOL(drm_helper_hpd_irq_event); - -static struct slow_work_ops output_poll_ops = { - .execute = output_poll_execute, -}; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 
93a1a31b9c2d..c707270bff5a 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -31,7 +31,6 @@ #include #include -#include struct drm_device; struct drm_mode_set; @@ -595,7 +594,7 @@ struct drm_mode_config { /* output poll support */ bool poll_enabled; - struct delayed_slow_work output_poll_slow_work; + struct delayed_work output_poll_work; /* pointers to standard properties */ struct list_head property_blob_list; -- cgit v1.2.3-59-g8ed1b From 6ecd7c2dd9f5dd4f6e8f65c8027159f9c73b0e4c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jul 2010 22:09:02 +0200 Subject: gfs2: use workqueue instead of slow-work Workqueue can now handle high concurrency. Convert gfs to use workqueue instead of slow-work. * Steven pointed out that recovery path might be run from allocation path and thus requires forward progress guarantee without memory allocation. Create and use gfs_recovery_wq with rescuer. Please note that forward progress wasn't guaranteed with slow-work. * Updated to use non-reentrant workqueue. Signed-off-by: Tejun Heo Acked-by: Steven Whitehouse --- fs/gfs2/Kconfig | 1 - fs/gfs2/incore.h | 3 +-- fs/gfs2/main.c | 14 ++++++++------ fs/gfs2/ops_fstype.c | 8 ++++---- fs/gfs2/recovery.c | 54 ++++++++++++++++++++-------------------------------- fs/gfs2/recovery.h | 6 ++++-- fs/gfs2/sys.c | 3 ++- 7 files changed, 40 insertions(+), 49 deletions(-) diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig index a47b43107112..cc9665522148 100644 --- a/fs/gfs2/Kconfig +++ b/fs/gfs2/Kconfig @@ -7,7 +7,6 @@ config GFS2_FS select IP_SCTP if DLM_SCTP select FS_POSIX_ACL select CRC32 - select SLOW_WORK select QUOTACTL help A cluster filesystem. diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index b5d7363b22da..dd8f2e63d15a 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -12,7 +12,6 @@ #include #include -#include #include #include @@ -383,7 +382,7 @@ struct gfs2_journal_extent { struct gfs2_jdesc { struct list_head jd_list; struct list_head extent_list; - struct slow_work jd_work; + struct work_struct jd_work; struct inode *jd_inode; unsigned long jd_flags; #define JDF_RECOVERY 1 diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index fb2a5f93b7c3..b1e9630eb46a 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -15,7 +15,6 @@ #include #include #include -#include #include "gfs2.h" #include "incore.h" @@ -24,6 +23,7 @@ #include "util.h" #include "glock.h" #include "quota.h" +#include "recovery.h" static struct shrinker qd_shrinker = { .shrink = gfs2_shrink_qd_memory, @@ -138,9 +138,11 @@ static int __init init_gfs2_fs(void) if (error) goto fail_unregister; - error = slow_work_register_user(THIS_MODULE); - if (error) - goto fail_slow; + error = -ENOMEM; + gfs_recovery_wq = alloc_workqueue("gfs_recovery", + WQ_NON_REENTRANT | WQ_RESCUER, 0); + if (!gfs_recovery_wq) + goto fail_wq; gfs2_register_debugfs(); @@ -148,7 +150,7 @@ static int __init init_gfs2_fs(void) return 0; -fail_slow: +fail_wq: unregister_filesystem(&gfs2meta_fs_type); fail_unregister: unregister_filesystem(&gfs2_fs_type); @@ -190,7 +192,7 @@ static void __exit exit_gfs2_fs(void) gfs2_unregister_debugfs(); unregister_filesystem(&gfs2_fs_type); unregister_filesystem(&gfs2meta_fs_type); - slow_work_unregister_user(THIS_MODULE); + destroy_workqueue(gfs_recovery_wq); kmem_cache_destroy(gfs2_quotad_cachep); kmem_cache_destroy(gfs2_rgrpd_cachep); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 3593b3a7290e..9a08e1bd6fbd 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -17,7 +17,6 @@ #include #include 
#include -#include #include #include "gfs2.h" @@ -673,7 +672,7 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) break; INIT_LIST_HEAD(&jd->extent_list); - slow_work_init(&jd->jd_work, &gfs2_recover_ops); + INIT_WORK(&jd->jd_work, gfs2_recover_func); jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1); if (!jd->jd_inode || IS_ERR(jd->jd_inode)) { if (!jd->jd_inode) @@ -782,7 +781,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) if (sdp->sd_lockstruct.ls_first) { unsigned int x; for (x = 0; x < sdp->sd_journals; x++) { - error = gfs2_recover_journal(gfs2_jdesc_find(sdp, x)); + error = gfs2_recover_journal(gfs2_jdesc_find(sdp, x), + true); if (error) { fs_err(sdp, "error recovering journal %u: %d\n", x, error); @@ -792,7 +792,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) gfs2_others_may_mount(sdp); } else if (!sdp->sd_args.ar_spectator) { - error = gfs2_recover_journal(sdp->sd_jdesc); + error = gfs2_recover_journal(sdp->sd_jdesc, true); if (error) { fs_err(sdp, "error recovering my journal: %d\n", error); goto fail_jinode_gh; diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index 4b9bece3d437..f7f89a94a5a4 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c @@ -14,7 +14,6 @@ #include #include #include -#include #include "gfs2.h" #include "incore.h" @@ -28,6 +27,8 @@ #include "util.h" #include "dir.h" +struct workqueue_struct *gfs_recovery_wq; + int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk, struct buffer_head **bh) { @@ -443,23 +444,7 @@ static void gfs2_recovery_done(struct gfs2_sbd *sdp, unsigned int jid, kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp); } -static int gfs2_recover_get_ref(struct slow_work *work) -{ - struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work); - if (test_and_set_bit(JDF_RECOVERY, &jd->jd_flags)) - return -EBUSY; - return 0; -} - -static void gfs2_recover_put_ref(struct slow_work *work) -{ - struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work); - clear_bit(JDF_RECOVERY, &jd->jd_flags); - smp_mb__after_clear_bit(); - wake_up_bit(&jd->jd_flags, JDF_RECOVERY); -} - -static void gfs2_recover_work(struct slow_work *work) +void gfs2_recover_func(struct work_struct *work) { struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work); struct gfs2_inode *ip = GFS2_I(jd->jd_inode); @@ -578,7 +563,7 @@ static void gfs2_recover_work(struct slow_work *work) gfs2_glock_dq_uninit(&j_gh); fs_info(sdp, "jid=%u: Done\n", jd->jd_jid); - return; + goto done; fail_gunlock_tr: gfs2_glock_dq_uninit(&t_gh); @@ -590,32 +575,35 @@ fail_gunlock_j: } fs_info(sdp, "jid=%u: %s\n", jd->jd_jid, (error) ? 
"Failed" : "Done"); - fail: gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP); +done: + clear_bit(JDF_RECOVERY, &jd->jd_flags); + smp_mb__after_clear_bit(); + wake_up_bit(&jd->jd_flags, JDF_RECOVERY); } -struct slow_work_ops gfs2_recover_ops = { - .owner = THIS_MODULE, - .get_ref = gfs2_recover_get_ref, - .put_ref = gfs2_recover_put_ref, - .execute = gfs2_recover_work, -}; - - static int gfs2_recovery_wait(void *word) { schedule(); return 0; } -int gfs2_recover_journal(struct gfs2_jdesc *jd) +int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait) { int rv; - rv = slow_work_enqueue(&jd->jd_work); - if (rv) - return rv; - wait_on_bit(&jd->jd_flags, JDF_RECOVERY, gfs2_recovery_wait, TASK_UNINTERRUPTIBLE); + + if (test_and_set_bit(JDF_RECOVERY, &jd->jd_flags)) + return -EBUSY; + + /* we have JDF_RECOVERY, queue should always succeed */ + rv = queue_work(gfs_recovery_wq, &jd->jd_work); + BUG_ON(!rv); + + if (wait) + wait_on_bit(&jd->jd_flags, JDF_RECOVERY, gfs2_recovery_wait, + TASK_UNINTERRUPTIBLE); + return 0; } diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h index 1616ac22569a..2226136c7647 100644 --- a/fs/gfs2/recovery.h +++ b/fs/gfs2/recovery.h @@ -12,6 +12,8 @@ #include "incore.h" +extern struct workqueue_struct *gfs_recovery_wq; + static inline void gfs2_replay_incr_blk(struct gfs2_sbd *sdp, unsigned int *blk) { if (++*blk == sdp->sd_jdesc->jd_blocks) @@ -27,8 +29,8 @@ extern void gfs2_revoke_clean(struct gfs2_sbd *sdp); extern int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head); -extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd); -extern struct slow_work_ops gfs2_recover_ops; +extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait); +extern void gfs2_recover_func(struct work_struct *work); #endif /* __RECOVERY_DOT_H__ */ diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index 37f5393e68e6..6b60316ae327 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -25,6 +25,7 @@ #include "quota.h" #include "util.h" #include "glops.h" +#include "recovery.h" struct gfs2_attr { struct attribute attr; @@ -352,7 +353,7 @@ static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len) list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { if (jd->jd_jid != jid) continue; - rv = slow_work_enqueue(&jd->jd_work); + rv = gfs2_recover_journal(jd, false); break; } out: -- cgit v1.2.3-59-g8ed1b From 181a51f6e040d0ac006d6adaf4a031ffa440f41c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Jul 2010 22:09:02 +0200 Subject: slow-work: kill it slow-work doesn't have any user left. Kill it. 
Signed-off-by: Tejun Heo Acked-by: David Howells --- Documentation/slow-work.txt | 322 ------------- include/linux/slow-work.h | 163 ------- init/Kconfig | 24 - kernel/Makefile | 2 - kernel/slow-work-debugfs.c | 227 --------- kernel/slow-work.c | 1068 ------------------------------------------- kernel/slow-work.h | 72 --- kernel/sysctl.c | 8 - 8 files changed, 1886 deletions(-) delete mode 100644 Documentation/slow-work.txt delete mode 100644 include/linux/slow-work.h delete mode 100644 kernel/slow-work-debugfs.c delete mode 100644 kernel/slow-work.c delete mode 100644 kernel/slow-work.h diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt deleted file mode 100644 index 9dbf4470c7e1..000000000000 --- a/Documentation/slow-work.txt +++ /dev/null @@ -1,322 +0,0 @@ - ==================================== - SLOW WORK ITEM EXECUTION THREAD POOL - ==================================== - -By: David Howells - -The slow work item execution thread pool is a pool of threads for performing -things that take a relatively long time, such as making mkdir calls. -Typically, when processing something, these items will spend a lot of time -blocking a thread on I/O, thus making that thread unavailable for doing other -work. - -The standard workqueue model is unsuitable for this class of work item as that -limits the owner to a single thread or a single thread per CPU. For some -tasks, however, more threads - or fewer - are required. - -There is just one pool per system. It contains no threads unless something -wants to use it - and that something must register its interest first. When -the pool is active, the number of threads it contains is dynamic, varying -between a maximum and minimum setting, depending on the load. - - -==================== -CLASSES OF WORK ITEM -==================== - -This pool support two classes of work items: - - (*) Slow work items. - - (*) Very slow work items. - -The former are expected to finish much quicker than the latter. - -An operation of the very slow class may do a batch combination of several -lookups, mkdirs, and a create for instance. - -An operation of the ordinarily slow class may, for example, write stuff or -expand files, provided the time taken to do so isn't too long. - -Operations of both types may sleep during execution, thus tying up the thread -loaned to it. - -A further class of work item is available, based on the slow work item class: - - (*) Delayed slow work items. - -These are slow work items that have a timer to defer queueing of the item for -a while. - - -THREAD-TO-CLASS ALLOCATION --------------------------- - -Not all the threads in the pool are available to work on very slow work items. -The number will be between one and one fewer than the number of active threads. -This is configurable (see the "Pool Configuration" section). - -All the threads are available to work on ordinarily slow work items, but a -percentage of the threads will prefer to work on very slow work items. - -The configuration ensures that at least one thread will be available to work on -very slow work items, and at least one thread will be available that won't work -on very slow work items at all. - - -===================== -USING SLOW WORK ITEMS -===================== - -Firstly, a module or subsystem wanting to make use of slow work items must -register its interest: - - int ret = slow_work_register_user(struct module *module); - -This will return 0 if successful, or a -ve error upon failure. 
The module -pointer should be the module interested in using this facility (almost -certainly THIS_MODULE). - - -Slow work items may then be set up by: - - (1) Declaring a slow_work struct type variable: - - #include - - struct slow_work myitem; - - (2) Declaring the operations to be used for this item: - - struct slow_work_ops myitem_ops = { - .get_ref = myitem_get_ref, - .put_ref = myitem_put_ref, - .execute = myitem_execute, - }; - - [*] For a description of the ops, see section "Item Operations". - - (3) Initialising the item: - - slow_work_init(&myitem, &myitem_ops); - - or: - - delayed_slow_work_init(&myitem, &myitem_ops); - - or: - - vslow_work_init(&myitem, &myitem_ops); - - depending on its class. - -A suitably set up work item can then be enqueued for processing: - - int ret = slow_work_enqueue(&myitem); - -This will return a -ve error if the thread pool is unable to gain a reference -on the item, 0 otherwise, or (for delayed work): - - int ret = delayed_slow_work_enqueue(&myitem, my_jiffy_delay); - - -The items are reference counted, so there ought to be no need for a flush -operation. But as the reference counting is optional, means to cancel -existing work items are also included: - - cancel_slow_work(&myitem); - cancel_delayed_slow_work(&myitem); - -can be used to cancel pending work. The above cancel function waits for -existing work to have been executed (or prevent execution of them, depending -on timing). - - -When all a module's slow work items have been processed, and the -module has no further interest in the facility, it should unregister its -interest: - - slow_work_unregister_user(struct module *module); - -The module pointer is used to wait for all outstanding work items for that -module before completing the unregistration. This prevents the put_ref() code -from being taken away before it completes. module should almost certainly be -THIS_MODULE. - - -================ -HELPER FUNCTIONS -================ - -The slow-work facility provides a function by which it can be determined -whether or not an item is queued for later execution: - - bool queued = slow_work_is_queued(struct slow_work *work); - -If it returns false, then the item is not on the queue (it may be executing -with a requeue pending). This can be used to work out whether an item on which -another depends is on the queue, thus allowing a dependent item to be queued -after it. - -If the above shows an item on which another depends not to be queued, then the -owner of the dependent item might need to wait. However, to avoid locking up -the threads unnecessarily be sleeping in them, it can make sense under some -circumstances to return the work item to the queue, thus deferring it until -some other items have had a chance to make use of the yielded thread. - -To yield a thread and defer an item, the work function should simply enqueue -the work item again and return. However, this doesn't work if there's nothing -actually on the queue, as the thread just vacated will jump straight back into -the item's work function, thus busy waiting on a CPU. 
- -Instead, the item should use the thread to wait for the dependency to go away, -but rather than using schedule() or schedule_timeout() to sleep, it should use -the following function: - - bool requeue = slow_work_sleep_till_thread_needed( - struct slow_work *work, - signed long *_timeout); - -This will add a second wait and then sleep, such that it will be woken up if -either something appears on the queue that could usefully make use of the -thread - and behind which this item can be queued, or if the event the caller -set up to wait for happens. True will be returned if something else appeared -on the queue and this work function should perhaps return, of false if -something else woke it up. The timeout is as for schedule_timeout(). - -For example: - - wq = bit_waitqueue(&my_flags, MY_BIT); - init_wait(&wait); - requeue = false; - do { - prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); - if (!test_bit(MY_BIT, &my_flags)) - break; - requeue = slow_work_sleep_till_thread_needed(&my_work, - &timeout); - } while (timeout > 0 && !requeue); - finish_wait(wq, &wait); - if (!test_bit(MY_BIT, &my_flags) - goto do_my_thing; - if (requeue) - return; // to slow_work - - -=============== -ITEM OPERATIONS -=============== - -Each work item requires a table of operations of type struct slow_work_ops. -Only ->execute() is required; the getting and putting of a reference and the -describing of an item are all optional. - - (*) Get a reference on an item: - - int (*get_ref)(struct slow_work *work); - - This allows the thread pool to attempt to pin an item by getting a - reference on it. This function should return 0 if the reference was - granted, or a -ve error otherwise. If an error is returned, - slow_work_enqueue() will fail. - - The reference is held whilst the item is queued and whilst it is being - executed. The item may then be requeued with the same reference held, or - the reference will be released. - - (*) Release a reference on an item: - - void (*put_ref)(struct slow_work *work); - - This allows the thread pool to unpin an item by releasing the reference on - it. The thread pool will not touch the item again once this has been - called. - - (*) Execute an item: - - void (*execute)(struct slow_work *work); - - This should perform the work required of the item. It may sleep, it may - perform disk I/O and it may wait for locks. - - (*) View an item through /proc: - - void (*desc)(struct slow_work *work, struct seq_file *m); - - If supplied, this should print to 'm' a small string describing the work - the item is to do. This should be no more than about 40 characters, and - shouldn't include a newline character. - - See the 'Viewing executing and queued items' section below. - - -================== -POOL CONFIGURATION -================== - -The slow-work thread pool has a number of configurables: - - (*) /proc/sys/kernel/slow-work/min-threads - - The minimum number of threads that should be in the pool whilst it is in - use. This may be anywhere between 2 and max-threads. - - (*) /proc/sys/kernel/slow-work/max-threads - - The maximum number of threads that should in the pool. This may be - anywhere between min-threads and 255 or NR_CPUS * 2, whichever is greater. - - (*) /proc/sys/kernel/slow-work/vslow-percentage - - The percentage of active threads in the pool that may be used to execute - very slow work items. This may be between 1 and 99. The resultant number - is bounded to between 1 and one fewer than the number of active threads. 
- This ensures there is always at least one thread that can process very - slow work items, and always at least one thread that won't. - - -================================== -VIEWING EXECUTING AND QUEUED ITEMS -================================== - -If CONFIG_SLOW_WORK_DEBUG is enabled, a debugfs file is made available: - - /sys/kernel/debug/slow_work/runqueue - -through which the list of work items being executed and the queues of items to -be executed may be viewed. The owner of a work item is given the chance to -add some information of its own. - -The contents look something like the following: - - THR PID ITEM ADDR FL MARK DESC - === ===== ================ == ===== ========== - 0 3005 ffff880023f52348 a 952ms FSC: OBJ17d3: LOOK - 1 3006 ffff880024e33668 2 160ms FSC: OBJ17e5 OP60d3b: Write1/Store fl=2 - 2 3165 ffff8800296dd180 a 424ms FSC: OBJ17e4: LOOK - 3 4089 ffff8800262c8d78 a 212ms FSC: OBJ17ea: CRTN - 4 4090 ffff88002792bed8 2 388ms FSC: OBJ17e8 OP60d36: Write1/Store fl=2 - 5 4092 ffff88002a0ef308 2 388ms FSC: OBJ17e7 OP60d2e: Write1/Store fl=2 - 6 4094 ffff88002abaf4b8 2 132ms FSC: OBJ17e2 OP60d4e: Write1/Store fl=2 - 7 4095 ffff88002bb188e0 a 388ms FSC: OBJ17e9: CRTN - vsq - ffff880023d99668 1 308ms FSC: OBJ17e0 OP60f91: Write1/EnQ fl=2 - vsq - ffff8800295d1740 1 212ms FSC: OBJ16be OP4d4b6: Write1/EnQ fl=2 - vsq - ffff880025ba3308 1 160ms FSC: OBJ179a OP58dec: Write1/EnQ fl=2 - vsq - ffff880024ec83e0 1 160ms FSC: OBJ17ae OP599f2: Write1/EnQ fl=2 - vsq - ffff880026618e00 1 160ms FSC: OBJ17e6 OP60d33: Write1/EnQ fl=2 - vsq - ffff880025a2a4b8 1 132ms FSC: OBJ16a2 OP4d583: Write1/EnQ fl=2 - vsq - ffff880023cbe6d8 9 212ms FSC: OBJ17eb: LOOK - vsq - ffff880024d37590 9 212ms FSC: OBJ17ec: LOOK - vsq - ffff880027746cb0 9 212ms FSC: OBJ17ed: LOOK - vsq - ffff880024d37ae8 9 212ms FSC: OBJ17ee: LOOK - vsq - ffff880024d37cb0 9 212ms FSC: OBJ17ef: LOOK - vsq - ffff880025036550 9 212ms FSC: OBJ17f0: LOOK - vsq - ffff8800250368e0 9 212ms FSC: OBJ17f1: LOOK - vsq - ffff880025036aa8 9 212ms FSC: OBJ17f2: LOOK - -In the 'THR' column, executing items show the thread they're occupying and -queued threads indicate which queue they're on. 'PID' shows the process ID of -a slow-work thread that's executing something. 'FL' shows the work item flags. -'MARK' indicates how long since an item was queued or began executing. Lastly, -the 'DESC' column permits the owner of an item to give some information. - diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h deleted file mode 100644 index 13337bf6c3f5..000000000000 --- a/include/linux/slow-work.h +++ /dev/null @@ -1,163 +0,0 @@ -/* Worker thread pool for slow items, such as filesystem lookups or mkdirs - * - * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- * - * See Documentation/slow-work.txt - */ - -#ifndef _LINUX_SLOW_WORK_H -#define _LINUX_SLOW_WORK_H - -#ifdef CONFIG_SLOW_WORK - -#include -#include - -struct slow_work; -#ifdef CONFIG_SLOW_WORK_DEBUG -struct seq_file; -#endif - -/* - * The operations used to support slow work items - */ -struct slow_work_ops { - /* owner */ - struct module *owner; - - /* get a ref on a work item - * - return 0 if successful, -ve if not - */ - int (*get_ref)(struct slow_work *work); - - /* discard a ref to a work item */ - void (*put_ref)(struct slow_work *work); - - /* execute a work item */ - void (*execute)(struct slow_work *work); - -#ifdef CONFIG_SLOW_WORK_DEBUG - /* describe a work item for debugfs */ - void (*desc)(struct slow_work *work, struct seq_file *m); -#endif -}; - -/* - * A slow work item - * - A reference is held on the parent object by the thread pool when it is - * queued - */ -struct slow_work { - struct module *owner; /* the owning module */ - unsigned long flags; -#define SLOW_WORK_PENDING 0 /* item pending (further) execution */ -#define SLOW_WORK_EXECUTING 1 /* item currently executing */ -#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ -#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ -#define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */ -#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ - const struct slow_work_ops *ops; /* operations table for this item */ - struct list_head link; /* link in queue */ -#ifdef CONFIG_SLOW_WORK_DEBUG - struct timespec mark; /* jiffies at which queued or exec begun */ -#endif -}; - -struct delayed_slow_work { - struct slow_work work; - struct timer_list timer; -}; - -/** - * slow_work_init - Initialise a slow work item - * @work: The work item to initialise - * @ops: The operations to use to handle the slow work item - * - * Initialise a slow work item. - */ -static inline void slow_work_init(struct slow_work *work, - const struct slow_work_ops *ops) -{ - work->flags = 0; - work->ops = ops; - INIT_LIST_HEAD(&work->link); -} - -/** - * slow_work_init - Initialise a delayed slow work item - * @work: The work item to initialise - * @ops: The operations to use to handle the slow work item - * - * Initialise a delayed slow work item. - */ -static inline void delayed_slow_work_init(struct delayed_slow_work *dwork, - const struct slow_work_ops *ops) -{ - init_timer(&dwork->timer); - slow_work_init(&dwork->work, ops); -} - -/** - * vslow_work_init - Initialise a very slow work item - * @work: The work item to initialise - * @ops: The operations to use to handle the slow work item - * - * Initialise a very slow work item. This item will be restricted such that - * only a certain number of the pool threads will be able to execute items of - * this type. - */ -static inline void vslow_work_init(struct slow_work *work, - const struct slow_work_ops *ops) -{ - work->flags = 1 << SLOW_WORK_VERY_SLOW; - work->ops = ops; - INIT_LIST_HEAD(&work->link); -} - -/** - * slow_work_is_queued - Determine if a slow work item is on the work queue - * work: The work item to test - * - * Determine if the specified slow-work item is on the work queue. This - * returns true if it is actually on the queue. - * - * If the item is executing and has been marked for requeue when execution - * finishes, then false will be returned. - * - * Anyone wishing to wait for completion of execution can wait on the - * SLOW_WORK_EXECUTING bit. 
- */ -static inline bool slow_work_is_queued(struct slow_work *work) -{ - unsigned long flags = work->flags; - return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING); -} - -extern int slow_work_enqueue(struct slow_work *work); -extern void slow_work_cancel(struct slow_work *work); -extern int slow_work_register_user(struct module *owner); -extern void slow_work_unregister_user(struct module *owner); - -extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, - unsigned long delay); - -static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork) -{ - slow_work_cancel(&dwork->work); -} - -extern bool slow_work_sleep_till_thread_needed(struct slow_work *work, - signed long *_timeout); - -#ifdef CONFIG_SYSCTL -extern ctl_table slow_work_sysctls[]; -#endif - -#endif /* CONFIG_SLOW_WORK */ -#endif /* _LINUX_SLOW_WORK_H */ diff --git a/init/Kconfig b/init/Kconfig index 5cff9a980c39..cb64c5889e02 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1143,30 +1143,6 @@ config TRACEPOINTS source "arch/Kconfig" -config SLOW_WORK - default n - bool - help - The slow work thread pool provides a number of dynamically allocated - threads that can be used by the kernel to perform operations that - take a relatively long time. - - An example of this would be CacheFiles doing a path lookup followed - by a series of mkdirs and a create call, all of which have to touch - disk. - - See Documentation/slow-work.txt. - -config SLOW_WORK_DEBUG - bool "Slow work debugging through debugfs" - default n - depends on SLOW_WORK && DEBUG_FS - help - Display the contents of the slow work run queue through debugfs, - including items currently executing. - - See Documentation/slow-work.txt. - endmenu # General setup config HAVE_GENERIC_DMA_COHERENT diff --git a/kernel/Makefile b/kernel/Makefile index 057472fbc272..2484ac39b2e2 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -99,8 +99,6 @@ obj-$(CONFIG_TRACING) += trace/ obj-$(CONFIG_X86_DS) += trace/ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o -obj-$(CONFIG_SLOW_WORK) += slow-work.o -obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o diff --git a/kernel/slow-work-debugfs.c b/kernel/slow-work-debugfs.c deleted file mode 100644 index e45c43645298..000000000000 --- a/kernel/slow-work-debugfs.c +++ /dev/null @@ -1,227 +0,0 @@ -/* Slow work debugging - * - * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include "slow-work.h" - -#define ITERATOR_SHIFT (BITS_PER_LONG - 4) -#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT) -#define ITERATOR_COUNTER (~ITERATOR_SELECTOR) - -void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m) -{ - seq_puts(m, "Slow-work: New thread"); -} - -/* - * Render the time mark field on a work item into a 5-char time with units plus - * a space - */ -static void slow_work_print_mark(struct seq_file *m, struct slow_work *work) -{ - struct timespec now, diff; - - now = CURRENT_TIME; - diff = timespec_sub(now, work->mark); - - if (diff.tv_sec < 0) - seq_puts(m, " -ve "); - else if (diff.tv_sec == 0 && diff.tv_nsec < 1000) - seq_printf(m, "%3luns ", diff.tv_nsec); - else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000) - seq_printf(m, "%3luus ", diff.tv_nsec / 1000); - else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000) - seq_printf(m, "%3lums ", diff.tv_nsec / 1000000); - else if (diff.tv_sec <= 1) - seq_puts(m, " 1s "); - else if (diff.tv_sec < 60) - seq_printf(m, "%4lus ", diff.tv_sec); - else if (diff.tv_sec < 60 * 60) - seq_printf(m, "%4lum ", diff.tv_sec / 60); - else if (diff.tv_sec < 60 * 60 * 24) - seq_printf(m, "%4luh ", diff.tv_sec / 3600); - else - seq_puts(m, "exces "); -} - -/* - * Describe a slow work item for debugfs - */ -static int slow_work_runqueue_show(struct seq_file *m, void *v) -{ - struct slow_work *work; - struct list_head *p = v; - unsigned long id; - - switch ((unsigned long) v) { - case 1: - seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n"); - return 0; - case 2: - seq_puts(m, "=== ===== ================ == ===== ==========\n"); - return 0; - - case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1: - id = (unsigned long) v - 3; - - read_lock(&slow_work_execs_lock); - work = slow_work_execs[id]; - if (work) { - smp_read_barrier_depends(); - - seq_printf(m, "%3lu %5d %16p %2lx ", - id, slow_work_pids[id], work, work->flags); - slow_work_print_mark(m, work); - - if (work->ops->desc) - work->ops->desc(work, m); - seq_putc(m, '\n'); - } - read_unlock(&slow_work_execs_lock); - return 0; - - default: - work = list_entry(p, struct slow_work, link); - seq_printf(m, "%3s - %16p %2lx ", - work->flags & SLOW_WORK_VERY_SLOW ? 
"vsq" : "sq", - work, work->flags); - slow_work_print_mark(m, work); - - if (work->ops->desc) - work->ops->desc(work, m); - seq_putc(m, '\n'); - return 0; - } -} - -/* - * map the iterator to a work item - */ -static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos) -{ - struct list_head *p; - unsigned long count, id; - - switch (*_pos >> ITERATOR_SHIFT) { - case 0x0: - if (*_pos == 0) - *_pos = 1; - if (*_pos < 3) - return (void *)(unsigned long) *_pos; - if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT) - for (id = *_pos - 3; - id < SLOW_WORK_THREAD_LIMIT; - id++, (*_pos)++) - if (slow_work_execs[id]) - return (void *)(unsigned long) *_pos; - *_pos = 0x1UL << ITERATOR_SHIFT; - - case 0x1: - count = *_pos & ITERATOR_COUNTER; - list_for_each(p, &slow_work_queue) { - if (count == 0) - return p; - count--; - } - *_pos = 0x2UL << ITERATOR_SHIFT; - - case 0x2: - count = *_pos & ITERATOR_COUNTER; - list_for_each(p, &vslow_work_queue) { - if (count == 0) - return p; - count--; - } - *_pos = 0x3UL << ITERATOR_SHIFT; - - default: - return NULL; - } -} - -/* - * set up the iterator to start reading from the first line - */ -static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos) -{ - spin_lock_irq(&slow_work_queue_lock); - return slow_work_runqueue_index(m, _pos); -} - -/* - * move to the next line - */ -static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos) -{ - struct list_head *p = v; - unsigned long selector = *_pos >> ITERATOR_SHIFT; - - (*_pos)++; - switch (selector) { - case 0x0: - return slow_work_runqueue_index(m, _pos); - - case 0x1: - if (*_pos >> ITERATOR_SHIFT == 0x1) { - p = p->next; - if (p != &slow_work_queue) - return p; - } - *_pos = 0x2UL << ITERATOR_SHIFT; - p = &vslow_work_queue; - - case 0x2: - if (*_pos >> ITERATOR_SHIFT == 0x2) { - p = p->next; - if (p != &vslow_work_queue) - return p; - } - *_pos = 0x3UL << ITERATOR_SHIFT; - - default: - return NULL; - } -} - -/* - * clean up after reading - */ -static void slow_work_runqueue_stop(struct seq_file *m, void *v) -{ - spin_unlock_irq(&slow_work_queue_lock); -} - -static const struct seq_operations slow_work_runqueue_ops = { - .start = slow_work_runqueue_start, - .stop = slow_work_runqueue_stop, - .next = slow_work_runqueue_next, - .show = slow_work_runqueue_show, -}; - -/* - * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents - */ -static int slow_work_runqueue_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &slow_work_runqueue_ops); -} - -const struct file_operations slow_work_runqueue_fops = { - .owner = THIS_MODULE, - .open = slow_work_runqueue_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; diff --git a/kernel/slow-work.c b/kernel/slow-work.c deleted file mode 100644 index 7d3f4fa9ef4f..000000000000 --- a/kernel/slow-work.c +++ /dev/null @@ -1,1068 +0,0 @@ -/* Worker thread pool for slow items, such as filesystem lookups or mkdirs - * - * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- * - * See Documentation/slow-work.txt - */ - -#include -#include -#include -#include -#include -#include -#include "slow-work.h" - -static void slow_work_cull_timeout(unsigned long); -static void slow_work_oom_timeout(unsigned long); - -#ifdef CONFIG_SYSCTL -static int slow_work_min_threads_sysctl(struct ctl_table *, int, - void __user *, size_t *, loff_t *); - -static int slow_work_max_threads_sysctl(struct ctl_table *, int , - void __user *, size_t *, loff_t *); -#endif - -/* - * The pool of threads has at least min threads in it as long as someone is - * using the facility, and may have as many as max. - * - * A portion of the pool may be processing very slow operations. - */ -static unsigned slow_work_min_threads = 2; -static unsigned slow_work_max_threads = 4; -static unsigned vslow_work_proportion = 50; /* % of threads that may process - * very slow work */ - -#ifdef CONFIG_SYSCTL -static const int slow_work_min_min_threads = 2; -static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT; -static const int slow_work_min_vslow = 1; -static const int slow_work_max_vslow = 99; - -ctl_table slow_work_sysctls[] = { - { - .procname = "min-threads", - .data = &slow_work_min_threads, - .maxlen = sizeof(unsigned), - .mode = 0644, - .proc_handler = slow_work_min_threads_sysctl, - .extra1 = (void *) &slow_work_min_min_threads, - .extra2 = &slow_work_max_threads, - }, - { - .procname = "max-threads", - .data = &slow_work_max_threads, - .maxlen = sizeof(unsigned), - .mode = 0644, - .proc_handler = slow_work_max_threads_sysctl, - .extra1 = &slow_work_min_threads, - .extra2 = (void *) &slow_work_max_max_threads, - }, - { - .procname = "vslow-percentage", - .data = &vslow_work_proportion, - .maxlen = sizeof(unsigned), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = (void *) &slow_work_min_vslow, - .extra2 = (void *) &slow_work_max_vslow, - }, - {} -}; -#endif - -/* - * The active state of the thread pool - */ -static atomic_t slow_work_thread_count; -static atomic_t vslow_work_executing_count; - -static bool slow_work_may_not_start_new_thread; -static bool slow_work_cull; /* cull a thread due to lack of activity */ -static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0); -static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0); -static struct slow_work slow_work_new_thread; /* new thread starter */ - -/* - * slow work ID allocation (use slow_work_queue_lock) - */ -static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT); - -/* - * Unregistration tracking to prevent put_ref() from disappearing during module - * unload - */ -#ifdef CONFIG_MODULES -static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT]; -static struct module *slow_work_unreg_module; -static struct slow_work *slow_work_unreg_work_item; -static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq); -static DEFINE_MUTEX(slow_work_unreg_sync_lock); - -static void slow_work_set_thread_processing(int id, struct slow_work *work) -{ - if (work) - slow_work_thread_processing[id] = work->owner; -} -static void slow_work_done_thread_processing(int id, struct slow_work *work) -{ - struct module *module = slow_work_thread_processing[id]; - - slow_work_thread_processing[id] = NULL; - smp_mb(); - if (slow_work_unreg_work_item == work || - slow_work_unreg_module == module) - wake_up_all(&slow_work_unreg_wq); -} -static void slow_work_clear_thread_processing(int id) -{ - slow_work_thread_processing[id] = NULL; -} -#else -static void slow_work_set_thread_processing(int id, struct 
slow_work *work) {} -static void slow_work_done_thread_processing(int id, struct slow_work *work) {} -static void slow_work_clear_thread_processing(int id) {} -#endif - -/* - * Data for tracking currently executing items for indication through /proc - */ -#ifdef CONFIG_SLOW_WORK_DEBUG -struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT]; -pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT]; -DEFINE_RWLOCK(slow_work_execs_lock); -#endif - -/* - * The queues of work items and the lock governing access to them. These are - * shared between all the CPUs. It doesn't make sense to have per-CPU queues - * as the number of threads bears no relation to the number of CPUs. - * - * There are two queues of work items: one for slow work items, and one for - * very slow work items. - */ -LIST_HEAD(slow_work_queue); -LIST_HEAD(vslow_work_queue); -DEFINE_SPINLOCK(slow_work_queue_lock); - -/* - * The following are two wait queues that get pinged when a work item is placed - * on an empty queue. These allow work items that are hogging a thread by - * sleeping in a way that could be deferred to yield their thread and enqueue - * themselves. - */ -static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation); -static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation); - -/* - * The thread controls. A variable used to signal to the threads that they - * should exit when the queue is empty, a waitqueue used by the threads to wait - * for signals, and a completion set by the last thread to exit. - */ -static bool slow_work_threads_should_exit; -static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq); -static DECLARE_COMPLETION(slow_work_last_thread_exited); - -/* - * The number of users of the thread pool and its lock. Whilst this is zero we - * have no threads hanging around, and when this reaches zero, we wait for all - * active or queued work items to complete and kill all the threads we do have. - */ -static int slow_work_user_count; -static DEFINE_MUTEX(slow_work_user_lock); - -static inline int slow_work_get_ref(struct slow_work *work) -{ - if (work->ops->get_ref) - return work->ops->get_ref(work); - - return 0; -} - -static inline void slow_work_put_ref(struct slow_work *work) -{ - if (work->ops->put_ref) - work->ops->put_ref(work); -} - -/* - * Calculate the maximum number of active threads in the pool that are - * permitted to process very slow work items. - * - * The answer is rounded up to at least 1, but may not equal or exceed the - * maximum number of the threads in the pool. This means we always have at - * least one thread that can process slow work items, and we always have at - * least one thread that won't get tied up doing so. - */ -static unsigned slow_work_calc_vsmax(void) -{ - unsigned vsmax; - - vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion; - vsmax /= 100; - vsmax = max(vsmax, 1U); - return min(vsmax, slow_work_max_threads - 1); -} - -/* - * Attempt to execute stuff queued on a slow thread. Return true if we managed - * it, false if there was nothing to do. 
- */ -static noinline bool slow_work_execute(int id) -{ - struct slow_work *work = NULL; - unsigned vsmax; - bool very_slow; - - vsmax = slow_work_calc_vsmax(); - - /* see if we can schedule a new thread to be started if we're not - * keeping up with the work */ - if (!waitqueue_active(&slow_work_thread_wq) && - (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) && - atomic_read(&slow_work_thread_count) < slow_work_max_threads && - !slow_work_may_not_start_new_thread) - slow_work_enqueue(&slow_work_new_thread); - - /* find something to execute */ - spin_lock_irq(&slow_work_queue_lock); - if (!list_empty(&vslow_work_queue) && - atomic_read(&vslow_work_executing_count) < vsmax) { - work = list_entry(vslow_work_queue.next, - struct slow_work, link); - if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags)) - BUG(); - list_del_init(&work->link); - atomic_inc(&vslow_work_executing_count); - very_slow = true; - } else if (!list_empty(&slow_work_queue)) { - work = list_entry(slow_work_queue.next, - struct slow_work, link); - if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags)) - BUG(); - list_del_init(&work->link); - very_slow = false; - } else { - very_slow = false; /* avoid the compiler warning */ - } - - slow_work_set_thread_processing(id, work); - if (work) { - slow_work_mark_time(work); - slow_work_begin_exec(id, work); - } - - spin_unlock_irq(&slow_work_queue_lock); - - if (!work) - return false; - - if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) - BUG(); - - /* don't execute if the work is in the process of being cancelled */ - if (!test_bit(SLOW_WORK_CANCELLING, &work->flags)) - work->ops->execute(work); - - if (very_slow) - atomic_dec(&vslow_work_executing_count); - clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); - - /* wake up anyone waiting for this work to be complete */ - wake_up_bit(&work->flags, SLOW_WORK_EXECUTING); - - slow_work_end_exec(id, work); - - /* if someone tried to enqueue the item whilst we were executing it, - * then it'll be left unenqueued to avoid multiple threads trying to - * execute it simultaneously - * - * there is, however, a race between us testing the pending flag and - * getting the spinlock, and between the enqueuer setting the pending - * flag and getting the spinlock, so we use a deferral bit to tell us - * if the enqueuer got there first - */ - if (test_bit(SLOW_WORK_PENDING, &work->flags)) { - spin_lock_irq(&slow_work_queue_lock); - - if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) && - test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) - goto auto_requeue; - - spin_unlock_irq(&slow_work_queue_lock); - } - - /* sort out the race between module unloading and put_ref() */ - slow_work_put_ref(work); - slow_work_done_thread_processing(id, work); - - return true; - -auto_requeue: - /* we must complete the enqueue operation - * - we transfer our ref on the item back to the appropriate queue - * - don't wake another thread up as we're awake already - */ - slow_work_mark_time(work); - if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) - list_add_tail(&work->link, &vslow_work_queue); - else - list_add_tail(&work->link, &slow_work_queue); - spin_unlock_irq(&slow_work_queue_lock); - slow_work_clear_thread_processing(id); - return true; -} - -/** - * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work - * work: The work item under execution that wants to sleep - * _timeout: Scheduler sleep timeout - * - * Allow a requeueable work item to sleep on a slow-work processor thread until - * 
that thread is needed to do some other work or the sleep is interrupted by - * some other event. - * - * The caller must set up a wake up event before calling this and must have set - * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own - * condition before calling this function as no test is made here. - * - * False is returned if there is nothing on the queue; true is returned if the - * work item should be requeued - */ -bool slow_work_sleep_till_thread_needed(struct slow_work *work, - signed long *_timeout) -{ - wait_queue_head_t *wfo_wq; - struct list_head *queue; - - DEFINE_WAIT(wait); - - if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { - wfo_wq = &vslow_work_queue_waits_for_occupation; - queue = &vslow_work_queue; - } else { - wfo_wq = &slow_work_queue_waits_for_occupation; - queue = &slow_work_queue; - } - - if (!list_empty(queue)) - return true; - - add_wait_queue_exclusive(wfo_wq, &wait); - if (list_empty(queue)) - *_timeout = schedule_timeout(*_timeout); - finish_wait(wfo_wq, &wait); - - return !list_empty(queue); -} -EXPORT_SYMBOL(slow_work_sleep_till_thread_needed); - -/** - * slow_work_enqueue - Schedule a slow work item for processing - * @work: The work item to queue - * - * Schedule a slow work item for processing. If the item is already undergoing - * execution, this guarantees not to re-enter the execution routine until the - * first execution finishes. - * - * The item is pinned by this function as it retains a reference to it, managed - * through the item operations. The item is unpinned once it has been - * executed. - * - * An item may hog the thread that is running it for a relatively large amount - * of time, sufficient, for example, to perform several lookup, mkdir, create - * and setxattr operations. It may sleep on I/O and may sleep to obtain locks. - * - * Conversely, if a number of items are awaiting processing, it may take some - * time before any given item is given attention. The number of threads in the - * pool may be increased to deal with demand, but only up to a limit. - * - * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in - * the very slow queue, from which only a portion of the threads will be - * allowed to pick items to execute. This ensures that very slow items won't - * overly block ones that are just ordinarily slow. 
- * - * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is - * attempted queued) - */ -int slow_work_enqueue(struct slow_work *work) -{ - wait_queue_head_t *wfo_wq; - struct list_head *queue; - unsigned long flags; - int ret; - - if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) - return -ECANCELED; - - BUG_ON(slow_work_user_count <= 0); - BUG_ON(!work); - BUG_ON(!work->ops); - - /* when honouring an enqueue request, we only promise that we will run - * the work function in the future; we do not promise to run it once - * per enqueue request - * - * we use the PENDING bit to merge together repeat requests without - * having to disable IRQs and take the spinlock, whilst still - * maintaining our promise - */ - if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { - if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { - wfo_wq = &vslow_work_queue_waits_for_occupation; - queue = &vslow_work_queue; - } else { - wfo_wq = &slow_work_queue_waits_for_occupation; - queue = &slow_work_queue; - } - - spin_lock_irqsave(&slow_work_queue_lock, flags); - - if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags))) - goto cancelled; - - /* we promise that we will not attempt to execute the work - * function in more than one thread simultaneously - * - * this, however, leaves us with a problem if we're asked to - * enqueue the work whilst someone is executing the work - * function as simply queueing the work immediately means that - * another thread may try executing it whilst it is already - * under execution - * - * to deal with this, we set the ENQ_DEFERRED bit instead of - * enqueueing, and the thread currently executing the work - * function will enqueue the work item when the work function - * returns and it has cleared the EXECUTING bit - */ - if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { - set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); - } else { - ret = slow_work_get_ref(work); - if (ret < 0) - goto failed; - slow_work_mark_time(work); - list_add_tail(&work->link, queue); - wake_up(&slow_work_thread_wq); - - /* if someone who could be requeued is sleeping on a - * thread, then ask them to yield their thread */ - if (work->link.prev == queue) - wake_up(wfo_wq); - } - - spin_unlock_irqrestore(&slow_work_queue_lock, flags); - } - return 0; - -cancelled: - ret = -ECANCELED; -failed: - spin_unlock_irqrestore(&slow_work_queue_lock, flags); - return ret; -} -EXPORT_SYMBOL(slow_work_enqueue); - -static int slow_work_wait(void *word) -{ - schedule(); - return 0; -} - -/** - * slow_work_cancel - Cancel a slow work item - * @work: The work item to cancel - * - * This function will cancel a previously enqueued work item. If we cannot - * cancel the work item, it is guarenteed to have run when this function - * returns. 
- */ -void slow_work_cancel(struct slow_work *work) -{ - bool wait = true, put = false; - - set_bit(SLOW_WORK_CANCELLING, &work->flags); - smp_mb(); - - /* if the work item is a delayed work item with an active timer, we - * need to wait for the timer to finish _before_ getting the spinlock, - * lest we deadlock against the timer routine - * - * the timer routine will leave DELAYED set if it notices the - * CANCELLING flag in time - */ - if (test_bit(SLOW_WORK_DELAYED, &work->flags)) { - struct delayed_slow_work *dwork = - container_of(work, struct delayed_slow_work, work); - del_timer_sync(&dwork->timer); - } - - spin_lock_irq(&slow_work_queue_lock); - - if (test_bit(SLOW_WORK_DELAYED, &work->flags)) { - /* the timer routine aborted or never happened, so we are left - * holding the timer's reference on the item and should just - * drop the pending flag and wait for any ongoing execution to - * finish */ - struct delayed_slow_work *dwork = - container_of(work, struct delayed_slow_work, work); - - BUG_ON(timer_pending(&dwork->timer)); - BUG_ON(!list_empty(&work->link)); - - clear_bit(SLOW_WORK_DELAYED, &work->flags); - put = true; - clear_bit(SLOW_WORK_PENDING, &work->flags); - - } else if (test_bit(SLOW_WORK_PENDING, &work->flags) && - !list_empty(&work->link)) { - /* the link in the pending queue holds a reference on the item - * that we will need to release */ - list_del_init(&work->link); - wait = false; - put = true; - clear_bit(SLOW_WORK_PENDING, &work->flags); - - } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) { - /* the executor is holding our only reference on the item, so - * we merely need to wait for it to finish executing */ - clear_bit(SLOW_WORK_PENDING, &work->flags); - } - - spin_unlock_irq(&slow_work_queue_lock); - - /* the EXECUTING flag is set by the executor whilst the spinlock is set - * and before the item is dequeued - so assuming the above doesn't - * actually dequeue it, simply waiting for the EXECUTING flag to be - * released here should be sufficient */ - if (wait) - wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait, - TASK_UNINTERRUPTIBLE); - - clear_bit(SLOW_WORK_CANCELLING, &work->flags); - if (put) - slow_work_put_ref(work); -} -EXPORT_SYMBOL(slow_work_cancel); - -/* - * Handle expiry of the delay timer, indicating that a delayed slow work item - * should now be queued if not cancelled - */ -static void delayed_slow_work_timer(unsigned long data) -{ - wait_queue_head_t *wfo_wq; - struct list_head *queue; - struct slow_work *work = (struct slow_work *) data; - unsigned long flags; - bool queued = false, put = false, first = false; - - if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { - wfo_wq = &vslow_work_queue_waits_for_occupation; - queue = &vslow_work_queue; - } else { - wfo_wq = &slow_work_queue_waits_for_occupation; - queue = &slow_work_queue; - } - - spin_lock_irqsave(&slow_work_queue_lock, flags); - if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) { - clear_bit(SLOW_WORK_DELAYED, &work->flags); - - if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { - /* we discard the reference the timer was holding in - * favour of the one the executor holds */ - set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); - put = true; - } else { - slow_work_mark_time(work); - list_add_tail(&work->link, queue); - queued = true; - if (work->link.prev == queue) - first = true; - } - } - - spin_unlock_irqrestore(&slow_work_queue_lock, flags); - if (put) - slow_work_put_ref(work); - if (first) - wake_up(wfo_wq); - if (queued) - 
wake_up(&slow_work_thread_wq); -} - -/** - * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing - * @dwork: The delayed work item to queue - * @delay: When to start executing the work, in jiffies from now - * - * This is similar to slow_work_enqueue(), but it adds a delay before the work - * is actually queued for processing. - * - * The item can have delayed processing requested on it whilst it is being - * executed. The delay will begin immediately, and if it expires before the - * item finishes executing, the item will be placed back on the queue when it - * has done executing. - */ -int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, - unsigned long delay) -{ - struct slow_work *work = &dwork->work; - unsigned long flags; - int ret; - - if (delay == 0) - return slow_work_enqueue(&dwork->work); - - BUG_ON(slow_work_user_count <= 0); - BUG_ON(!work); - BUG_ON(!work->ops); - - if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) - return -ECANCELED; - - if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { - spin_lock_irqsave(&slow_work_queue_lock, flags); - - if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) - goto cancelled; - - /* the timer holds a reference whilst it is pending */ - ret = slow_work_get_ref(work); - if (ret < 0) - goto cant_get_ref; - - if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags)) - BUG(); - dwork->timer.expires = jiffies + delay; - dwork->timer.data = (unsigned long) work; - dwork->timer.function = delayed_slow_work_timer; - add_timer(&dwork->timer); - - spin_unlock_irqrestore(&slow_work_queue_lock, flags); - } - - return 0; - -cancelled: - ret = -ECANCELED; -cant_get_ref: - spin_unlock_irqrestore(&slow_work_queue_lock, flags); - return ret; -} -EXPORT_SYMBOL(delayed_slow_work_enqueue); - -/* - * Schedule a cull of the thread pool at some time in the near future - */ -static void slow_work_schedule_cull(void) -{ - mod_timer(&slow_work_cull_timer, - round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT)); -} - -/* - * Worker thread culling algorithm - */ -static bool slow_work_cull_thread(void) -{ - unsigned long flags; - bool do_cull = false; - - spin_lock_irqsave(&slow_work_queue_lock, flags); - - if (slow_work_cull) { - slow_work_cull = false; - - if (list_empty(&slow_work_queue) && - list_empty(&vslow_work_queue) && - atomic_read(&slow_work_thread_count) > - slow_work_min_threads) { - slow_work_schedule_cull(); - do_cull = true; - } - } - - spin_unlock_irqrestore(&slow_work_queue_lock, flags); - return do_cull; -} - -/* - * Determine if there is slow work available for dispatch - */ -static inline bool slow_work_available(int vsmax) -{ - return !list_empty(&slow_work_queue) || - (!list_empty(&vslow_work_queue) && - atomic_read(&vslow_work_executing_count) < vsmax); -} - -/* - * Worker thread dispatcher - */ -static int slow_work_thread(void *_data) -{ - int vsmax, id; - - DEFINE_WAIT(wait); - - set_freezable(); - set_user_nice(current, -5); - - /* allocate ourselves an ID */ - spin_lock_irq(&slow_work_queue_lock); - id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT); - BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT); - __set_bit(id, slow_work_ids); - slow_work_set_thread_pid(id, current->pid); - spin_unlock_irq(&slow_work_queue_lock); - - sprintf(current->comm, "kslowd%03u", id); - - for (;;) { - vsmax = vslow_work_proportion; - vsmax *= atomic_read(&slow_work_thread_count); - vsmax /= 100; - - prepare_to_wait_exclusive(&slow_work_thread_wq, &wait, - TASK_INTERRUPTIBLE); - if (!freezing(current) && - 
!slow_work_threads_should_exit && - !slow_work_available(vsmax) && - !slow_work_cull) - schedule(); - finish_wait(&slow_work_thread_wq, &wait); - - try_to_freeze(); - - vsmax = vslow_work_proportion; - vsmax *= atomic_read(&slow_work_thread_count); - vsmax /= 100; - - if (slow_work_available(vsmax) && slow_work_execute(id)) { - cond_resched(); - if (list_empty(&slow_work_queue) && - list_empty(&vslow_work_queue) && - atomic_read(&slow_work_thread_count) > - slow_work_min_threads) - slow_work_schedule_cull(); - continue; - } - - if (slow_work_threads_should_exit) - break; - - if (slow_work_cull && slow_work_cull_thread()) - break; - } - - spin_lock_irq(&slow_work_queue_lock); - slow_work_set_thread_pid(id, 0); - __clear_bit(id, slow_work_ids); - spin_unlock_irq(&slow_work_queue_lock); - - if (atomic_dec_and_test(&slow_work_thread_count)) - complete_and_exit(&slow_work_last_thread_exited, 0); - return 0; -} - -/* - * Handle thread cull timer expiration - */ -static void slow_work_cull_timeout(unsigned long data) -{ - slow_work_cull = true; - wake_up(&slow_work_thread_wq); -} - -/* - * Start a new slow work thread - */ -static void slow_work_new_thread_execute(struct slow_work *work) -{ - struct task_struct *p; - - if (slow_work_threads_should_exit) - return; - - if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads) - return; - - if (!mutex_trylock(&slow_work_user_lock)) - return; - - slow_work_may_not_start_new_thread = true; - atomic_inc(&slow_work_thread_count); - p = kthread_run(slow_work_thread, NULL, "kslowd"); - if (IS_ERR(p)) { - printk(KERN_DEBUG "Slow work thread pool: OOM\n"); - if (atomic_dec_and_test(&slow_work_thread_count)) - BUG(); /* we're running on a slow work thread... */ - mod_timer(&slow_work_oom_timer, - round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT)); - } else { - /* ratelimit the starting of new threads */ - mod_timer(&slow_work_oom_timer, jiffies + 1); - } - - mutex_unlock(&slow_work_user_lock); -} - -static const struct slow_work_ops slow_work_new_thread_ops = { - .owner = THIS_MODULE, - .execute = slow_work_new_thread_execute, -#ifdef CONFIG_SLOW_WORK_DEBUG - .desc = slow_work_new_thread_desc, -#endif -}; - -/* - * post-OOM new thread start suppression expiration - */ -static void slow_work_oom_timeout(unsigned long data) -{ - slow_work_may_not_start_new_thread = false; -} - -#ifdef CONFIG_SYSCTL -/* - * Handle adjustment of the minimum number of threads - */ -static int slow_work_min_threads_sysctl(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); - int n; - - if (ret == 0) { - mutex_lock(&slow_work_user_lock); - if (slow_work_user_count > 0) { - /* see if we need to start or stop threads */ - n = atomic_read(&slow_work_thread_count) - - slow_work_min_threads; - - if (n < 0 && !slow_work_may_not_start_new_thread) - slow_work_enqueue(&slow_work_new_thread); - else if (n > 0) - slow_work_schedule_cull(); - } - mutex_unlock(&slow_work_user_lock); - } - - return ret; -} - -/* - * Handle adjustment of the maximum number of threads - */ -static int slow_work_max_threads_sysctl(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); - int n; - - if (ret == 0) { - mutex_lock(&slow_work_user_lock); - if (slow_work_user_count > 0) { - /* see if we need to stop threads */ - n = slow_work_max_threads - - atomic_read(&slow_work_thread_count); - - if (n < 
0) - slow_work_schedule_cull(); - } - mutex_unlock(&slow_work_user_lock); - } - - return ret; -} -#endif /* CONFIG_SYSCTL */ - -/** - * slow_work_register_user - Register a user of the facility - * @module: The module about to make use of the facility - * - * Register a user of the facility, starting up the initial threads if there - * aren't any other users at this point. This will return 0 if successful, or - * an error if not. - */ -int slow_work_register_user(struct module *module) -{ - struct task_struct *p; - int loop; - - mutex_lock(&slow_work_user_lock); - - if (slow_work_user_count == 0) { - printk(KERN_NOTICE "Slow work thread pool: Starting up\n"); - init_completion(&slow_work_last_thread_exited); - - slow_work_threads_should_exit = false; - slow_work_init(&slow_work_new_thread, - &slow_work_new_thread_ops); - slow_work_may_not_start_new_thread = false; - slow_work_cull = false; - - /* start the minimum number of threads */ - for (loop = 0; loop < slow_work_min_threads; loop++) { - atomic_inc(&slow_work_thread_count); - p = kthread_run(slow_work_thread, NULL, "kslowd"); - if (IS_ERR(p)) - goto error; - } - printk(KERN_NOTICE "Slow work thread pool: Ready\n"); - } - - slow_work_user_count++; - mutex_unlock(&slow_work_user_lock); - return 0; - -error: - if (atomic_dec_and_test(&slow_work_thread_count)) - complete(&slow_work_last_thread_exited); - if (loop > 0) { - printk(KERN_ERR "Slow work thread pool:" - " Aborting startup on ENOMEM\n"); - slow_work_threads_should_exit = true; - wake_up_all(&slow_work_thread_wq); - wait_for_completion(&slow_work_last_thread_exited); - printk(KERN_ERR "Slow work thread pool: Aborted\n"); - } - mutex_unlock(&slow_work_user_lock); - return PTR_ERR(p); -} -EXPORT_SYMBOL(slow_work_register_user); - -/* - * wait for all outstanding items from the calling module to complete - * - note that more items may be queued whilst we're waiting - */ -static void slow_work_wait_for_items(struct module *module) -{ -#ifdef CONFIG_MODULES - DECLARE_WAITQUEUE(myself, current); - struct slow_work *work; - int loop; - - mutex_lock(&slow_work_unreg_sync_lock); - add_wait_queue(&slow_work_unreg_wq, &myself); - - for (;;) { - spin_lock_irq(&slow_work_queue_lock); - - /* first of all, we wait for the last queued item in each list - * to be processed */ - list_for_each_entry_reverse(work, &vslow_work_queue, link) { - if (work->owner == module) { - set_current_state(TASK_UNINTERRUPTIBLE); - slow_work_unreg_work_item = work; - goto do_wait; - } - } - list_for_each_entry_reverse(work, &slow_work_queue, link) { - if (work->owner == module) { - set_current_state(TASK_UNINTERRUPTIBLE); - slow_work_unreg_work_item = work; - goto do_wait; - } - } - - /* then we wait for the items being processed to finish */ - slow_work_unreg_module = module; - smp_mb(); - for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) { - if (slow_work_thread_processing[loop] == module) - goto do_wait; - } - spin_unlock_irq(&slow_work_queue_lock); - break; /* okay, we're done */ - - do_wait: - spin_unlock_irq(&slow_work_queue_lock); - schedule(); - slow_work_unreg_work_item = NULL; - slow_work_unreg_module = NULL; - } - - remove_wait_queue(&slow_work_unreg_wq, &myself); - mutex_unlock(&slow_work_unreg_sync_lock); -#endif /* CONFIG_MODULES */ -} - -/** - * slow_work_unregister_user - Unregister a user of the facility - * @module: The module whose items should be cleared - * - * Unregister a user of the facility, killing all the threads if this was the - * last one. 
- * - * This waits for all the work items belonging to the nominated module to go - * away before proceeding. - */ -void slow_work_unregister_user(struct module *module) -{ - /* first of all, wait for all outstanding items from the calling module - * to complete */ - if (module) - slow_work_wait_for_items(module); - - /* then we can actually go about shutting down the facility if need - * be */ - mutex_lock(&slow_work_user_lock); - - BUG_ON(slow_work_user_count <= 0); - - slow_work_user_count--; - if (slow_work_user_count == 0) { - printk(KERN_NOTICE "Slow work thread pool: Shutting down\n"); - slow_work_threads_should_exit = true; - del_timer_sync(&slow_work_cull_timer); - del_timer_sync(&slow_work_oom_timer); - wake_up_all(&slow_work_thread_wq); - wait_for_completion(&slow_work_last_thread_exited); - printk(KERN_NOTICE "Slow work thread pool:" - " Shut down complete\n"); - } - - mutex_unlock(&slow_work_user_lock); -} -EXPORT_SYMBOL(slow_work_unregister_user); - -/* - * Initialise the slow work facility - */ -static int __init init_slow_work(void) -{ - unsigned nr_cpus = num_possible_cpus(); - - if (slow_work_max_threads < nr_cpus) - slow_work_max_threads = nr_cpus; -#ifdef CONFIG_SYSCTL - if (slow_work_max_max_threads < nr_cpus * 2) - slow_work_max_max_threads = nr_cpus * 2; -#endif -#ifdef CONFIG_SLOW_WORK_DEBUG - { - struct dentry *dbdir; - - dbdir = debugfs_create_dir("slow_work", NULL); - if (dbdir && !IS_ERR(dbdir)) - debugfs_create_file("runqueue", S_IFREG | 0400, dbdir, - NULL, &slow_work_runqueue_fops); - } -#endif - return 0; -} - -subsys_initcall(init_slow_work); diff --git a/kernel/slow-work.h b/kernel/slow-work.h deleted file mode 100644 index a29ebd1ef41d..000000000000 --- a/kernel/slow-work.h +++ /dev/null @@ -1,72 +0,0 @@ -/* Slow work private definitions - * - * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ - -#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of - * things to do */ -#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after - * OOM */ - -#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */ - -/* - * slow-work.c - */ -#ifdef CONFIG_SLOW_WORK_DEBUG -extern struct slow_work *slow_work_execs[]; -extern pid_t slow_work_pids[]; -extern rwlock_t slow_work_execs_lock; -#endif - -extern struct list_head slow_work_queue; -extern struct list_head vslow_work_queue; -extern spinlock_t slow_work_queue_lock; - -/* - * slow-work-debugfs.c - */ -#ifdef CONFIG_SLOW_WORK_DEBUG -extern const struct file_operations slow_work_runqueue_fops; - -extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *); -#endif - -/* - * Helper functions - */ -static inline void slow_work_set_thread_pid(int id, pid_t pid) -{ -#ifdef CONFIG_SLOW_WORK_DEBUG - slow_work_pids[id] = pid; -#endif -} - -static inline void slow_work_mark_time(struct slow_work *work) -{ -#ifdef CONFIG_SLOW_WORK_DEBUG - work->mark = CURRENT_TIME; -#endif -} - -static inline void slow_work_begin_exec(int id, struct slow_work *work) -{ -#ifdef CONFIG_SLOW_WORK_DEBUG - slow_work_execs[id] = work; -#endif -} - -static inline void slow_work_end_exec(int id, struct slow_work *work) -{ -#ifdef CONFIG_SLOW_WORK_DEBUG - write_lock(&slow_work_execs_lock); - slow_work_execs[id] = NULL; - write_unlock(&slow_work_execs_lock); -#endif -} diff --git a/kernel/sysctl.c b/kernel/sysctl.c index d24f761f4876..5821365b9605 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -50,7 +50,6 @@ #include #include #include -#include #include #include #include @@ -906,13 +905,6 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif -#ifdef CONFIG_SLOW_WORK - { - .procname = "slow-work", - .mode = 0555, - .child = slow_work_sysctls, - }, -#endif #ifdef CONFIG_PERF_EVENTS { .procname = "perf_event_paranoid", -- cgit v1.2.3-59-g8ed1b From 40f2b6ffe525e975203c1621d4d4abaa7689b674 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 24 Jul 2010 11:10:09 +0200 Subject: fscache: fix build on !CONFIG_SYSCTL Commit 8b8edefa (fscache: convert object to use workqueue instead of slow-work) made fscache_exit() call unregister_sysctl_table() unconditionally breaking build when sysctl is disabled. Fix it by putting it inside CONFIG_SYSCTL. Signed-off-by: Tejun Heo Reported-by: Randy Dunlap Cc: David Howells --- fs/fscache/main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/fscache/main.c b/fs/fscache/main.c index 500936d9fff2..f9d856773f79 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -186,7 +186,9 @@ static void __exit fscache_exit(void) kobject_put(fscache_root); kmem_cache_destroy(fscache_cookie_jar); +#ifdef CONFIG_SYSCTL unregister_sysctl_table(fscache_sysctl_header); +#endif fscache_proc_cleanup(); destroy_workqueue(fscache_op_wq); destroy_workqueue(fscache_object_wq); -- cgit v1.2.3-59-g8ed1b From 098849516dd522a343e659740c8f1394a5b7fa69 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 1 Aug 2010 11:50:12 +0200 Subject: workqueue: explain for_each_*cwq_cpu() iterators for_each_*cwq_cpu() are similar to regular CPU iterators except that it also considers the pseudo CPU number used for unbound workqueues. Explain them. 
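A typical caller then looks roughly like the sketch below (illustrative only; gcwq, get_gcwq() and gcwq->lock are pre-existing workqueue internals, and the loop body is a placeholder):

	unsigned int cpu;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		/* visits every possible CPU's gcwq plus the unbound gcwq */
		spin_lock_irq(&gcwq->lock);
		/* ... examine or update per-gcwq state here ... */
		spin_unlock_irq(&gcwq->lock);
	}
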
Signed-off-by: Tejun Heo Cc: Andrew Morton --- kernel/workqueue.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e5cb7faac58e..1105c474073a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -271,6 +271,19 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask, return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2); } +/* + * CPU iterators + * + * An extra gcwq is defined for an invalid cpu number + * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any + * specific CPU. The following iterators are similar to + * for_each_*_cpu() iterators but also considers the unbound gcwq. + * + * for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND + * for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND + * for_each_cwq_cpu() : possible CPUs for bound workqueues, + * WORK_CPU_UNBOUND for unbound workqueues + */ #define for_each_gcwq_cpu(cpu) \ for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \ (cpu) < WORK_CPU_NONE; \ -- cgit v1.2.3-59-g8ed1b From 6ee0578b4daaea01c96b172c6aacca43fd9807a6 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Fri, 30 Jul 2010 14:57:37 -0700 Subject: workqueue: mark init_workqueues() as early_initcall() Mark init_workqueues() as early_initcall() and thus it will be initialized before smp bringup. init_workqueues() registers for the hotcpu notifier and thus it should cope with the processors that are brought online after the workqueues are initialized. x86 smp bringup code uses workqueues and uses a workaround for the cold boot process (as the workqueues are initialized post smp_init()). Marking init_workqueues() as early_initcall() will pave the way for cleaning up this code. Signed-off-by: Suresh Siddha Signed-off-by: Tejun Heo Cc: Oleg Nesterov Cc: Andrew Morton --- include/linux/workqueue.h | 1 - init/main.c | 2 -- kernel/workqueue.c | 4 +++- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 5f76001c4e6d..51dc9a727e5e 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -327,7 +327,6 @@ extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, extern int schedule_on_each_cpu(work_func_t func); extern int keventd_up(void); -extern void init_workqueues(void); int execute_in_process_context(work_func_t fn, struct execute_work *); extern int flush_work(struct work_struct *work); diff --git a/init/main.c b/init/main.c index 3bdb152f412f..5f2ec2cdd900 100644 --- a/init/main.c +++ b/init/main.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -786,7 +785,6 @@ static void __init do_initcalls(void) */ static void __init do_basic_setup(void) { - init_workqueues(); cpuset_init_smp(); usermodehelper_init(); init_tmpfs(); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1105c474073a..e2eb351d9152 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3507,7 +3507,7 @@ out_unlock: } #endif /* CONFIG_FREEZER */ -void __init init_workqueues(void) +static int __init init_workqueues(void) { unsigned int cpu; int i; @@ -3559,4 +3559,6 @@ void __init init_workqueues(void) system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq); + return 0; } +early_initcall(init_workqueues); -- cgit v1.2.3-59-g8ed1b
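As a general illustration of the pattern the last patch relies on (a sketch with a hypothetical subsystem; none of these names come from the patch): an early_initcall() runs before SMP bringup, so the init function can register a CPU hotplug notifier and pick up CPUs that come online afterwards.

	#include <linux/cpu.h>
	#include <linux/init.h>
	#include <linux/notifier.h>

	/* hypothetical subsystem showing early_initcall() + hotcpu notifier */
	static int mysubsys_cpu_callback(struct notifier_block *nb,
					 unsigned long action, void *hcpu)
	{
		/* CPUs brought online after early init are handled here */
		return NOTIFY_OK;
	}

	static int __init mysubsys_init(void)
	{
		hotcpu_notifier(mysubsys_cpu_callback, 0);
		return 0;
	}
	early_initcall(mysubsys_init);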