Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  |  8  ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bc88fd939f4e..301db4406bc3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2266,7 +2266,7 @@ __acquires(&pool->lock)
 	 * While we must be careful to not use "work" after this, the trace
 	 * point will only record its address.
 	 */
-	trace_workqueue_execute_end(work);
+	trace_workqueue_execute_end(work, worker->current_func);
 	lock_map_release(&lockdep_map);
 	lock_map_release(&pwq->wq->lockdep_map);
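The new argument is consumed by the workqueue_execute_end tracepoint, so tools can see which handler just finished rather than only the address of the work struct. A minimal sketch of the matching event definition in include/trace/events/workqueue.h, assuming the usual TRACE_EVENT layout (field names and the exact TP_printk format are not taken from this diff):

	TRACE_EVENT(workqueue_execute_end,

		TP_PROTO(struct work_struct *work, work_func_t function),

		TP_ARGS(work, function),

		TP_STRUCT__entry(
			__field(void *, work)
			__field(void *, function)
		),

		TP_fast_assign(
			__entry->work     = work;
			__entry->function = function;
		),

		/* %ps prints the symbol name of the recorded function pointer */
		TP_printk("work struct %p: function %ps",
			  __entry->work, __entry->function)
	);

With the function recorded, a line in the trace output shows the handler symbol instead of just the work item's address.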
@@ -2280,7 +2280,7 @@ __acquires(&pool->lock)
 	}
 
 	/*
-	 * The following prevents a kworker from hogging CPU on !PREEMPT
+	 * The following prevents a kworker from hogging CPU on !PREEMPTION
 	 * kernels, where a requeueing work item waiting for something to
 	 * happen could deadlock with stop_machine as such work item could
 	 * indefinitely requeue itself while all other CPUs are trapped in
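This comment sits just above a cond_resched() in process_one_work(); the rewording reflects that the preemption option is now the CONFIG_PREEMPTION umbrella symbol, which covers both CONFIG_PREEMPT and CONFIG_PREEMPT_RT. To illustrate the situation it guards against, here is a hypothetical self-requeueing work item (poll_hw_fn() and hw_ready() are made-up names, not from this change): on a non-preemptible kernel nothing would ever take the CPU away from the kworker running it, so without that explicit cond_resched() between work items, stop_machine could wait forever.

	#include <linux/workqueue.h>

	static bool hw_ready(void);	/* hypothetical helper, defined elsewhere */

	/* Made-up poller: requeues itself until the hardware responds. */
	static void poll_hw_fn(struct work_struct *work)
	{
		if (!hw_ready())
			queue_work(system_wq, work);	/* requeue and try again */
	}

	static DECLARE_WORK(poll_hw_work, poll_hw_fn);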
@@ -4374,8 +4374,8 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	for_each_pwq(pwq, wq) {
 		spin_lock_irq(&pwq->pool->lock);
 		if (WARN_ON(pwq_busy(pwq))) {
-			pr_warning("%s: %s has the following busy pwq\n",
-				   __func__, wq->name);
+			pr_warn("%s: %s has the following busy pwq\n",
+				__func__, wq->name);
 			show_pwq(pwq);
 			spin_unlock_irq(&pwq->pool->lock);
 			mutex_unlock(&wq->mutex);
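pr_warning() was the older spelling and has since been removed from the kernel, so remaining callers such as this one were converted to pr_warn(). Both expanded to the same printk() call; roughly (a simplified sketch, not the verbatim include/linux/printk.h text):

	#define pr_warn(fmt, ...) \
		printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)

	/* the removed spelling was just an alias */
	#define pr_warning pr_warn

The only other change in the hunk is the continuation line being re-indented to line up with the shorter function name.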