author     Joonas Lahtinen <joonas.lahtinen@linux.intel.com>  2021-01-15 08:49:57 +0200
committer  Joonas Lahtinen <joonas.lahtinen@linux.intel.com>  2021-01-15 08:49:57 +0200
commit     d263dfa7d2697a43f3299b9731cd568ee49cdd2c (patch)
tree       f5d993c7000ac41ee926acf0b357ec572ef7908f /kernel/workqueue.c
parent     drm/i915/gt: Prune inlines (diff)
parent     Merge tag 'drm-intel-gt-next-2021-01-14' of git://anongit.freedesktop.org/drm/drm-intel into drm-next (diff)
Merge drm/drm-next into drm-intel-gt-next
Backmerging to get a common base for merging topic branches between
drm-intel-next and drm-intel-gt-next.

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  20
1 file changed, 17 insertions, 3 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 437935e7a199..9880b6c0e272 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1327,6 +1327,9 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
 {
 	struct worker_pool *pool = pwq->pool;
 
+	/* record the work call stack in order to print it in KASAN reports */
+	kasan_record_aux_stack(work);
+
 	/* we own @work, set data and link */
 	set_work_pwq(work, pwq, extra_flags);
 	list_add_tail(&work->entry, head);
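
For context: kasan_record_aux_stack() saves the current call stack alongside the (slab-allocated) object passed to it, so a later KASAN report that touches the work item can also print where it was queued. A hypothetical sketch of the kind of bug this helps debug (names below are illustrative, not from the patch):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_ctx {
	struct work_struct work;
};

static void example_fn(struct work_struct *work)
{
	kfree(container_of(work, struct example_ctx, work));
}

static void example_trigger(void)
{
	struct example_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return;
	INIT_WORK(&ctx->work, example_fn);
	schedule_work(&ctx->work);
	kfree(ctx);	/* deliberate bug: races with example_fn() */
}

With the hunk above, the resulting KASAN use-after-free report can include the stack recorded in insert_work(), pointing back at the queueing site in example_trigger().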
@@ -3728,17 +3731,24 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 	 * is updated and visible.
 	 */
 	if (!freezable || !workqueue_freezing) {
+		bool kick = false;
+
 		pwq->max_active = wq->saved_max_active;
 
 		while (!list_empty(&pwq->delayed_works) &&
-		       pwq->nr_active < pwq->max_active)
+		       pwq->nr_active < pwq->max_active) {
 			pwq_activate_first_delayed(pwq);
+			kick = true;
+		}
 
 		/*
 		 * Need to kick a worker after thawed or an unbound wq's
-		 * max_active is bumped. It's a slow path. Do it always.
+		 * max_active is bumped. In realtime scenarios, always kicking a
+		 * worker will cause interference on the isolated cpu cores, so
+		 * let's kick iff work items were activated.
 		 */
-		wake_up_worker(pwq->pool);
+		if (kick)
+			wake_up_worker(pwq->pool);
 	} else {
 		pwq->max_active = 0;
 	}
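
This slow path runs when a workqueue is thawed or when its max_active limit is raised, for example through workqueue_set_max_active(). A rough driver-side sketch of the latter path (names are illustrative, not taken from the patch):

#include <linux/workqueue.h>

static void example_fn(struct work_struct *work) { }
static DECLARE_WORK(example_work, example_fn);

static int example_init(void)
{
	/* items queued beyond max_active wait on pwq->delayed_works */
	struct workqueue_struct *wq = alloc_workqueue("example_wq", WQ_UNBOUND, 2);

	if (!wq)
		return -ENOMEM;

	queue_work(wq, &example_work);

	/* ends up in pwq_adjust_max_active(); with the change above, a
	 * worker is woken only if the new limit activated delayed items */
	workqueue_set_max_active(wq, 4);
	return 0;
}

Here only a single item was queued, so nothing sits on delayed_works and the wake_up_worker() call is now skipped instead of being issued unconditionally.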
@@ -4908,6 +4918,10 @@ static void unbind_workers(int cpu)
 		pool->flags |= POOL_DISASSOCIATED;
 
 		raw_spin_unlock_irq(&pool->lock);
+
+		for_each_pool_worker(worker, pool)
+			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_active_mask) < 0);
+
 		mutex_unlock(&wq_pool_attach_mutex);
 
 		/*
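
For reference, the loop added above runs once the pool has been flagged POOL_DISASSOCIATED and uses set_cpus_allowed_ptr() to widen each worker thread's affinity to the still-active CPUs, letting the scheduler migrate the workers off the outgoing CPU. A minimal sketch of that primitive, using a hypothetical helper name (not from the patch):

#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/sched.h>

/* hypothetical wrapper: allow @task to run on any CPU that is still
 * active; set_cpus_allowed_ptr() returns 0 on success or a negative
 * errno, which the patch turns into a one-time warning */
static void example_widen_affinity(struct task_struct *task)
{
	WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpu_active_mask) < 0);
}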