author	Paul E. McKenney <paulmck@kernel.org>	2020-03-20 14:29:08 -0700
committer	Paul E. McKenney <paulmck@kernel.org>	2020-04-27 11:03:52 -0700
commit	b38f57c1fe64276773b124dffb0a139cc32ab3cb (patch)
tree	b69915f8632122f65bc6a692bf8edfbec71228ff /kernel/rcu
parent	rcu-tasks: Avoid IPIing userspace/idle tasks if kernel is so built (diff)
rcu-tasks: Allow rcu_read_unlock_trace() under scheduler locks
The rcu_read_unlock_trace() function can invoke rcu_read_unlock_trace_special(), which in turn can call wake_up().  Therefore, if any scheduler lock is held across a call to rcu_read_unlock_trace(), self-deadlock can occur because wake_up() itself acquires scheduler locks.  This commit therefore uses the irq_work facility to defer the wake_up() to a clean environment where no scheduler locks will be held.

Reported-by: Steven Rostedt <rostedt@goodmis.org>
[ paulmck: Update #includes for m68k per kbuild test robot. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
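For readers unfamiliar with irq_work, the deferral used by this patch follows the generic Linux irq_work pattern: instead of calling wake_up() directly from the unlock path, the code queues an irq_work item whose handler runs later from hard-interrupt context, where no scheduler locks are held. Below is a minimal sketch of that pattern; the names demo_wq, demo_handler(), demo_iw, and demo_report_done() are hypothetical and are not part of this patch.

#include <linux/irq_work.h>
#include <linux/wait.h>

/* Hypothetical wait queue standing in for trc_wait. */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

/*
 * Runs later from a clean (hard-interrupt) context in which the
 * original caller's scheduler locks are not held, so wake_up() is safe.
 */
static void demo_handler(struct irq_work *iwp)
{
	wake_up(&demo_wq);
}
static DEFINE_IRQ_WORK(demo_iw, demo_handler);

/*
 * Called from a context that may hold scheduler locks: defer the
 * wake-up by queueing the irq_work instead of calling wake_up().
 */
static void demo_report_done(void)
{
	irq_work_queue(&demo_iw);
}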
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tasks.h	12
-rw-r--r--	kernel/rcu/update.c	1
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index a9e8ecb10860..dd311e93ed0f 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -729,6 +729,16 @@ void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
"RCU Tasks Trace");
+/*
+ * This irq_work handler allows rcu_read_unlock_trace() to be invoked
+ * while the scheduler locks are held.
+ */
+static void rcu_read_unlock_iw(struct irq_work *iwp)
+{
+ wake_up(&trc_wait);
+}
+static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
+
/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
{
@@ -742,7 +752,7 @@ void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
WRITE_ONCE(t->trc_reader_nesting, nesting);
if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
- wake_up(&trc_wait);
+ irq_work_queue(&rcu_tasks_trace_iw);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
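As a usage illustration, with the wake-up deferred to irq_work, the tracing-RCU read-side unlock can now be invoked while a scheduler-side raw spinlock is held. The sketch below is hypothetical (my_lock and reader_under_lock() merely stand in for real scheduler code) and only shows the shape of such a caller:

#include <linux/rcupdate_trace.h>
#include <linux/spinlock.h>

/* Hypothetical raw spinlock standing in for a scheduler lock. */
static DEFINE_RAW_SPINLOCK(my_lock);

static void reader_under_lock(void)
{
	unsigned long flags;

	rcu_read_lock_trace();
	raw_spin_lock_irqsave(&my_lock, flags);
	/* ... read-side work ... */
	rcu_read_unlock_trace();	/* may queue irq_work; no direct wake_up() */
	raw_spin_unlock_irqrestore(&my_lock, flags);
}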
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index c5799349ff31..b1f07a0e3a56 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -41,6 +41,7 @@
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
+#include <linux/irq_work.h>
#define CREATE_TRACE_POINTS