author:    Peter Zijlstra <peterz@infradead.org>  2020-02-12 21:01:16 +0100
committer: Thomas Gleixner <tglx@linutronix.de>   2020-05-19 15:51:18 +0200
commit:    f93524eb9c54f49be150167918f6546b0a2e09b1
tree:      2c480d01f1f014baf454abb8ce8f52adac0ad6d4
parent:    sh/ftrace: Move arch_ftrace_nmi_{enter,exit} into nmi exception
sched,rcu,tracing: Avoid tracing before in_nmi() is correct
If a tracer is invoked before in_nmi() becomes true, the tracer cannot detect that it is called from NMI context and therefore cannot behave correctly. Change nmi_{enter,exit}() to use __preempt_count_{add,sub}(), since the normal preempt_count_{add,sub}() have a (desired) function trace entry.

This fixes a potential issue with the current code: when the function tracer has stack-tracing enabled, __trace_stack() will malfunction when it hits the preempt_count_add() function entry from NMI context.

Suggested-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Link: https://lkml.kernel.org/r/20200505134101.434193525@linutronix.de
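For context: in_nmi() is derived entirely from the NMI bits of preempt_count, so until nmi_enter() has added NMI_OFFSET nothing -- a tracer included -- can tell it is running in an NMI. A minimal sketch of the relevant definitions, paraphrased from include/linux/preempt.h of this era:

  /*
   * preempt_count bit layout (see include/linux/preempt.h):
   *   PREEMPT_MASK: 0x000000ff
   *   SOFTIRQ_MASK: 0x0000ff00
   *   HARDIRQ_MASK: 0x000f0000
   *   NMI_MASK:     0x00f00000
   */
  #define NMI_BITS	4
  #define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)	/* == 20 */
  #define NMI_OFFSET	(1UL << NMI_SHIFT)
  #define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

  /* True only once the NMI bits have been added to the count: */
  #define in_nmi()	(preempt_count() & NMI_MASK)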
Diffstat (limited to 'include/linux/hardirq.h')
 include/linux/hardirq.h | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index a043ad826c67..621556efe45f 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -66,6 +66,15 @@ extern void irq_exit(void);
 #endif
 
 /*
+ * NMI vs Tracing
+ * --------------
+ *
+ * We must not land in a tracer until (or after) we've changed preempt_count
+ * such that in_nmi() becomes true. To that effect all NMI C entry points must
+ * be marked 'notrace' and call nmi_enter() as soon as possible.
+ */
+
+/*
  * nmi_enter() can nest up to 15 times; see NMI_BITS.
  */
 #define nmi_enter() \
@@ -75,7 +84,7 @@ extern void irq_exit(void);
 		lockdep_off(); \
 		ftrace_nmi_enter(); \
 		BUG_ON(in_nmi() == NMI_MASK); \
-		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
+		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
 		rcu_nmi_enter(); \
 		lockdep_hardirq_enter(); \
 	} while (0)
@@ -85,7 +94,7 @@ extern void irq_exit(void);
 		lockdep_hardirq_exit(); \
 		rcu_nmi_exit(); \
 		BUG_ON(!in_nmi()); \
-		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
+		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
 		ftrace_nmi_exit(); \
 		lockdep_on(); \
 		printk_nmi_exit(); \
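For reference, the property the patch relies on, sketched from the generic implementations in include/asm-generic/preempt.h and include/linux/preempt.h (arch-specific variants, e.g. the x86 per-CPU one, differ in mechanism but share this property): __preempt_count_add() is a raw inline update with no tracing hook, while preempt_count_add() can be an out-of-line function whose entry the function tracer instruments.

  /* Raw variant: a plain counter update the tracer never sees. */
  static __always_inline void __preempt_count_add(int val)
  {
  	*preempt_count_ptr() += val;
  }

  /*
   * Traceable variant: with CONFIG_DEBUG_PREEMPT or preempt-toggle
   * tracing this is a real out-of-line function, so the function
   * tracer sees its entry -- exactly what must not happen before
   * the NMI bits of preempt_count are set.
   */
  #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
  extern void preempt_count_add(int val);
  #else
  #define preempt_count_add(val)	__preempt_count_add(val)
  #endif

Per the new "NMI vs Tracing" comment, an NMI C entry point is then expected to take this shape (the handler name here is purely illustrative):

  /* Hypothetical entry point following the rule above: */
  notrace void handle_nmi(struct pt_regs *regs)
  {
  	nmi_enter();	/* in_nmi() is true from here on */
  	/* ... handle the NMI; tracers now see NMI context ... */
  	nmi_exit();
  }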