author		Steven Rostedt <srostedt@redhat.com>	2009-02-06 01:14:26 -0500
committer	Steven Rostedt <srostedt@redhat.com>	2009-02-07 20:02:55 -0500
commit		9a5fd902273d01170fd033691bd70b142baa7309 (patch)
tree		f3364fa8917db8eadc5235f77b5245081c0cc028
parent		nmi: add generic nmi tracking state (diff)
ftrace: change function graph tracer to use new in_nmi
The function graph tracer piggybacked onto dynamic ftrace to use its custom in_nmi code for dynamic tracing. The problem was (as Andrew Morton pointed out) that it really only wanted to bail out if the current CPU was in NMI context, but the dynamic ftrace in_nmi custom code was true if _any_ CPU happened to be in NMI context.

Now that we have a generic in_nmi interface, this patch changes the function graph code to use it instead of the dynamic ftrace custom code.

Reported-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
-rw-r--r--	arch/x86/Kconfig	2
-rw-r--r--	arch/x86/kernel/ftrace.c	21
2 files changed, 2 insertions, 21 deletions
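
The semantic difference described in the commit message can be sketched in a few lines. The following is an illustrative user-space comparison only, not kernel code, and every name in it is made up for the sketch: a shared nmi_running counter models the old dynamic-ftrace test that trips whenever any CPU is inside an NMI, while a thread-local shadow of the preempt count models the generic in_nmi() test that looks only at the current context.

/* Illustrative sketch only, not kernel code: contrasts "any CPU in
 * NMI" with "this context in NMI". All names here are made up. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Old scheme: a single global counter bumped on every CPU's NMI entry. */
static atomic_int nmi_running;

static bool any_cpu_in_nmi(void)
{
	return atomic_load(&nmi_running) != 0;
}

/* New scheme: per-context state, modeled as a thread-local shadow of
 * the preempt count with a made-up bit standing in for the NMI mask. */
static _Thread_local unsigned int preempt_count_shadow;
#define SHADOW_NMI_BIT 0x00100000U

static bool this_context_in_nmi(void)
{
	return (preempt_count_shadow & SHADOW_NMI_BIT) != 0;
}

int main(void)
{
	/* Pretend some other CPU is currently handling an NMI. */
	atomic_fetch_add(&nmi_running, 1);

	/* The old test would bail out of the graph tracer here ... */
	printf("any_cpu_in_nmi:      %d\n", any_cpu_in_nmi());
	/* ... even though the current context never entered an NMI. */
	printf("this_context_in_nmi: %d\n", this_context_in_nmi());
	return 0;
}

In the kernel the per-context state lives in the preempt count that the generic NMI tracking from the parent commit maintains, so the tracer needs no private bookkeeping of its own; that is why the atomic_inc/atomic_dec pair in the diff below can simply be deleted.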
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a6be725cb049..2cf7bbcaed4e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -34,7 +34,7 @@ config X86
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
- select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE || FUNCTION_GRAPH_TRACER
+ select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
select HAVE_ARCH_KGDB if !X86_VOYAGER
select HAVE_ARCH_TRACEHOOK
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index e3fad2ef622c..918073c6681b 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -367,25 +367,6 @@ int ftrace_disable_ftrace_graph_caller(void)
return ftrace_mod_jmp(ip, old_offset, new_offset);
}
-#else /* CONFIG_DYNAMIC_FTRACE */
-
-/*
- * These functions are picked from those used on
- * this page for dynamic ftrace. They have been
- * simplified to ignore all traces in NMI context.
- */
-static atomic_t nmi_running;
-
-void arch_ftrace_nmi_enter(void)
-{
- atomic_inc(&nmi_running);
-}
-
-void arch_ftrace_nmi_exit(void)
-{
- atomic_dec(&nmi_running);
-}
-
#endif /* !CONFIG_DYNAMIC_FTRACE */
/* Add a function return address to the trace stack on thread info.*/
@@ -475,7 +456,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
&return_to_handler;
/* Nmi's are currently unsupported */
- if (unlikely(atomic_read(&nmi_running)))
+ if (unlikely(in_nmi()))
return;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
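
For reference, the generic test this hunk switches to is driven by the preempt count; its rough shape (paraphrased from include/linux/hardirq.h of that period, with the exact mask layout omitted) is:

/* Paraphrased; see include/linux/hardirq.h for the real definition. */
#define in_nmi()	(preempt_count() & NMI_MASK)

Because preempt_count() is per-context state updated by the generic nmi_enter()/nmi_exit() paths, the check is true only when the CPU executing prepare_ftrace_return() actually took an NMI, which is exactly the behavior the commit message asks for.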