author	Steven Rostedt <srostedt@redhat.com>	2008-05-12 21:20:49 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 20:55:55 +0200
commit	6fb44b717c10ecf37beaaebd312f3afa93fed714 (patch)
tree	a86ec44e761ac9ea2cae992fb6351cbfbea434ac /kernel/trace/trace_irqsoff.c
parent	ftrace: allow the event pipe to be polled (diff)
download	linux-dev-6fb44b717c10ecf37beaaebd312f3afa93fed714.tar.xz
	linux-dev-6fb44b717c10ecf37beaaebd312f3afa93fed714.zip
ftrace: add trace_function api for other tracers to use
A new check was added in the ftrace function that won't trace if the CPU trace buffer is disabled. Unfortunately, other tracers used ftrace() to write to the buffer after they had disabled it, so the new check turns those calls into a nop. This patch changes the __ftrace variant that is called without the check into a new API for the other tracers to use, called "trace_function". The other tracers now use this interface when the trace CPU buffer is already disabled.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
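For illustration, here is a minimal user-space sketch of the pattern the commit message describes; it is not kernel code, and every name except trace_function is hypothetical. The registered callback bumps a per-CPU "disabled" counter and only writes when it is the sole user of the buffer, while trace_function() writes unconditionally so that tracers which have already disabled the buffer themselves can still record an entry.

/* Minimal user-space model of the pattern above (illustrative only). */
#include <stdio.h>

struct trace_cpu_data {
	int disabled;		/* non-zero: the per-CPU trace buffer is disabled */
};

/* Raw write: no disabled check; callers guarantee exclusion themselves. */
static void trace_function(struct trace_cpu_data *data,
			   unsigned long ip, unsigned long parent_ip)
{
	printf("entry: ip=%#lx parent=%#lx\n", ip, parent_ip);
}

/* Function-trace callback: writes only when it is the sole user. */
static void ftrace_callback(struct trace_cpu_data *data,
			    unsigned long ip, unsigned long parent_ip)
{
	if (++data->disabled == 1)
		trace_function(data, ip, parent_ip);
	--data->disabled;
}

int main(void)
{
	struct trace_cpu_data data = { .disabled = 0 };

	ftrace_callback(&data, 0x1000, 0x2000);		/* traced */

	/* A latency tracer disables the buffer, then records via the raw API. */
	data.disabled++;
	trace_function(&data, 0x3000, 0x4000);		/* still recorded */
	data.disabled--;

	return 0;
}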
Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
-rw-r--r--	kernel/trace/trace_irqsoff.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d2a6e6f1ad2d..3269f4ff5172 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1))
- ftrace(tr, data, ip, parent_ip, flags);
+ trace_function(tr, data, ip, parent_ip, flags);
atomic_dec(&data->disabled);
}
@@ -150,7 +150,7 @@ check_critical_timing(struct trace_array *tr,
if (!report_latency(delta))
goto out_unlock;
- ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+ trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
latency = nsecs_to_usecs(delta);
@@ -188,7 +188,7 @@ out:
data->critical_sequence = max_sequence;
data->preempt_timestamp = ftrace_now(cpu);
tracing_reset(data);
- ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+ trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
}
static inline void notrace
@@ -221,7 +221,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
local_save_flags(flags);
- ftrace(tr, data, ip, parent_ip, flags);
+ trace_function(tr, data, ip, parent_ip, flags);
__get_cpu_var(tracing_cpu) = 1;
@@ -254,7 +254,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
atomic_inc(&data->disabled);
local_save_flags(flags);
- ftrace(tr, data, ip, parent_ip, flags);
+ trace_function(tr, data, ip, parent_ip, flags);
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
data->critical_start = 0;
atomic_dec(&data->disabled);