path: root/kernel/trace
author     Carsten Emde <Carsten.Emde@osadl.org>    2009-09-13 01:41:31 +0200
committer  Steven Rostedt <rostedt@goodmis.org>     2009-09-12 21:44:13 -0400
commit     41dfba4367109b92d92ec6e059be6950497d932f (patch)
tree       ce9fe350c5b0e1c46586a45a088a4cf6913deb86 /kernel/trace
parent     tracing: prevent NULL pointer dereference in ftrace_raw_event_block_bio_bounce (diff)
tracing: remove unused local variables in tracer probe functions
When the nsecs_to_usecs() conversion in probe_wakeup_sched_switch() and check_critical_timing() was moved to a later stage in order to avoid unnecessary computation, the original variables, assignments and comments were left behind. Remove them.

Signed-off-by: Carsten Emde <C.Emde@osadl.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
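The pattern the message refers to, keeping timestamps and the latency delta in nanoseconds and deferring the comparatively slow ns-to-us conversion until a new maximum is actually reported, looks roughly like the standalone userspace sketch below. This is an illustration only, not the tracer code; the helpers now_ns(), ns_to_us() and check_latency() and the variable max_latency_ns are invented for the example.

/*
 * Minimal sketch of deferred ns->us conversion (illustrative names only).
 */
#include <stdio.h>
#include <time.h>

typedef unsigned long long u64;

static u64 max_latency_ns;

/* read a monotonic clock in nanoseconds */
static u64 now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (u64)ts.tv_sec * 1000000000ULL + (u64)ts.tv_nsec;
}

/* the "slow" conversion we want to avoid on the hot path */
static unsigned long ns_to_us(u64 ns)
{
        return (unsigned long)(ns / 1000ULL);
}

static void check_latency(u64 start_ns, u64 end_ns)
{
        u64 delta = end_ns - start_ns;          /* stays in nanoseconds */

        if (delta <= max_latency_ns)
                return;                         /* common case: nothing converted */

        max_latency_ns = delta;
        /* convert only on the rare path that reports a new maximum */
        printf("new max latency: %lu us\n", ns_to_us(delta));
}

int main(void)
{
        u64 t0 = now_ns();
        /* pretend some traced work happened here */
        check_latency(t0, now_ns());
        return 0;
}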
Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/trace_irqsoff.c        12
-rw-r--r--   kernel/trace/trace_sched_wakeup.c   10
2 files changed, 1 insertion, 21 deletions
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 5555b75a0d12..06f8ea9e4b9d 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -129,15 +129,10 @@ check_critical_timing(struct trace_array *tr,
                               unsigned long parent_ip,
                               int cpu)
 {
-        unsigned long latency, t0, t1;
         cycle_t T0, T1, delta;
         unsigned long flags;
         int pc;
 
-        /*
-         * usecs conversion is slow so we try to delay the conversion
-         * as long as possible:
-         */
         T0 = data->preempt_timestamp;
         T1 = ftrace_now(cpu);
         delta = T1-T0;
@@ -157,17 +152,12 @@ check_critical_timing(struct trace_array *tr,
 
         trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 
-        latency = nsecs_to_usecs(delta);
-
         if (data->critical_sequence != max_sequence)
                 goto out_unlock;
 
-        tracing_max_latency = delta;
-        t0 = nsecs_to_usecs(T0);
-        t1 = nsecs_to_usecs(T1);
-
         data->critical_end = parent_ip;
 
+        tracing_max_latency = delta;
         update_max_tr_single(tr, current, cpu);
 
         max_sequence++;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index cf43bdb1763a..6e1529bc6172 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -110,7 +110,6 @@ static void notrace
 probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
         struct task_struct *next)
 {
-        unsigned long latency = 0, t0 = 0, t1 = 0;
         struct trace_array_cpu *data;
         cycle_t T0, T1, delta;
         unsigned long flags;
@@ -156,10 +155,6 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
         trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
         tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
 
-        /*
-         * usecs conversion is slow so we try to delay the conversion
-         * as long as possible:
-         */
         T0 = data->preempt_timestamp;
         T1 = ftrace_now(cpu);
         delta = T1-T0;
@@ -167,12 +162,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
         if (!report_latency(delta))
                 goto out_unlock;
 
-        latency = nsecs_to_usecs(delta);
-
         tracing_max_latency = delta;
-        t0 = nsecs_to_usecs(T0);
-        t1 = nsecs_to_usecs(T1);
-
         update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
 
 out_unlock: