author     Ingo Molnar <mingo@elte.hu>          2008-05-12 21:20:51 +0200
committer  Thomas Gleixner <tglx@linutronix.de> 2008-05-23 21:04:06 +0200
commit     57422797dc009fc83766bcf230d29dbe6e08e21e (patch)
tree       9ba5768539c08d99679f72a9709f5e345fd359c9 /kernel
parent     ftrace: remove notrace (diff)
ftrace: add wakeup events to sched tracer
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace.c               42
-rw-r--r--  kernel/trace/trace.h               12
-rw-r--r--  kernel/trace/trace_sched_switch.c  36
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   2
-rw-r--r--  kernel/trace/trace_selftest.c       1
5 files changed, 88 insertions, 5 deletions
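
For orientation before the diff: the patch adds tracing_sched_wakeup_trace() as the low-level event writer and ftrace_wake_up_task() as the entry point the scheduler is expected to call when it wakes a task. The sketch below is illustrative only and is not part of this patch; the wrapper function name and the include are assumptions (the scheduler-side caller lives outside the kernel/trace files shown here), while the ftrace_wake_up_task() call itself matches the code added to trace_sched_switch.c.

/*
 * Illustrative sketch, not from this patch: a scheduler wakeup path
 * feeding the new hook.  Only ftrace_wake_up_task() comes from this
 * change (kernel/trace/trace_sched_switch.c); the wrapper name and
 * the include are assumptions made for the example.
 */
#include <linux/sched.h>

static void example_report_wakeup(struct task_struct *wakee)
{
	/*
	 * wakee is the task being woken, current is the task doing the
	 * waking; the hook records both pids and prios plus the waker's
	 * state as a TRACE_WAKE entry.
	 */
	ftrace_wake_up_task(wakee, current);
}
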
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f5898051fdd9..192c1354a7e0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -57,7 +57,7 @@ static struct trace_array max_tr;
static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
static int tracer_enabled = 1;
-static unsigned long trace_nr_entries = 16384UL;
+static unsigned long trace_nr_entries = 65536UL;
static struct tracer *trace_types __read_mostly;
static struct tracer *current_trace __read_mostly;
@@ -87,6 +87,7 @@ enum trace_type {
TRACE_FN,
TRACE_CTX,
+ TRACE_WAKE,
TRACE_SPECIAL,
__TRACE_LAST_TYPE
@@ -711,6 +712,30 @@ tracing_sched_switch_trace(struct trace_array *tr,
wake_up(&trace_wait);
}
+void
+tracing_sched_wakeup_trace(struct trace_array *tr,
+ struct trace_array_cpu *data,
+ struct task_struct *wakee, struct task_struct *curr,
+ unsigned long flags)
+{
+ struct trace_entry *entry;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&data->lock, irq_flags);
+ entry = tracing_get_trace_entry(tr, data);
+ tracing_generic_entry_update(entry, flags);
+ entry->type = TRACE_WAKE;
+ entry->ctx.prev_pid = curr->pid;
+ entry->ctx.prev_prio = curr->prio;
+ entry->ctx.prev_state = curr->state;
+ entry->ctx.next_pid = wakee->pid;
+ entry->ctx.next_prio = wakee->prio;
+ spin_unlock_irqrestore(&data->lock, irq_flags);
+
+ if (!(trace_flags & TRACE_ITER_BLOCK))
+ wake_up(&trace_wait);
+}
+
#ifdef CONFIG_FTRACE
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
@@ -1183,13 +1208,14 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
trace_seq_puts(s, ")\n");
break;
case TRACE_CTX:
+ case TRACE_WAKE:
S = entry->ctx.prev_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.prev_state] : 'X';
comm = trace_find_cmdline(entry->ctx.next_pid);
- trace_seq_printf(s, " %d:%d:%c --> %d:%d %s\n",
+ trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d %s\n",
entry->ctx.prev_pid,
entry->ctx.prev_prio,
- S,
+ S, entry->type == TRACE_CTX ? "==>" : " +",
entry->ctx.next_pid,
entry->ctx.next_prio,
comm);
@@ -1256,12 +1282,14 @@ static int print_trace_fmt(struct trace_iterator *iter)
return 0;
break;
case TRACE_CTX:
+ case TRACE_WAKE:
S = entry->ctx.prev_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.prev_state] : 'X';
- ret = trace_seq_printf(s, " %d:%d:%c ==> %d:%d\n",
+ ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d\n",
entry->ctx.prev_pid,
entry->ctx.prev_prio,
S,
+ entry->type == TRACE_CTX ? "==>" : " +",
entry->ctx.next_pid,
entry->ctx.next_prio);
if (!ret)
@@ -1301,8 +1329,11 @@ static int print_raw_fmt(struct trace_iterator *iter)
return 0;
break;
case TRACE_CTX:
+ case TRACE_WAKE:
S = entry->ctx.prev_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.prev_state] : 'X';
+ if (entry->type == TRACE_WAKE)
+ S = '+';
ret = trace_seq_printf(s, "%d %d %c %d %d\n",
entry->ctx.prev_pid,
entry->ctx.prev_prio,
@@ -1355,8 +1386,11 @@ static int print_hex_fmt(struct trace_iterator *iter)
SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
break;
case TRACE_CTX:
+ case TRACE_WAKE:
S = entry->ctx.prev_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.prev_state] : 'X';
+ if (entry->type == TRACE_WAKE)
+ S = '+';
SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid);
SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio);
SEQ_PUT_HEX_FIELD_RET(s, S);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2b7352bf1ce6..90e0ba0f6eba 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -164,6 +164,12 @@ void tracing_sched_switch_trace(struct trace_array *tr,
struct task_struct *next,
unsigned long flags);
void tracing_record_cmdline(struct task_struct *tsk);
+
+void tracing_sched_wakeup_trace(struct trace_array *tr,
+ struct trace_array_cpu *data,
+ struct task_struct *wakee,
+ struct task_struct *cur,
+ unsigned long flags);
void trace_special(struct trace_array *tr,
struct trace_array_cpu *data,
unsigned long arg1,
@@ -194,11 +200,17 @@ extern cycle_t ftrace_now(int cpu);
#ifdef CONFIG_SCHED_TRACER
extern void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
+extern void
+wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr);
#else
static inline void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
}
+static inline void
+wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr)
+{
+}
#endif
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index b738eaca1dbe..8b1cf1a3aee0 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -41,6 +41,29 @@ ctx_switch_func(struct task_struct *prev, struct task_struct *next)
local_irq_restore(flags);
}
+static void wakeup_func(struct task_struct *wakee, struct task_struct *curr)
+{
+ struct trace_array *tr = ctx_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+
+ if (!tracer_enabled)
+ return;
+
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1))
+ tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
+
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+}
+
void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
{
tracing_record_cmdline(prev);
@@ -57,6 +80,19 @@ void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
wakeup_sched_switch(prev, next);
}
+void
+ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
+{
+ tracing_record_cmdline(curr);
+
+ wakeup_func(wakee, curr);
+
+ /*
+ * Chain to the wakeup tracer (this is a NOP if disabled):
+ */
+ wakeup_sched_wakeup(wakee, curr);
+}
+
static void sched_switch_reset(struct trace_array *tr)
{
int cpu;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 662679c78b66..87fa7b253b57 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -202,7 +202,7 @@ out:
}
void
-ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
+wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr)
{
if (likely(!tracer_enabled))
return;
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 85715b86a342..39dd452647da 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -8,6 +8,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
switch (entry->type) {
case TRACE_FN:
case TRACE_CTX:
+ case TRACE_WAKE:
return 1;
}
return 0;
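
For reference, with the format strings added above (" %5d:%3d:%c %s %5d:%3d\n", using "==>" for TRACE_CTX and "  +" for TRACE_WAKE), the two entry types would render along the following lines. The pids, priorities and state character are made up for illustration, and the per-entry prefix (comm, cpu, timestamp) that print_trace_fmt() emits before these fields is omitted:

 2916:120:R   +  2918:120      <- TRACE_WAKE: waker (pid:prio:state), then the woken task
 2916:120:R ==>  2918:120      <- TRACE_CTX:  previous task, then the task switched to
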