path: root/kernel
author		Ingo Molnar <mingo@elte.hu>	2009-09-19 12:05:25 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-09-19 12:05:25 +0200
commit		be4bdbfbae6b303c21ebe446648f617908a794b5 (patch)
tree		4ea485b2fa95cb17aded35b0ac83d6bdf9a0396d /kernel
parent		tracing: Remove markers (diff)
parent		tracing: Allocate the ftrace event profile buffer dynamically (diff)
Merge branch 'tracing/core-v3' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into tracing/urgent
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/trace_event_profile.c	79
-rw-r--r--	kernel/trace/trace_syscalls.c	97
2 files changed, 155 insertions(+), 21 deletions(-)
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 55a25c933d15..3aaa77c3309b 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -8,6 +8,54 @@
#include <linux/module.h>
#include "trace.h"

+/*
+ * alloc_percpu() takes a type, not a size, so create a dummy
+ * type that matches the desired size.
+ */
+typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;
+
+char *trace_profile_buf;
+char *trace_profile_buf_nmi;
+
+/* Count the events in use (per event id, not per instance) */
+static int total_profile_count;
+
+static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+{
+	char *buf;
+	int ret = -ENOMEM;
+
+	if (atomic_inc_return(&event->profile_count))
+		return 0;
+
+	if (!total_profile_count++) {
+		buf = (char *)alloc_percpu(profile_buf_t);
+		if (!buf)
+			goto fail_buf;
+
+		rcu_assign_pointer(trace_profile_buf, buf);
+
+		buf = (char *)alloc_percpu(profile_buf_t);
+		if (!buf)
+			goto fail_buf_nmi;
+
+		rcu_assign_pointer(trace_profile_buf_nmi, buf);
+	}
+
+	ret = event->profile_enable();
+	if (!ret)
+		return 0;
+
+	kfree(trace_profile_buf_nmi);
+fail_buf_nmi:
+	kfree(trace_profile_buf);
+fail_buf:
+	total_profile_count--;
+	atomic_dec(&event->profile_count);
+
+	return ret;
+}
+
int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
@@ -17,7 +65,7 @@ int ftrace_profile_enable(int event_id)
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->profile_enable &&
		    try_module_get(event->mod)) {
-			ret = event->profile_enable(event);
+			ret = ftrace_profile_enable_event(event);
			break;
		}
	}
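
The dummy profile_buf_t type above exists because alloc_percpu() takes a type, not a byte count. Around this kernel version the macro was defined roughly as follows (see include/linux/percpu.h; this is a from-memory approximation, not part of the patch):

#define alloc_percpu(type) \
	(typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))

Wrapping the raw byte array in a struct smuggles FTRACE_MAX_PROFILE_SIZE through sizeof(), which is why the callers cast the result back to char *.
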
@@ -26,6 +74,33 @@ int ftrace_profile_enable(int event_id)
	return ret;
}

+static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+{
+	char *buf, *nmi_buf;
+
+	if (!atomic_add_negative(-1, &event->profile_count))
+		return;
+
+	event->profile_disable();
+
+	if (!--total_profile_count) {
+		buf = trace_profile_buf;
+		rcu_assign_pointer(trace_profile_buf, NULL);
+
+		nmi_buf = trace_profile_buf_nmi;
+		rcu_assign_pointer(trace_profile_buf_nmi, NULL);
+
+		/*
+		 * Ensure every event in profiling has finished before
+		 * releasing the buffers
+		 */
+		synchronize_sched();
+
+		free_percpu(buf);
+		free_percpu(nmi_buf);
+	}
+}
+
void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;
@@ -33,7 +108,7 @@ void ftrace_profile_disable(int event_id)
	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
-			event->profile_disable(event);
+			ftrace_profile_disable_event(event);
			module_put(event->mod);
			break;
		}
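
ftrace_profile_enable_event()/ftrace_profile_disable_event() above layer two refcounts: a per-event profile_count, and a global total_profile_count that guards the shared per-CPU buffers. The logic only works if profile_count starts at -1, so that the first atomic_inc_return() yields 0; that initialization lives outside this diff, so treat it as an inference. A minimal sketch of the pattern with illustrative names (note it frees the per-CPU allocations with free_percpu(), which is the counterpart of alloc_percpu(); the error path above passes the same pointers to kfree() instead):

/* Sketch only: illustrative names, assuming the per-event counter is
 * initialized with ATOMIC_INIT(-1) somewhere outside this patch. */
static atomic_t example_count = ATOMIC_INIT(-1);
static int example_total;		/* serialized by event_mutex */

static int example_enable(void)
{
	if (atomic_inc_return(&example_count))
		return 0;	/* nonzero: someone already enabled this event */

	/* counter went -1 -> 0: we are this event's first user */
	if (!example_total++) {
		/* first event system-wide: alloc_percpu() the shared
		 * buffers and publish them with rcu_assign_pointer() */
	}
	return 0;
}

static void example_disable(void)
{
	if (!atomic_add_negative(-1, &example_count))
		return;		/* this event still has other users */

	if (!--example_total) {
		/* last event system-wide: unpublish the pointers, wait out
		 * irqs-off readers with synchronize_sched(), free_percpu() */
	}
}
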
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 8712ce3c6a0e..7a3550cf2597 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -384,10 +384,13 @@ static int sys_prof_refcount_exit;
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
-	struct syscall_trace_enter *rec;
	struct syscall_metadata *sys_data;
+	struct syscall_trace_enter *rec;
+	unsigned long flags;
+	char *raw_data;
	int syscall_nr;
	int size;
+	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
@@ -402,20 +405,38 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

-	do {
-		char raw_data[size];
+	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+		      "profile buffer not large enough"))
+		return;
+
+	/* Protect the per cpu buffer, begin the rcu read side */
+	local_irq_save(flags);

-		/* zero the dead bytes from align to not leak stack to user */
-		*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	cpu = smp_processor_id();
+
+	if (in_nmi())
+		raw_data = rcu_dereference(trace_profile_buf_nmi);
+	else
+		raw_data = rcu_dereference(trace_profile_buf);
+
+	if (!raw_data)
+		goto end;

-		rec = (struct syscall_trace_enter *) raw_data;
-		tracing_generic_entry_update(&rec->ent, 0, 0);
-		rec->ent.type = sys_data->enter_id;
-		rec->nr = syscall_nr;
-		syscall_get_arguments(current, regs, 0, sys_data->nb_args,
-			       (unsigned long *)&rec->args);
-		perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size);
-	} while(0);
+	raw_data = per_cpu_ptr(raw_data, cpu);
+
+	/* zero the dead bytes from align to not leak stack to user */
+	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+	rec = (struct syscall_trace_enter *) raw_data;
+	tracing_generic_entry_update(&rec->ent, 0, 0);
+	rec->ent.type = sys_data->enter_id;
+	rec->nr = syscall_nr;
+	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
+			       (unsigned long *)&rec->args);
+	perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size);
+
+end:
+	local_irq_restore(flags);
}

int reg_prof_syscall_enter(char *name)
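
Both profile handlers now share the same lockless read-side pattern: disabling interrupts doubles as the read-side critical section that synchronize_sched() in ftrace_profile_disable_event() waits out, so a NULL check after rcu_dereference() is all the teardown protection a handler needs; in_nmi() picks a separate buffer because an NMI can fire inside the irqs-off window and would otherwise scribble on the same per-CPU slot. A condensed sketch of the pattern, reusing the patch's globals (the handler name and the elided record building are illustrative):

static void example_profile_handler(void)
{
	unsigned long flags;
	char *raw_data;

	/* irqs off == sched-RCU read side: synchronize_sched() in the
	 * disable path cannot return while we are in this window */
	local_irq_save(flags);

	/* NMIs still interrupt us, hence their dedicated buffer */
	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (raw_data) {
		/* this CPU's slot is private while irqs stay off here */
		raw_data = per_cpu_ptr(raw_data, smp_processor_id());
		/* ... build the record in raw_data and submit it ... */
	}

	local_irq_restore(flags);
}
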
@@ -460,8 +481,12 @@ void unreg_prof_syscall_enter(char *name)
static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
-	struct syscall_trace_exit rec;
+	struct syscall_trace_exit *rec;
+	unsigned long flags;
	int syscall_nr;
+	char *raw_data;
+	int size;
+	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
@@ -471,12 +496,46 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
	if (!sys_data)
		return;

-	tracing_generic_entry_update(&rec.ent, 0, 0);
-	rec.ent.type = sys_data->exit_id;
-	rec.nr = syscall_nr;
-	rec.ret = syscall_get_return_value(current, regs);
+	/* We can probably do that at build time */
+	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
+	size -= sizeof(u32);

-	perf_tpcounter_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec));
+	/*
+	 * Impossible today, but be paranoid about the future.
+	 * Ideally this check would happen at build time.
+	 */
+	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+		"exit event has grown above profile buffer size"))
+		return;
+
+	/* Protect the per cpu buffer, begin the rcu read side */
+	local_irq_save(flags);
+	cpu = smp_processor_id();
+
+	if (in_nmi())
+		raw_data = rcu_dereference(trace_profile_buf_nmi);
+	else
+		raw_data = rcu_dereference(trace_profile_buf);
+
+	if (!raw_data)
+		goto end;
+
+	raw_data = per_cpu_ptr(raw_data, cpu);
+
+	/* zero the dead bytes from align to not leak stack to user */
+	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+	rec = (struct syscall_trace_exit *)raw_data;
+
+	tracing_generic_entry_update(&rec->ent, 0, 0);
+	rec->ent.type = sys_data->exit_id;
+	rec->nr = syscall_nr;
+	rec->ret = syscall_get_return_value(current, regs);
+
+	perf_tpcounter_event(sys_data->exit_id, 0, 1, rec, size);
+
+end:
+	local_irq_restore(flags);
}
int reg_prof_syscall_exit(char *name)
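
The size arithmetic repeated in both handlers keeps perf's raw samples u64-aligned: perf prepends a u32 size field to each raw sample, hence the add-then-subtract of sizeof(u32), and the trailing 0ULL store clears the alignment padding so no stale kernel memory reaches userspace. A small userspace check of that arithmetic (ALIGN reimplemented here for illustration; the 30-byte payload is a made-up example):

#include <stdio.h>
#include <stdint.h>

/* same rounding the kernel's ALIGN() macro performs */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t payload = 30;	/* hypothetical sizeof(*rec) */
	size_t size;

	/* pad so the u32 perf header plus payload is a multiple of 8 */
	size = ALIGN(payload + sizeof(uint32_t), sizeof(uint64_t));
	size -= sizeof(uint32_t);

	/* 30 -> 36: bytes 30..35 are padding, cleared by the kernel's
	 * *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL store */
	printf("payload %zu -> padded %zu (%zu pad bytes)\n",
	       payload, size, size - payload);
	return 0;
}
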