Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/Kconfig                 |  15
-rw-r--r--  kernel/trace/ftrace.c                | 634
-rw-r--r--  kernel/trace/ring_buffer.c           |   7
-rw-r--r--  kernel/trace/trace.c                 | 174
-rw-r--r--  kernel/trace/trace.h                 |  10
-rw-r--r--  kernel/trace/trace_boot.c            |   5
-rw-r--r--  kernel/trace/trace_branch.c          |   4
-rw-r--r--  kernel/trace/trace_event_profile.c   |   1
-rw-r--r--  kernel/trace/trace_functions_graph.c |  25
-rw-r--r--  kernel/trace/trace_mmiotrace.c       |   6
-rw-r--r--  kernel/trace/trace_output.c          |  10
-rw-r--r--  kernel/trace/trace_output.h          |   2
-rw-r--r--  kernel/trace/trace_printk.c          |   6
-rw-r--r--  kernel/trace/trace_sched_switch.c    |  10
-rw-r--r--  kernel/trace/trace_stack.c           |  13
-rw-r--r--  kernel/trace/trace_stat.c            |   2
-rw-r--r--  kernel/trace/trace_stat.h            |   2
-rw-r--r--  kernel/trace/trace_sysprof.c         |   6
-rw-r--r--  kernel/trace/trace_workqueue.c       |   2
19 files changed, 738 insertions, 196 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 2246141bda4d..23b96ebbf893 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -104,6 +104,7 @@ config FUNCTION_GRAPH_TRACER
the return value. This is done by setting the current return
address on the current task structure into a stack of calls.
+
config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
default n
@@ -375,6 +376,20 @@ config DYNAMIC_FTRACE
were made. If so, it runs stop_machine (stops all CPUS)
and modifies the code to jump over the call to ftrace.
+config FUNCTION_PROFILER
+ bool "Kernel function profiler"
+ depends on FUNCTION_TRACER
+ default n
+ help
+ This option enables the kernel function profiler. A file is created
+ in debugfs called function_profile_enabled which defaults to zero.
+ When a 1 is echoed into this file profiling begins, and when a
+ zero is entered, profiling stops. A file in the trace_stats
+	  directory called functions shows the list of functions that
+ have been hit and their counters.
+
+ If in doubt, say N
+
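
As a usage sketch: the new knob can be driven from userspace roughly like this (a minimal example; it assumes debugfs is mounted at the conventional /sys/kernel/debug, which is outside the scope of this patch):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            /* path assumes the conventional debugfs mount point */
            int fd = open("/sys/kernel/debug/tracing/function_profile_enabled",
                          O_WRONLY);

            if (fd < 0)
                    return 1;
            write(fd, "1", 1);      /* "1" starts profiling, "0" stops it */
            close(fd);
            return 0;
    }

Per the registration loop later in this patch, the per-cpu results then appear as function<N> files under the stat directory.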
config FTRACE_MCOUNT_RECORD
def_bool y
depends on DYNAMIC_FTRACE
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f1ed080406c3..8e6a0b5c9940 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -33,7 +33,8 @@
#include <asm/ftrace.h>
-#include "trace.h"
+#include "trace_output.h"
+#include "trace_stat.h"
#define FTRACE_WARN_ON(cond) \
do { \
@@ -68,7 +69,7 @@ static DEFINE_MUTEX(ftrace_lock);
static struct ftrace_ops ftrace_list_end __read_mostly =
{
- .func = ftrace_stub,
+ .func = ftrace_stub,
};
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
@@ -240,6 +241,576 @@ static void ftrace_update_pid_func(void)
#endif
}
+#ifdef CONFIG_FUNCTION_PROFILER
+struct ftrace_profile {
+ struct hlist_node node;
+ unsigned long ip;
+ unsigned long counter;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ unsigned long long time;
+#endif
+};
+
+struct ftrace_profile_page {
+ struct ftrace_profile_page *next;
+ unsigned long index;
+ struct ftrace_profile records[];
+};
+
+struct ftrace_profile_stat {
+ atomic_t disabled;
+ struct hlist_head *hash;
+ struct ftrace_profile_page *pages;
+ struct ftrace_profile_page *start;
+ struct tracer_stat stat;
+};
+
+#define PROFILE_RECORDS_SIZE \
+ (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
+
+#define PROFILES_PER_PAGE \
+ (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
+
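
To get a feel for PROFILES_PER_PAGE, here is a back-of-envelope userspace sketch with stand-in types (it assumes a 64-bit build, 4096-byte pages, and the graph tracer's time field present; the stand-ins only mirror the kernel structs' sizes, they are not the kernel types):

    #include <stdio.h>
    #include <stddef.h>

    struct profile {
            void *hlist_next, **hlist_pprev;        /* hlist_node: two pointers */
            unsigned long ip;
            unsigned long counter;
            unsigned long long time;                /* graph tracer only */
    };

    struct profile_page {
            struct profile_page *next;
            unsigned long index;
            struct profile records[];
    };

    int main(void)
    {
            size_t avail = 4096 - offsetof(struct profile_page, records);

            /* (4096 - 16) / 40 == 102 records per page on this layout */
            printf("%zu records per page\n", avail / sizeof(struct profile));
            return 0;
    }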
+static int ftrace_profile_bits __read_mostly;
+static int ftrace_profile_enabled __read_mostly;
+
+/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
+static DEFINE_MUTEX(ftrace_profile_lock);
+
+static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
+
+#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
+
+static void *
+function_stat_next(void *v, int idx)
+{
+ struct ftrace_profile *rec = v;
+ struct ftrace_profile_page *pg;
+
+ pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
+
+ again:
+ rec++;
+ if ((void *)rec >= (void *)&pg->records[pg->index]) {
+ pg = pg->next;
+ if (!pg)
+ return NULL;
+ rec = &pg->records[0];
+ if (!rec->counter)
+ goto again;
+ }
+
+ return rec;
+}
+
+static void *function_stat_start(struct tracer_stat *trace)
+{
+ struct ftrace_profile_stat *stat =
+ container_of(trace, struct ftrace_profile_stat, stat);
+
+ if (!stat || !stat->start)
+ return NULL;
+
+ return function_stat_next(&stat->start->records[0], 0);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* function graph compares on total time */
+static int function_stat_cmp(void *p1, void *p2)
+{
+ struct ftrace_profile *a = p1;
+ struct ftrace_profile *b = p2;
+
+ if (a->time < b->time)
+ return -1;
+ if (a->time > b->time)
+ return 1;
+ else
+ return 0;
+}
+#else
+/* without function graph, compare on hit counts */
+static int function_stat_cmp(void *p1, void *p2)
+{
+ struct ftrace_profile *a = p1;
+ struct ftrace_profile *b = p2;
+
+ if (a->counter < b->counter)
+ return -1;
+ if (a->counter > b->counter)
+ return 1;
+ else
+ return 0;
+}
+#endif
+
+static int function_stat_headers(struct seq_file *m)
+{
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ seq_printf(m, " Function "
+ "Hit Time Avg\n"
+ " -------- "
+ "--- ---- ---\n");
+#else
+ seq_printf(m, " Function Hit\n"
+ " -------- ---\n");
+#endif
+ return 0;
+}
+
+static int function_stat_show(struct seq_file *m, void *v)
+{
+ struct ftrace_profile *rec = v;
+ char str[KSYM_SYMBOL_LEN];
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ static DEFINE_MUTEX(mutex);
+ static struct trace_seq s;
+ unsigned long long avg;
+#endif
+
+ kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+ seq_printf(m, " %-30.30s %10lu", str, rec->counter);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ seq_printf(m, " ");
+ avg = rec->time;
+ do_div(avg, rec->counter);
+
+ mutex_lock(&mutex);
+ trace_seq_init(&s);
+ trace_print_graph_duration(rec->time, &s);
+ trace_seq_puts(&s, " ");
+ trace_print_graph_duration(avg, &s);
+ trace_print_seq(m, &s);
+ mutex_unlock(&mutex);
+#endif
+ seq_putc(m, '\n');
+
+ return 0;
+}
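
One subtlety above: the kernel's do_div() divides its first argument in place and yields the remainder, which is why rec->time is copied into avg before dividing. A rough userspace stand-in (illustrative, not the kernel implementation) behaves like this:

    #include <stdio.h>

    /* rough stand-in: the quotient lands in n, the remainder is the value */
    #define do_div(n, base) ({                              \
            unsigned long long __rem = (n) % (base);        \
            (n) /= (base);                                  \
            __rem;                                          \
    })

    int main(void)
    {
            unsigned long long time = 123456789ULL; /* invented total, in ns */
            unsigned long long avg = time;          /* copy first, as above */

            do_div(avg, 1000);                      /* say counter == 1000 */
            printf("total=%llu avg=%llu\n", time, avg); /* avg == 123456 */
            return 0;
    }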
+
+static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
+{
+ struct ftrace_profile_page *pg;
+
+ pg = stat->pages = stat->start;
+
+ while (pg) {
+ memset(pg->records, 0, PROFILE_RECORDS_SIZE);
+ pg->index = 0;
+ pg = pg->next;
+ }
+
+ memset(stat->hash, 0,
+ FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
+}
+
+int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
+{
+ struct ftrace_profile_page *pg;
+ int functions;
+ int pages;
+ int i;
+
+ /* If we already allocated, do nothing */
+ if (stat->pages)
+ return 0;
+
+ stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!stat->pages)
+ return -ENOMEM;
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+ functions = ftrace_update_tot_cnt;
+#else
+ /*
+ * We do not know the number of functions that exist because
+	 * dynamic tracing is what counts them. From past experience
+ * we have around 20K functions. That should be more than enough.
+ * It is highly unlikely we will execute every function in
+ * the kernel.
+ */
+ functions = 20000;
+#endif
+
+ pg = stat->start = stat->pages;
+
+ pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
+
+ for (i = 0; i < pages; i++) {
+ pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pg->next)
+ goto out_free;
+ pg = pg->next;
+ }
+
+ return 0;
+
+ out_free:
+ pg = stat->start;
+ while (pg) {
+ unsigned long tmp = (unsigned long)pg;
+
+ pg = pg->next;
+ free_page(tmp);
+ }
+
+ free_page((unsigned long)stat->pages);
+ stat->pages = NULL;
+ stat->start = NULL;
+
+ return -ENOMEM;
+}
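
Combining the 20K-function estimate with the roughly 102 records per page worked out earlier, the preallocation above comes to about DIV_ROUND_UP(20000, 102) = 197 pages, on the order of 800KB per cpu. A quick check (the per-page figure is the 64-bit assumption from before):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            int functions = 20000;  /* the non-DYNAMIC_FTRACE estimate */
            int per_page = 102;     /* assumed PROFILES_PER_PAGE, see above */
            int pages = DIV_ROUND_UP(functions, per_page);

            printf("%d pages, ~%d KB per cpu\n", pages, pages * 4);
            return 0;
    }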
+
+static int ftrace_profile_init_cpu(int cpu)
+{
+ struct ftrace_profile_stat *stat;
+ int size;
+
+ stat = &per_cpu(ftrace_profile_stats, cpu);
+
+ if (stat->hash) {
+ /* If the profile is already created, simply reset it */
+ ftrace_profile_reset(stat);
+ return 0;
+ }
+
+ /*
+ * We are profiling all functions, but usually only a few thousand
+ * functions are hit. We'll make a hash of 1024 items.
+ */
+ size = FTRACE_PROFILE_HASH_SIZE;
+
+ stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
+
+ if (!stat->hash)
+ return -ENOMEM;
+
+ if (!ftrace_profile_bits) {
+ size--;
+
+ for (; size; size >>= 1)
+ ftrace_profile_bits++;
+ }
+
+ /* Preallocate the function profiling pages */
+ if (ftrace_profile_pages_init(stat) < 0) {
+ kfree(stat->hash);
+ stat->hash = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
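
The ftrace_profile_bits loop above is an open-coded log2 of the power-of-two hash size: for FTRACE_PROFILE_HASH_SIZE == 1024 it yields 10, which hash_long() later uses to fold an instruction pointer into a bucket index. A standalone sketch of that loop:

    #include <stdio.h>

    int main(void)
    {
            int size = 1024;        /* FTRACE_PROFILE_HASH_SIZE */
            int bits = 0;

            size--;                 /* 1023: ten one-bits */
            for (; size; size >>= 1)
                    bits++;

            printf("bits = %d\n", bits);    /* prints 10 */
            return 0;
    }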
+
+static int ftrace_profile_init(void)
+{
+ int cpu;
+ int ret = 0;
+
+ for_each_online_cpu(cpu) {
+ ret = ftrace_profile_init_cpu(cpu);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* interrupts must be disabled */
+static struct ftrace_profile *
+ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
+{
+ struct ftrace_profile *rec;
+ struct hlist_head *hhd;
+ struct hlist_node *n;
+ unsigned long key;
+
+ key = hash_long(ip, ftrace_profile_bits);
+ hhd = &stat->hash[key];
+
+ if (hlist_empty(hhd))
+ return NULL;
+
+ hlist_for_each_entry_rcu(rec, n, hhd, node) {
+ if (rec->ip == ip)
+ return rec;
+ }
+
+ return NULL;
+}
+
+static void ftrace_add_profile(struct ftrace_profile_stat *stat,
+ struct ftrace_profile *rec)
+{
+ unsigned long key;
+
+ key = hash_long(rec->ip, ftrace_profile_bits);
+ hlist_add_head_rcu(&rec->node, &stat->hash[key]);
+}
+
+/*
+ * The memory is already allocated; this simply finds a new record to use.
+ */
+static struct ftrace_profile *
+ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
+{
+ struct ftrace_profile *rec = NULL;
+
+ /* prevent recursion (from NMIs) */
+ if (atomic_inc_return(&stat->disabled) != 1)
+ goto out;
+
+ /*
+ * Try to find the function again since an NMI
+ * could have added it
+ */
+ rec = ftrace_find_profiled_func(stat, ip);
+ if (rec)
+ goto out;
+
+ if (stat->pages->index == PROFILES_PER_PAGE) {
+ if (!stat->pages->next)
+ goto out;
+ stat->pages = stat->pages->next;
+ }
+
+ rec = &stat->pages->records[stat->pages->index++];
+ rec->ip = ip;
+ ftrace_add_profile(stat, rec);
+
+ out:
+ atomic_dec(&stat->disabled);
+
+ return rec;
+}
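
The atomic_inc_return() test above acts as a per-cpu reentry guard: the first entrant sees the counter go 0 -> 1 and may move the page cursor; an NMI that interrupts it and re-enters sees 2 and bails, losing at most one sample instead of corrupting the list. A userspace sketch of the same shape using C11 atomics (illustrative only):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int disabled;

    int main(void)
    {
            /* outer context: 0 -> 1, allowed to allocate */
            if (atomic_fetch_add(&disabled, 1) + 1 == 1)
                    printf("outer: proceeds\n");

            /* simulated NMI reentry before the decrement: 1 -> 2, backs off */
            if (atomic_fetch_add(&disabled, 1) + 1 != 1)
                    printf("nested: backs off\n");

            atomic_fetch_sub(&disabled, 1);         /* nested path's dec */
            atomic_fetch_sub(&disabled, 1);         /* outer path's dec */
            return 0;
    }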
+
+static void
+function_profile_call(unsigned long ip, unsigned long parent_ip)
+{
+ struct ftrace_profile_stat *stat;
+ struct ftrace_profile *rec;
+ unsigned long flags;
+
+ if (!ftrace_profile_enabled)
+ return;
+
+ local_irq_save(flags);
+
+ stat = &__get_cpu_var(ftrace_profile_stats);
+ if (!stat->hash)
+ goto out;
+
+ rec = ftrace_find_profiled_func(stat, ip);
+ if (!rec) {
+ rec = ftrace_profile_alloc(stat, ip);
+ if (!rec)
+ goto out;
+ }
+
+ rec->counter++;
+ out:
+ local_irq_restore(flags);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int profile_graph_entry(struct ftrace_graph_ent *trace)
+{
+ function_profile_call(trace->func, 0);
+ return 1;
+}
+
+static void profile_graph_return(struct ftrace_graph_ret *trace)
+{
+ struct ftrace_profile_stat *stat;
+ unsigned long long calltime;
+ struct ftrace_profile *rec;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ stat = &__get_cpu_var(ftrace_profile_stats);
+ if (!stat->hash)
+ goto out;
+
+ calltime = trace->rettime - trace->calltime;
+
+ if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
+ int index;
+
+ index = trace->depth;
+
+ /* Append this call time to the parent time to subtract */
+ if (index)
+ current->ret_stack[index - 1].subtime += calltime;
+
+ if (current->ret_stack[index].subtime < calltime)
+ calltime -= current->ret_stack[index].subtime;
+ else
+ calltime = 0;
+ }
+
+ rec = ftrace_find_profiled_func(stat, trace->func);
+ if (rec)
+ rec->time += calltime;
+
+ out:
+ local_irq_restore(flags);
+}
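
With invented numbers, the subtime bookkeeping works out like this: suppose parent() spends 100us on the wall clock, 30us of which is a call to child(). When child() returns at depth 1, its 30us is added to the parent's subtime slot; when parent() returns at depth 0, that 30us is stripped, so only 70us of self time is charged. A toy model:

    #include <stdio.h>

    int main(void)
    {
            /* invented durations, in microseconds */
            unsigned long long subtime[2] = { 0, 0 };       /* per-depth */
            unsigned long long child = 30, parent = 100, calltime;

            /* child returns at depth 1: charge its time one level up */
            subtime[1 - 1] += child;

            /* parent returns at depth 0: strip accumulated child time */
            calltime = parent;
            if (subtime[0] < calltime)
                    calltime -= subtime[0];
            else
                    calltime = 0;

            printf("parent self time = %lluus\n", calltime); /* 70us */
            return 0;
    }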
+
+static int register_ftrace_profiler(void)
+{
+ return register_ftrace_graph(&profile_graph_return,
+ &profile_graph_entry);
+}
+
+static void unregister_ftrace_profiler(void)
+{
+ unregister_ftrace_graph();
+}
+#else
+static struct ftrace_ops ftrace_profile_ops __read_mostly =
+{
+ .func = function_profile_call,
+};
+
+static int register_ftrace_profiler(void)
+{
+ return register_ftrace_function(&ftrace_profile_ops);
+}
+
+static void unregister_ftrace_profiler(void)
+{
+ unregister_ftrace_function(&ftrace_profile_ops);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+static ssize_t
+ftrace_profile_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ char buf[64]; /* big enough to hold a number */
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ val = !!val;
+
+ mutex_lock(&ftrace_profile_lock);
+ if (ftrace_profile_enabled ^ val) {
+ if (val) {
+ ret = ftrace_profile_init();
+ if (ret < 0) {
+ cnt = ret;
+ goto out;
+ }
+
+ ret = register_ftrace_profiler();
+ if (ret < 0) {
+ cnt = ret;
+ goto out;
+ }
+ ftrace_profile_enabled = 1;
+ } else {
+ ftrace_profile_enabled = 0;
+ unregister_ftrace_profiler();
+ }
+ }
+ out:
+ mutex_unlock(&ftrace_profile_lock);
+
+ filp->f_pos += cnt;
+
+ return cnt;
+}
+
+static ssize_t
+ftrace_profile_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64]; /* big enough to hold a number */
+ int r;
+
+ r = sprintf(buf, "%u\n", ftrace_profile_enabled);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations ftrace_profile_fops = {
+ .open = tracing_open_generic,
+ .read = ftrace_profile_read,
+ .write = ftrace_profile_write,
+};
+
+/* used to initialize the real stat files */
+static struct tracer_stat function_stats __initdata = {
+ .name = "functions",
+ .stat_start = function_stat_start,
+ .stat_next = function_stat_next,
+ .stat_cmp = function_stat_cmp,
+ .stat_headers = function_stat_headers,
+ .stat_show = function_stat_show
+};
+
+static void ftrace_profile_debugfs(struct dentry *d_tracer)
+{
+ struct ftrace_profile_stat *stat;
+ struct dentry *entry;
+ char *name;
+ int ret;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ stat = &per_cpu(ftrace_profile_stats, cpu);
+
+ /* allocate enough for function name + cpu number */
+ name = kmalloc(32, GFP_KERNEL);
+ if (!name) {
+ /*
+			 * The files created are permanent; if something goes wrong
+ * we still do not free memory.
+ */
+ kfree(stat);
+ WARN(1,
+ "Could not allocate stat file for cpu %d\n",
+ cpu);
+ return;
+ }
+ stat->stat = function_stats;
+ snprintf(name, 32, "function%d", cpu);
+ stat->stat.name = name;
+ ret = register_stat_tracer(&stat->stat);
+ if (ret) {
+ WARN(1,
+ "Could not register function stat for cpu %d\n",
+ cpu);
+ kfree(name);
+ return;
+ }
+ }
+
+ entry = debugfs_create_file("function_profile_enabled", 0644,
+ d_tracer, NULL, &ftrace_profile_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'function_profile_enabled' entry\n");
+}
+
+#else /* CONFIG_FUNCTION_PROFILER */
+static void ftrace_profile_debugfs(struct dentry *d_tracer)
+{
+}
+#endif /* CONFIG_FUNCTION_PROFILER */
+
/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;
@@ -261,7 +832,6 @@ struct ftrace_func_probe {
struct rcu_head rcu;
};
-
enum {
FTRACE_ENABLE_CALLS = (1 << 0),
FTRACE_DISABLE_CALLS = (1 << 1),
@@ -1408,7 +1978,7 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_probe_ops __read_mostly =
{
- .func = function_trace_probe_call,
+ .func = function_trace_probe_call,
};
static int ftrace_probe_registered;
@@ -2128,38 +2698,23 @@ static const struct file_operations ftrace_graph_fops = {
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
- struct dentry *entry;
- entry = debugfs_create_file("available_filter_functions", 0444,
- d_tracer, NULL, &ftrace_avail_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'available_filter_functions' entry\n");
+ trace_create_file("available_filter_functions", 0444,
+ d_tracer, NULL, &ftrace_avail_fops);
- entry = debugfs_create_file("failures", 0444,
- d_tracer, NULL, &ftrace_failures_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'failures' entry\n");
+ trace_create_file("failures", 0444,
+ d_tracer, NULL, &ftrace_failures_fops);
- entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
- NULL, &ftrace_filter_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'set_ftrace_filter' entry\n");
+ trace_create_file("set_ftrace_filter", 0644, d_tracer,
+ NULL, &ftrace_filter_fops);
- entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
+ trace_create_file("set_ftrace_notrace", 0644, d_tracer,
NULL, &ftrace_notrace_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'set_ftrace_notrace' entry\n");
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
+ trace_create_file("set_graph_function", 0444, d_tracer,
NULL,
&ftrace_graph_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
return 0;
@@ -2417,7 +2972,6 @@ static const struct file_operations ftrace_pid_fops = {
static __init int ftrace_init_debugfs(void)
{
struct dentry *d_tracer;
- struct dentry *entry;
d_tracer = tracing_init_dentry();
if (!d_tracer)
@@ -2425,11 +2979,11 @@ static __init int ftrace_init_debugfs(void)
ftrace_init_dyn_debugfs(d_tracer);
- entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
- NULL, &ftrace_pid_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'set_ftrace_pid' entry\n");
+ trace_create_file("set_ftrace_pid", 0644, d_tracer,
+ NULL, &ftrace_pid_fops);
+
+ ftrace_profile_debugfs(d_tracer);
+
return 0;
}
fs_initcall(ftrace_init_debugfs);
@@ -2538,7 +3092,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static atomic_t ftrace_graph_active;
+static int ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -2690,7 +3244,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
mutex_lock(&ftrace_lock);
/* we currently allow only one tracer registered at a time */
- if (atomic_read(&ftrace_graph_active)) {
+ if (ftrace_graph_active) {
ret = -EBUSY;
goto out;
}
@@ -2698,10 +3252,10 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
register_pm_notifier(&ftrace_suspend_notifier);
- atomic_inc(&ftrace_graph_active);
+ ftrace_graph_active++;
ret = start_graph_tracing();
if (ret) {
- atomic_dec(&ftrace_graph_active);
+ ftrace_graph_active--;
goto out;
}
@@ -2719,10 +3273,10 @@ void unregister_ftrace_graph(void)
{
mutex_lock(&ftrace_lock);
- if (!unlikely(atomic_read(&ftrace_graph_active)))
+ if (unlikely(!ftrace_graph_active))
goto out;
- atomic_dec(&ftrace_graph_active);
+ ftrace_graph_active--;
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
ftrace_graph_entry = ftrace_graph_entry_stub;
@@ -2736,7 +3290,7 @@ void unregister_ftrace_graph(void)
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
- if (atomic_read(&ftrace_graph_active)) {
+ if (ftrace_graph_active) {
t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
* sizeof(struct ftrace_ret_stack),
GFP_KERNEL);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 960cbf44c844..74a11808c282 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2845,14 +2845,11 @@ static const struct file_operations rb_simple_fops = {
static __init int rb_init_debugfs(void)
{
struct dentry *d_tracer;
- struct dentry *entry;
d_tracer = tracing_init_dentry();
- entry = debugfs_create_file("tracing_on", 0644, d_tracer,
- &ring_buffer_flags, &rb_simple_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'tracing_on' entry\n");
+ trace_create_file("tracing_on", 0644, d_tracer,
+ &ring_buffer_flags, &rb_simple_fops);
return 0;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1ce5dc6372b8..4865459f609f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -255,7 +255,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
- TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME;
+ TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
+ TRACE_ITER_GRAPH_TIME;
/**
* trace_wake_up - wake up tasks waiting for trace input
@@ -317,6 +318,7 @@ static const char *trace_options[] = {
"latency-format",
"global-clock",
"sleep-time",
+ "graph-time",
NULL
};
@@ -402,17 +404,6 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
return cnt;
}
-static void
-trace_print_seq(struct seq_file *m, struct trace_seq *s)
-{
- int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
-
- s->buffer[len] = 0;
- seq_puts(m, s->buffer);
-
- trace_seq_init(s);
-}
-
/**
* update_max_tr - snapshot all trace buffers from global_trace to max_tr
* @tr: tracer
@@ -3596,7 +3587,7 @@ struct dentry *tracing_dentry_percpu(void)
static void tracing_init_debugfs_percpu(long cpu)
{
struct dentry *d_percpu = tracing_dentry_percpu();
- struct dentry *entry, *d_cpu;
+ struct dentry *d_cpu;
/* strlen(cpu) + MAX(log10(cpu)) + '\0' */
char cpu_dir[7];
@@ -3611,21 +3602,15 @@ static void tracing_init_debugfs_percpu(long cpu)
}
/* per cpu trace_pipe */
- entry = debugfs_create_file("trace_pipe", 0444, d_cpu,
- (void *) cpu, &tracing_pipe_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'trace_pipe' entry\n");
+ trace_create_file("trace_pipe", 0444, d_cpu,
+ (void *) cpu, &tracing_pipe_fops);
/* per cpu trace */
- entry = debugfs_create_file("trace", 0644, d_cpu,
- (void *) cpu, &tracing_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'trace' entry\n");
+ trace_create_file("trace", 0644, d_cpu,
+ (void *) cpu, &tracing_fops);
- entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu,
- (void *) cpu, &tracing_buffers_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n");
+ trace_create_file("trace_pipe_raw", 0444, d_cpu,
+ (void *) cpu, &tracing_buffers_fops);
}
#ifdef CONFIG_FTRACE_SELFTEST
@@ -3781,6 +3766,22 @@ static const struct file_operations trace_options_core_fops = {
.write = trace_options_core_write,
};
+struct dentry *trace_create_file(const char *name,
+ mode_t mode,
+ struct dentry *parent,
+ void *data,
+ const struct file_operations *fops)
+{
+ struct dentry *ret;
+
+ ret = debugfs_create_file(name, mode, parent, data, fops);
+ if (!ret)
+ pr_warning("Could not create debugfs '%s' entry\n", name);
+
+ return ret;
+}
+
+
static struct dentry *trace_options_init_dentry(void)
{
struct dentry *d_tracer;
@@ -3808,7 +3809,6 @@ create_trace_option_file(struct trace_option_dentry *topt,
struct tracer_opt *opt)
{
struct dentry *t_options;
- struct dentry *entry;
t_options = trace_options_init_dentry();
if (!t_options)
@@ -3817,11 +3817,9 @@ create_trace_option_file(struct trace_option_dentry *topt,
topt->flags = flags;
topt->opt = opt;
- entry = debugfs_create_file(opt->name, 0644, t_options, topt,
+ topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
&trace_options_fops);
- topt->entry = entry;
-
}
static struct trace_option_dentry *
@@ -3876,123 +3874,81 @@ static struct dentry *
create_trace_option_core_file(const char *option, long index)
{
struct dentry *t_options;
- struct dentry *entry;
t_options = trace_options_init_dentry();
if (!t_options)
return NULL;
- entry = debugfs_create_file(option, 0644, t_options, (void *)index,
+ return trace_create_file(option, 0644, t_options, (void *)index,
&trace_options_core_fops);
-
- return entry;
}
static __init void create_trace_options_dir(void)
{
struct dentry *t_options;
- struct dentry *entry;
int i;
t_options = trace_options_init_dentry();
if (!t_options)
return;
- for (i = 0; trace_options[i]; i++) {
- entry = create_trace_option_core_file(trace_options[i], i);
- if (!entry)
- pr_warning("Could not create debugfs %s entry\n",
- trace_options[i]);
- }
+ for (i = 0; trace_options[i]; i++)
+ create_trace_option_core_file(trace_options[i], i);
}
static __init int tracer_init_debugfs(void)
{
struct dentry *d_tracer;
- struct dentry *entry;
int cpu;
d_tracer = tracing_init_dentry();
- entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
- &global_trace, &tracing_ctrl_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
+ trace_create_file("tracing_enabled", 0644, d_tracer,
+ &global_trace, &tracing_ctrl_fops);
- entry = debugfs_create_file("trace_options", 0644, d_tracer,
- NULL, &tracing_iter_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'trace_options' entry\n");
+ trace_create_file("trace_options", 0644, d_tracer,
+ NULL, &tracing_iter_fops);
- create_trace_options_dir();
+ trace_create_file("tracing_cpumask", 0644, d_tracer,
+ NULL, &tracing_cpumask_fops);
+
+ trace_create_file("trace", 0644, d_tracer,
+ (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
+
+ trace_create_file("available_tracers", 0444, d_tracer,
+ &global_trace, &show_traces_fops);
+
+ trace_create_file("current_tracer", 0444, d_tracer,
+ &global_trace, &set_tracer_fops);
- entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
- NULL, &tracing_cpumask_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
-
- entry = debugfs_create_file("trace", 0644, d_tracer,
- (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'trace' entry\n");
-
- entry = debugfs_create_file("available_tracers", 0444, d_tracer,
- &global_trace, &show_traces_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'available_tracers' entry\n");
-
- entry = debugfs_create_file("current_tracer", 0444, d_tracer,
- &global_trace, &set_tracer_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'current_tracer' entry\n");
-
- entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
- &tracing_max_latency,
- &tracing_max_lat_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'tracing_max_latency' entry\n");
-
- entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
- &tracing_thresh, &tracing_max_lat_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'tracing_thresh' entry\n");
- entry = debugfs_create_file("README", 0644, d_tracer,
- NULL, &tracing_readme_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'README' entry\n");
-
- entry = debugfs_create_file("trace_pipe", 0444, d_tracer,
+ trace_create_file("tracing_max_latency", 0644, d_tracer,
+ &tracing_max_latency, &tracing_max_lat_fops);
+
+ trace_create_file("tracing_thresh", 0644, d_tracer,
+ &tracing_thresh, &tracing_max_lat_fops);
+
+ trace_create_file("README", 0644, d_tracer,
+ NULL, &tracing_readme_fops);
+
+ trace_create_file("trace_pipe", 0444, d_tracer,
(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'trace_pipe' entry\n");
-
- entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
- &global_trace, &tracing_entries_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'buffer_size_kb' entry\n");
-
- entry = debugfs_create_file("trace_marker", 0220, d_tracer,
- NULL, &tracing_mark_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'trace_marker' entry\n");
+
+ trace_create_file("buffer_size_kb", 0644, d_tracer,
+ &global_trace, &tracing_entries_fops);
+
+ trace_create_file("trace_marker", 0220, d_tracer,
+ NULL, &tracing_mark_fops);
#ifdef CONFIG_DYNAMIC_FTRACE
- entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
- &ftrace_update_tot_cnt,
- &tracing_dyn_info_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'dyn_ftrace_total_info' entry\n");
+ trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
+ &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif
#ifdef CONFIG_SYSPROF_TRACER
init_tracer_sysprof_debugfs(d_tracer);
#endif
+ create_trace_options_dir();
+
for_each_tracing_cpu(cpu)
tracing_init_debugfs_percpu(cpu);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index e685ac2b2ba1..f76a8f8689d4 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -470,6 +470,12 @@ void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
int tracing_open_generic(struct inode *inode, struct file *filp);
+struct dentry *trace_create_file(const char *name,
+ mode_t mode,
+ struct dentry *parent,
+ void *data,
+ const struct file_operations *fops);
+
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
@@ -613,6 +619,8 @@ extern unsigned long trace_flags;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);
+extern enum print_line_t
+trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
@@ -644,7 +652,6 @@ static inline int ftrace_graph_addr(unsigned long addr)
return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
-
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
@@ -692,6 +699,7 @@ enum trace_iterator_flags {
TRACE_ITER_LATENCY_FMT = 0x40000,
TRACE_ITER_GLOBAL_CLK = 0x80000,
TRACE_ITER_SLEEP_TIME = 0x100000,
+ TRACE_ITER_GRAPH_TIME = 0x200000,
};
/*
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 7a30fc4c3642..a29ef23ffb47 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -9,6 +9,7 @@
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
+#include <linux/time.h>
#include "trace.h"
#include "trace_output.h"
@@ -67,7 +68,7 @@ initcall_call_print_line(struct trace_iterator *iter)
trace_assign_type(field, entry);
call = &field->boot_call;
ts = iter->ts;
- nsec_rem = do_div(ts, 1000000000);
+ nsec_rem = do_div(ts, NSEC_PER_SEC);
ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
(unsigned long)ts, nsec_rem, call->func, call->caller);
@@ -92,7 +93,7 @@ initcall_ret_print_line(struct trace_iterator *iter)
trace_assign_type(field, entry);
init_ret = &field->boot_ret;
ts = iter->ts;
- nsec_rem = do_div(ts, 1000000000);
+ nsec_rem = do_div(ts, NSEC_PER_SEC);
ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
"returned %d after %llu msecs\n",
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index ad8c22efff41..e6e32912ffb8 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -263,7 +263,7 @@ static int branch_stat_show(struct seq_file *m, void *v)
return 0;
}
-static void *annotated_branch_stat_start(void)
+static void *annotated_branch_stat_start(struct tracer_stat *trace)
{
return __start_annotated_branch_profile;
}
@@ -338,7 +338,7 @@ static int all_branch_stat_headers(struct seq_file *m)
return 0;
}
-static void *all_branch_stat_start(void)
+static void *all_branch_stat_start(struct tracer_stat *trace)
{
return __start_branch_profile;
}
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 22cba9970776..199de9c74229 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -28,4 +28,3 @@ void ftrace_profile_disable(int event_id)
return event->profile_disable(event);
}
}
-
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d28687e7b3a7..10f6ad7d85f6 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -78,13 +78,14 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
current->ret_stack[index].ret = ret;
current->ret_stack[index].func = func;
current->ret_stack[index].calltime = calltime;
+ current->ret_stack[index].subtime = 0;
*depth = index;
return 0;
}
/* Retrieve a function return address to the trace stack on thread info.*/
-void
+static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
int index;
@@ -104,9 +105,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
trace->calltime = current->ret_stack[index].calltime;
trace->overrun = atomic_read(&current->trace_overrun);
trace->depth = index;
- barrier();
- current->curr_ret_stack--;
-
}
/*
@@ -121,6 +119,8 @@ unsigned long ftrace_return_to_handler(void)
ftrace_pop_return_trace(&trace, &ret);
trace.rettime = trace_clock_local();
ftrace_graph_return(&trace);
+ barrier();
+ current->curr_ret_stack--;
if (unlikely(!ret)) {
ftrace_graph_stop();
@@ -426,8 +426,8 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
return TRACE_TYPE_HANDLED;
}
-static enum print_line_t
-print_graph_duration(unsigned long long duration, struct trace_seq *s)
+enum print_line_t
+trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
unsigned long nsecs_rem = do_div(duration, 1000);
/* log10(ULONG_MAX) + '\0' */
@@ -464,12 +464,23 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_duration(unsigned long long duration, struct trace_seq *s)
+{
+ int ret;
+
+ ret = trace_print_graph_duration(duration, s);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
ret = trace_seq_printf(s, "| ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- return TRACE_TYPE_HANDLED;
+ return TRACE_TYPE_HANDLED;
}
/* Case of a leaf function on its call entry */
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 8e37fcddd8b4..d53b45ed0806 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -9,6 +9,8 @@
#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>
+#include <linux/time.h>
+
#include <asm/atomic.h>
#include "trace.h"
@@ -174,7 +176,7 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
struct mmiotrace_rw *rw;
struct trace_seq *s = &iter->seq;
unsigned long long t = ns2usecs(iter->ts);
- unsigned long usec_rem = do_div(t, 1000000ULL);
+ unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned secs = (unsigned long)t;
int ret = 1;
@@ -221,7 +223,7 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter)
struct mmiotrace_map *m;
struct trace_seq *s = &iter->seq;
unsigned long long t = ns2usecs(iter->ts);
- unsigned long usec_rem = do_div(t, 1000000ULL);
+ unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned secs = (unsigned long)t;
int ret;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 64b54a59c55b..0e70fb07ca78 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -19,6 +19,16 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
static int next_event_type = __TRACE_LAST_TYPE + 1;
+void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+ int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
+
+ s->buffer[len] = 0;
+ seq_puts(m, s->buffer);
+
+ trace_seq_init(s);
+}
+
enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index e0bde39c2dd9..91630217fb46 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -20,6 +20,8 @@ trace_print_bprintk_msg_only(struct trace_iterator *iter);
extern enum print_line_t
trace_print_printk_msg_only(struct trace_iterator *iter);
+extern void trace_print_seq(struct seq_file *m, struct trace_seq *s);
+
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index eb81556107fe..9bece9687b62 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -245,17 +245,13 @@ static const struct file_operations ftrace_formats_fops = {
static __init int init_trace_printk_function_export(void)
{
struct dentry *d_tracer;
- struct dentry *entry;
d_tracer = tracing_init_dentry();
if (!d_tracer)
return 0;
- entry = debugfs_create_file("printk_formats", 0444, d_tracer,
+ trace_create_file("printk_formats", 0444, d_tracer,
NULL, &ftrace_formats_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'printk_formats' entry\n");
return 0;
}
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 9117cea6f1ae..9d8cccdfaa06 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -29,13 +29,13 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
int cpu;
int pc;
- if (!sched_ref || sched_stopped)
+ if (unlikely(!sched_ref))
return;
tracing_record_cmdline(prev);
tracing_record_cmdline(next);
- if (!tracer_enabled)
+ if (!tracer_enabled || sched_stopped)
return;
pc = preempt_count();
@@ -56,15 +56,15 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
unsigned long flags;
int cpu, pc;
- if (!likely(tracer_enabled))
+ if (unlikely(!sched_ref))
return;
- pc = preempt_count();
tracing_record_cmdline(current);
- if (sched_stopped)
+ if (!tracer_enabled || sched_stopped)
return;
+ pc = preempt_count();
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = ctx_trace->data[cpu];
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index c750f65f9661..1796f00524e1 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -352,19 +352,14 @@ __setup("stacktrace", enable_stacktrace);
static __init int stack_trace_init(void)
{
struct dentry *d_tracer;
- struct dentry *entry;
d_tracer = tracing_init_dentry();
- entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
- &max_stack_size, &stack_max_size_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'stack_max_size' entry\n");
+ trace_create_file("stack_max_size", 0644, d_tracer,
+ &max_stack_size, &stack_max_size_fops);
- entry = debugfs_create_file("stack_trace", 0444, d_tracer,
- NULL, &stack_trace_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'stack_trace' entry\n");
+ trace_create_file("stack_trace", 0444, d_tracer,
+ NULL, &stack_trace_fops);
if (stack_tracer_enabled)
register_ftrace_function(&trace_ops);
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index acdebd771a93..fdde3a4a94cd 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -85,7 +85,7 @@ static int stat_seq_init(struct tracer_stat_session *session)
if (!ts->stat_cmp)
ts->stat_cmp = dummy_cmp;
- stat = ts->stat_start();
+ stat = ts->stat_start(ts);
if (!stat)
goto exit;
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h
index 202274cf7f3d..f3546a2cd826 100644
--- a/kernel/trace/trace_stat.h
+++ b/kernel/trace/trace_stat.h
@@ -12,7 +12,7 @@ struct tracer_stat {
/* The name of your stat file */
const char *name;
/* Iteration over statistic entries */
- void *(*stat_start)(void);
+ void *(*stat_start)(struct tracer_stat *trace);
void *(*stat_next)(void *prev, int idx);
/* Compare two entries for stats sorting */
int (*stat_cmp)(void *p1, void *p2);
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 91fd19c2149f..e04b76cc238a 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -321,11 +321,7 @@ static const struct file_operations sysprof_sample_fops = {
void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
{
- struct dentry *entry;
- entry = debugfs_create_file("sysprof_sample_period", 0644,
+ trace_create_file("sysprof_sample_period", 0644,
d_tracer, NULL, &sysprof_sample_fops);
- if (entry)
- return;
- pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n");
}
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 797201e4a137..984b9175c13d 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -152,7 +152,7 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
return ret;
}
-static void *workqueue_stat_start(void)
+static void *workqueue_stat_start(struct tracer_stat *trace)
{
int cpu;
void *ret = NULL;