path: root/kernel/trace
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c              | 35
-rw-r--r--  kernel/trace/kmemtrace.c           |  2
-rw-r--r--  kernel/trace/ring_buffer.c         | 14
-rw-r--r--  kernel/trace/trace.c               | 10
-rw-r--r--  kernel/trace/trace_branch.c        |  8
-rw-r--r--  kernel/trace/trace_event_profile.c | 15
-rw-r--r--  kernel/trace/trace_events_filter.c |  3
-rw-r--r--  kernel/trace/trace_hw_branches.c   |  8
-rw-r--r--  kernel/trace/trace_output.c        | 23
-rw-r--r--  kernel/trace/trace_syscalls.c      |  4
10 files changed, 67 insertions(+), 55 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 46592feab5a6..6dc4e5ef7a01 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void)
if (ftrace_trace_function == ftrace_stub)
return;
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
func = ftrace_trace_function;
+#else
+ func = __ftrace_trace_function;
+#endif
if (ftrace_pid_trace) {
set_ftrace_pid_function(func);
@@ -736,7 +740,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
out:
mutex_unlock(&ftrace_profile_lock);
- filp->f_pos += cnt;
+ *ppos += cnt;
return cnt;
}
@@ -1074,14 +1078,9 @@ static void ftrace_replace_code(int enable)
failed = __ftrace_replace_code(rec, enable);
if (failed) {
rec->flags |= FTRACE_FL_FAILED;
- if ((system_state == SYSTEM_BOOTING) ||
- !core_kernel_text(rec->ip)) {
- ftrace_free_rec(rec);
- } else {
- ftrace_bug(failed, rec->ip);
- /* Stop processing */
- return;
- }
+ ftrace_bug(failed, rec->ip);
+ /* Stop processing */
+ return;
}
} while_for_each_ftrace_rec();
}
@@ -2223,15 +2222,15 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
ret = ftrace_process_regex(parser->buffer,
parser->idx, enable);
if (ret)
- goto out;
+ goto out_unlock;
trace_parser_clear(parser);
}
ret = read;
-
+out_unlock:
mutex_unlock(&ftrace_regex_lock);
-out:
+
return ret;
}
@@ -2658,19 +2657,17 @@ static int ftrace_convert_nops(struct module *mod,
}
#ifdef CONFIG_MODULES
-void ftrace_release(void *start, void *end)
+void ftrace_release_mod(struct module *mod)
{
struct dyn_ftrace *rec;
struct ftrace_page *pg;
- unsigned long s = (unsigned long)start;
- unsigned long e = (unsigned long)end;
- if (ftrace_disabled || !start || start == end)
+ if (ftrace_disabled)
return;
mutex_lock(&ftrace_lock);
do_for_each_ftrace_rec(pg, rec) {
- if ((rec->ip >= s) && (rec->ip < e)) {
+ if (within_module_core(rec->ip, mod)) {
/*
* rec->ip is changed in ftrace_free_rec()
* It should not be between s and e if the record was freed.
@@ -2702,9 +2699,7 @@ static int ftrace_module_notify(struct notifier_block *self,
mod->num_ftrace_callsites);
break;
case MODULE_STATE_GOING:
- ftrace_release(mod->ftrace_callsites,
- mod->ftrace_callsites +
- mod->num_ftrace_callsites);
+ ftrace_release_mod(mod);
break;
}
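
A recurring fix in this series (here and again in trace.c below) replaces
direct filp->f_pos arithmetic with *ppos. A ->write handler receives its
offset through the ppos argument, which does not necessarily point at
filp->f_pos (pwrite() and kernel-internal writers pass a separate offset),
so bumping f_pos directly can lose the position update. A minimal sketch
of the corrected pattern; example_write is an illustrative name, not part
of the patch:

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    /* Sketch of a debugfs-style write handler (hypothetical). */
    static ssize_t example_write(struct file *filp, const char __user *ubuf,
                                 size_t cnt, loff_t *ppos)
    {
            char buf[64];

            if (cnt >= sizeof(buf))
                    return -EINVAL;
            if (copy_from_user(buf, ubuf, cnt))
                    return -EFAULT;
            buf[cnt] = '\0';

            /* ... parse and apply buf ... */

            *ppos += cnt;           /* advance through ppos, not filp->f_pos */
            return cnt;
    }
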
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 81b1645c8549..a91da69f153a 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void)
return 1;
}
- if (!register_tracer(&kmem_tracer)) {
+ if (register_tracer(&kmem_tracer) != 0) {
pr_warning("Warning: could not register the kmem tracer\n");
return 1;
}
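
The kmemtrace hunk inverts a success test: register_tracer() follows the
usual kernel convention of returning 0 on success and nonzero on error,
so the old "if (!register_tracer(...))" warned exactly when registration
had succeeded and stayed silent on failure. The convention, sketched with
a hypothetical demo_register():

    /* 0 means success; any nonzero return is the failure case. */
    int ret = demo_register(&my_tracer);    /* hypothetical registrar */
    if (ret != 0)                           /* NOT: if (!ret) */
            pr_warning("could not register tracer: %d\n", ret);
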
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d4ff01970547..5dd017fea6f5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -483,7 +483,7 @@ struct ring_buffer_iter {
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
-static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
+static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
/* shift to debug/test normalization and TIME_EXTENTS */
return buffer->clock() << DEBUG_SHIFT;
@@ -494,7 +494,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
u64 time;
preempt_disable_notrace();
- time = rb_time_stamp(buffer, cpu);
+ time = rb_time_stamp(buffer);
preempt_enable_no_resched_notrace();
return time;
@@ -599,7 +599,7 @@ static struct list_head *rb_list_head(struct list_head *list)
}
/*
- * rb_is_head_page - test if the give page is the head page
+ * rb_is_head_page - test if the given page is the head page
*
* Because the reader may move the head_page pointer, we can
* not trust what the head page is (it may be pointing to
@@ -1193,6 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
atomic_inc(&cpu_buffer->record_disabled);
synchronize_sched();
+ spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer);
for (i = 0; i < nr_pages; i++) {
@@ -1207,6 +1208,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
return;
rb_reset_cpu(cpu_buffer);
+ spin_unlock_irq(&cpu_buffer->reader_lock);
rb_check_pages(cpu_buffer);
@@ -1868,7 +1870,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* Nested commits always have zero deltas, so
* just reread the time stamp
*/
- *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
+ *ts = rb_time_stamp(buffer);
next_page->page->time_stamp = *ts;
}
@@ -2111,7 +2113,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
goto out_fail;
- ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
+ ts = rb_time_stamp(cpu_buffer->buffer);
/*
* Only the first commit can update the timestamp.
@@ -2681,7 +2683,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
EXPORT_SYMBOL_GPL(ring_buffer_entries);
/**
- * ring_buffer_overrun_cpu - get the number of overruns in buffer
+ * ring_buffer_overruns - get the number of overruns in buffer
* @buffer: The ring buffer
*
* Returns the total number of overruns in the ring buffer
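
In the ring_buffer.c hunks, the rb_time_stamp() changes simply drop a cpu
parameter the function never used. The substantive fix is in
rb_remove_pages(): the page-list surgery now happens under
cpu_buffer->reader_lock with interrupts off, so a concurrent reader can
never observe a half-unlinked list. The shape of that rule, as a sketch
with illustrative demo_* names:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct demo_buffer {
            spinlock_t              reader_lock;
            struct list_head        pages;
    };

    static void demo_remove_pages(struct demo_buffer *b, unsigned int nr)
    {
            struct list_head *p;

            spin_lock_irq(&b->reader_lock);         /* exclude readers */
            while (nr-- && !list_empty(&b->pages)) {
                    p = b->pages.next;
                    list_del_init(p);               /* unlink under the lock */
            }
            spin_unlock_irq(&b->reader_lock);
    }
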
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 45068269ebb1..b20d3ec75de9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1393,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr,
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
- return trace_array_printk(&global_trace, ip, fmt, args);
+ return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
@@ -2440,7 +2440,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
return ret;
}
- filp->f_pos += cnt;
+ *ppos += cnt;
return cnt;
}
@@ -2582,7 +2582,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
}
mutex_unlock(&trace_types_lock);
- filp->f_pos += cnt;
+ *ppos += cnt;
return cnt;
}
@@ -2764,7 +2764,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
if (err)
return err;
- filp->f_pos += ret;
+ *ppos += ret;
return ret;
}
@@ -3299,7 +3299,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
}
}
- filp->f_pos += cnt;
+ *ppos += cnt;
/* If check pages failed, return ENOMEM */
if (tracing_disabled)
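
The trace_vprintk() one-liner fixes a classic varargs pairing bug: the old
code forwarded a va_list to the variadic trace_array_printk(), which would
read it back as a single argument; a function holding a va_list must call
the v-variant. A userspace analogue of the rule, with illustrative demo_*
names:

    #include <stdarg.h>
    #include <stdio.h>

    static int demo_vlog(const char *fmt, va_list ap)
    {
            return vfprintf(stderr, fmt, ap);   /* v-variant takes the va_list */
    }

    static int demo_log(const char *fmt, ...)
    {
            va_list ap;
            int ret;

            va_start(ap, fmt);
            ret = demo_vlog(fmt, ap);   /* NOT a call into the "..." variant */
            va_end(ap);
            return ret;
    }
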
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 7a7a9fd249a9..4a194f08f88c 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
struct trace_array *tr = branch_tracer;
struct ring_buffer_event *event;
struct trace_branch *entry;
+ struct ring_buffer *buffer;
unsigned long flags;
int cpu, pc;
const char *p;
@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
goto out;
pc = preempt_count();
- event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
+ buffer = tr->buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
sizeof(*entry), flags, pc);
if (!event)
goto out;
@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
entry->line = f->line;
entry->correct = val == expect;
- if (!filter_check_discard(call, entry, tr->buffer, event))
- ring_buffer_unlock_commit(tr->buffer, event);
+ if (!filter_check_discard(call, entry, buffer, event))
+ ring_buffer_unlock_commit(buffer, event);
out:
atomic_dec(&tr->data[cpu]->disabled);
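
The trace_branch.c change (and the identical trace_hw_branches.c change
below) loads tr->buffer into a local once, then reserves and commits
against that same pointer, so the reserve/commit pair can never be split
across two buffers if tr->buffer is swapped concurrently. A hedged sketch
of the read-once pattern; demo_record is an illustrative name:

    static void demo_record(struct trace_array *tr, int type, int size,
                            unsigned long flags, int pc)
    {
            struct ring_buffer *buffer = tr->buffer;    /* read once */
            struct ring_buffer_event *event;

            event = trace_buffer_lock_reserve(buffer, type, size, flags, pc);
            if (!event)
                    return;
            ring_buffer_unlock_commit(buffer, event);   /* same buffer */
    }
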
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index dd44b8768867..8d5c171cc998 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -31,7 +31,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
if (atomic_inc_return(&event->profile_count))
return 0;
- if (!total_profile_count++) {
+ if (!total_profile_count) {
buf = (char *)alloc_percpu(profile_buf_t);
if (!buf)
goto fail_buf;
@@ -46,14 +46,19 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
}
ret = event->profile_enable();
- if (!ret)
+ if (!ret) {
+ total_profile_count++;
return 0;
+ }
- kfree(trace_profile_buf_nmi);
fail_buf_nmi:
- kfree(trace_profile_buf);
+ if (!total_profile_count) {
+ free_percpu(trace_profile_buf_nmi);
+ free_percpu(trace_profile_buf);
+ trace_profile_buf_nmi = NULL;
+ trace_profile_buf = NULL;
+ }
fail_buf:
- total_profile_count--;
atomic_dec(&event->profile_count);
return ret;
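
The trace_event_profile.c hunks fix two bugs at once: percpu memory must
be released with free_percpu() rather than kfree(), and the global
total_profile_count may only be incremented once ->profile_enable() has
actually succeeded, so the error path can tell whether the buffers are
still unused and safe to free. The corrected shape, as a simplified
sketch (not a drop-in; it reuses the file's own symbols and omits the
NMI buffer):

    static int demo_profile_enable(struct ftrace_event_call *event)
    {
            char *buf;
            int ret = -ENOMEM;

            if (!total_profile_count) {
                    buf = (char *)alloc_percpu(profile_buf_t);
                    if (!buf)
                            return ret;
                    rcu_assign_pointer(trace_profile_buf, buf);
            }

            ret = event->profile_enable();
            if (!ret) {
                    total_profile_count++;          /* bump only on success */
                    return 0;
            }

            if (!total_profile_count) {             /* buffers still unused */
                    free_percpu(trace_profile_buf); /* pair of alloc_percpu */
                    trace_profile_buf = NULL;
            }
            return ret;
    }
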
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 23245785927f..98a6cc5c64ed 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -933,8 +933,9 @@ static void postfix_clear(struct filter_parse_state *ps)
while (!list_empty(&ps->postfix)) {
elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
- kfree(elt->operand);
list_del(&elt->list);
+ kfree(elt->operand);
+ kfree(elt);
}
}
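
The postfix_clear() fix does two things: it moves list_del() ahead of the
kfree() calls and, more importantly, frees the element itself, which the
old loop leaked on every iteration. The generic teardown shape, sketched
with illustrative demo_* names:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_elt {
            char                    *operand;
            struct list_head        list;
    };

    static void demo_clear(struct list_head *head)
    {
            struct demo_elt *elt;

            while (!list_empty(head)) {
                    elt = list_first_entry(head, struct demo_elt, list);
                    list_del(&elt->list);   /* unlink before freeing */
                    kfree(elt->operand);
                    kfree(elt);             /* the pre-patch loop leaked this */
            }
    }
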
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 23b63859130e..69543a905cd5 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -165,6 +165,7 @@ void trace_hw_branch(u64 from, u64 to)
struct ftrace_event_call *call = &event_hw_branch;
struct trace_array *tr = hw_branch_trace;
struct ring_buffer_event *event;
+ struct ring_buffer *buf;
struct hw_branch_entry *entry;
unsigned long irq1;
int cpu;
@@ -180,7 +181,8 @@ void trace_hw_branch(u64 from, u64 to)
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out;
- event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
+ buf = tr->buffer;
+ event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
sizeof(*entry), 0, 0);
if (!event)
goto out;
@@ -189,8 +191,8 @@ void trace_hw_branch(u64 from, u64 to)
entry->ent.type = TRACE_HW_BRANCHES;
entry->from = from;
entry->to = to;
- if (!filter_check_discard(call, entry, tr->buffer, event))
- trace_buffer_unlock_commit(tr, event, 0, 0);
+ if (!filter_check_discard(call, entry, buf, event))
+ trace_buffer_unlock_commit(buf, event, 0, 0);
out:
atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index f572f44c6e1e..b6c12c6a1bcd 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -69,6 +69,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
* @s: trace sequence descriptor
* @fmt: printf format string
*
+ * It returns 0 if the trace would overflow the buffer's free
+ * space, 1 otherwise.
+ *
* The tracer may use either sequence operations or its own
* copy to user routines. To simplify formatting of a trace
* trace_seq_printf is used to store strings into a special
@@ -95,7 +98,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
s->len += ret;
- return len;
+ return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
@@ -486,16 +489,18 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
hardirq ? 'h' : softirq ? 's' : '.'))
return 0;
- if (entry->lock_depth < 0)
- ret = trace_seq_putc(s, '.');
+ if (entry->preempt_count)
+ ret = trace_seq_printf(s, "%x", entry->preempt_count);
else
- ret = trace_seq_printf(s, "%d", entry->lock_depth);
+ ret = trace_seq_putc(s, '.');
+
if (!ret)
return 0;
- if (entry->preempt_count)
- return trace_seq_printf(s, "%x", entry->preempt_count);
- return trace_seq_putc(s, '.');
+ if (entry->lock_depth < 0)
+ return trace_seq_putc(s, '.');
+
+ return trace_seq_printf(s, "%d", entry->lock_depth);
}
static int
@@ -883,7 +888,7 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
trace_assign_type(field, iter->ent);
if (!S)
- task_state_char(field->prev_state);
+ S = task_state_char(field->prev_state);
T = task_state_char(field->next_state);
if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
field->prev_pid,
@@ -918,7 +923,7 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
trace_assign_type(field, iter->ent);
if (!S)
- task_state_char(field->prev_state);
+ S = task_state_char(field->prev_state);
T = task_state_char(field->next_state);
SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
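
Both trace_ctxwake_* hunks fix the same slip: task_state_char() was
called for its return value, but the result was never assigned, leaving
S as the caller passed it ('\0'). A tiny userspace analogue of this bug
class (illustrative code, not part of the patch):

    #include <ctype.h>
    #include <stdio.h>

    int main(void)
    {
            char c = 'a';

            toupper(c);             /* bug: result discarded, c stays 'a' */
            c = toupper(c);         /* fix: assign the returned value */
            printf("%c\n", c);      /* prints 'A' */
            return 0;
    }
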
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 9fbce6c9d2e1..527e17eae575 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -166,7 +166,7 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
SYSCALL_FIELD(int, nr),
- SYSCALL_FIELD(unsigned long, ret));
+ SYSCALL_FIELD(long, ret));
if (!ret)
return 0;
@@ -212,7 +212,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
if (ret)
return ret;
- ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0,
+ ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0,
FILTER_OTHER);
return ret;
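
The SYSCALL_FIELD type change matters for filtering: syscall return
values are signed longs carrying negative errnos, and describing the
field as unsigned long makes a filter such as "ret < 0" unable to match.
A userspace illustration of the signedness trap (illustrative code, not
part of the patch):

    #include <stdio.h>

    int main(void)
    {
            long sret = -2;                         /* e.g. -ENOENT */
            unsigned long uret = (unsigned long)sret;

            printf("signed:   ret < 0 -> %d\n", sret < 0);  /* 1: matches    */
            printf("unsigned: ret < 0 -> %d\n", uret < 0);  /* 0: never does */
            return 0;
    }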