author     Steven Rostedt (VMware) <rostedt@goodmis.org>   2018-12-04 13:35:45 -0500
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>   2018-12-10 12:22:10 -0500
commit     7e1413edd6194a9807aa5f3ac0378b9b4b9da879 (patch)
tree       8357aadd8a15de615bf2fbb39cbd412a9697d165 /kernel
parent     tracing: Remove unneeded synth_event_mutex (diff)
tracing: Consolidate trace_add/remove_event_call back to the nolock functions
The trace_add/remove_event_call_nolock() functions were added to allow the
trace_add/remove_event_call() code to be called when the event_mutex lock was
already taken. Now that all callers run with the event_mutex held, there's no
reason to have two different interfaces. Remove the current wrapper
trace_add/remove_event_call()s and rename the _nolock versions back to the
original names.

Link: http://lkml.kernel.org/r/154140866955.17322.2081425494660638846.stgit@devbox

Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
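For context, here is a minimal sketch (not part of the patch) of the calling
convention after the consolidation: code under kernel/trace/ takes event_mutex
itself, and trace_add_event_call()/trace_remove_event_call() merely assert via
lockdep that the lock is held. The names my_event_call and
my_register_dynamic_event() are made up for illustration, and the sketch
assumes event_mutex is visible through the internal kernel/trace/trace.h
header and that the trace_event_call has been set up the way the kprobe,
uprobe, and synthetic-event callers in this patch do.

	/*
	 * Sketch only, hypothetical names; assumes my_event_call's class,
	 * name, etc. were initialized beforehand, as the real callers do.
	 */
	#include <linux/mutex.h>
	#include <linux/trace_events.h>

	#include "trace.h"	/* internal header declaring event_mutex */

	static struct trace_event_call my_event_call;

	static int my_register_dynamic_event(void)
	{
		int ret;

		/* The caller owns the locking now ... */
		mutex_lock(&event_mutex);
		/* ... and trace_add_event_call() only checks it is held. */
		ret = trace_add_event_call(&my_event_call);
		mutex_unlock(&event_mutex);

		return ret;
	}
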
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace_events.c       |  30
-rw-r--r--  kernel/trace/trace_events_hist.c  |   6
-rw-r--r--  kernel/trace/trace_kprobe.c       |   4
-rw-r--r--  kernel/trace/trace_uprobe.c       |   4
4 files changed, 11 insertions(+), 33 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index a3b157f689ee..bd0162c0467c 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2305,7 +2305,8 @@ __trace_early_add_new_event(struct trace_event_call *call,
struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct trace_event_call *call);
-int trace_add_event_call_nolock(struct trace_event_call *call)
+/* Add an additional event_call dynamically */
+int trace_add_event_call(struct trace_event_call *call)
{
int ret;
lockdep_assert_held(&event_mutex);
@@ -2320,17 +2321,6 @@ int trace_add_event_call_nolock(struct trace_event_call *call)
return ret;
}
-/* Add an additional event_call dynamically */
-int trace_add_event_call(struct trace_event_call *call)
-{
- int ret;
-
- mutex_lock(&event_mutex);
- ret = trace_add_event_call_nolock(call);
- mutex_unlock(&event_mutex);
- return ret;
-}
-
/*
* Must be called under locking of trace_types_lock, event_mutex and
* trace_event_sem.
@@ -2376,8 +2366,8 @@ static int probe_remove_event_call(struct trace_event_call *call)
return 0;
}
-/* no event_mutex version */
-int trace_remove_event_call_nolock(struct trace_event_call *call)
+/* Remove an event_call */
+int trace_remove_event_call(struct trace_event_call *call)
{
int ret;
@@ -2392,18 +2382,6 @@ int trace_remove_event_call_nolock(struct trace_event_call *call)
return ret;
}
-/* Remove an event_call */
-int trace_remove_event_call(struct trace_event_call *call)
-{
- int ret;
-
- mutex_lock(&event_mutex);
- ret = trace_remove_event_call_nolock(call);
- mutex_unlock(&event_mutex);
-
- return ret;
-}
-
#define for_each_event(event, start, end) \
for (event = start; \
(unsigned long)event < (unsigned long)end; \
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 21e4954375a1..82e72c48a5a9 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -960,7 +960,7 @@ static int register_synth_event(struct synth_event *event)
call->data = event;
call->tp = event->tp;
- ret = trace_add_event_call_nolock(call);
+ ret = trace_add_event_call(call);
if (ret) {
pr_warn("Failed to register synthetic event: %s\n",
trace_event_name(call));
@@ -969,7 +969,7 @@ static int register_synth_event(struct synth_event *event)
ret = set_synth_event_print_fmt(call);
if (ret < 0) {
- trace_remove_event_call_nolock(call);
+ trace_remove_event_call(call);
goto err;
}
out:
@@ -984,7 +984,7 @@ static int unregister_synth_event(struct synth_event *event)
struct trace_event_call *call = &event->call;
int ret;
- ret = trace_remove_event_call_nolock(call);
+ ret = trace_remove_event_call(call);
return ret;
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index bdf8c2ad5152..0e0f7b8024fb 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1353,7 +1353,7 @@ static int register_kprobe_event(struct trace_kprobe *tk)
kfree(call->print_fmt);
return -ENODEV;
}
- ret = trace_add_event_call_nolock(call);
+ ret = trace_add_event_call(call);
if (ret) {
pr_info("Failed to register kprobe event: %s\n",
trace_event_name(call));
@@ -1368,7 +1368,7 @@ static int unregister_kprobe_event(struct trace_kprobe *tk)
int ret;
/* tp->event is unregistered in trace_remove_event_call() */
- ret = trace_remove_event_call_nolock(&tk->tp.call);
+ ret = trace_remove_event_call(&tk->tp.call);
if (!ret)
kfree(tk->tp.call.print_fmt);
return ret;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 4a7b21c891f3..e335576b9411 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1320,7 +1320,7 @@ static int register_uprobe_event(struct trace_uprobe *tu)
return -ENODEV;
}
- ret = trace_add_event_call_nolock(call);
+ ret = trace_add_event_call(call);
if (ret) {
pr_info("Failed to register uprobe event: %s\n",
@@ -1337,7 +1337,7 @@ static int unregister_uprobe_event(struct trace_uprobe *tu)
int ret;
/* tu->event is unregistered in trace_remove_event_call() */
- ret = trace_remove_event_call_nolock(&tu->tp.call);
+ ret = trace_remove_event_call(&tu->tp.call);
if (ret)
return ret;
kfree(tu->tp.call.print_fmt);