tracing: Consolidate trace_add/remove_event_call back to the nolock functions
The trace_add/remove_event_call_nolock() functions were added to allow the trace_add/remove_event_call() code be called when the event_mutex lock was already taken. Now that all callers are done within the event_mutex, there's no reason to have two different interfaces. Remove the current wrapper trace_add/remove_event_call()s and rename the _nolock versions back to the original names. Link: http://lkml.kernel.org/r/154140866955.17322.2081425494660638846.stgit@devbox Acked-by: Masami Hiramatsu <mhiramat@kernel.org> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
This commit is contained in:
parent
0e2b81f7b5
commit
7e1413edd6
@@ -529,8 +529,6 @@ extern int trace_event_raw_init(struct trace_event_call *call);
|
|||||||
extern int trace_define_field(struct trace_event_call *call, const char *type,
|
extern int trace_define_field(struct trace_event_call *call, const char *type,
|
||||||
const char *name, int offset, int size,
|
const char *name, int offset, int size,
|
||||||
int is_signed, int filter_type);
|
int is_signed, int filter_type);
|
||||||
extern int trace_add_event_call_nolock(struct trace_event_call *call);
|
|
||||||
extern int trace_remove_event_call_nolock(struct trace_event_call *call);
|
|
||||||
extern int trace_add_event_call(struct trace_event_call *call);
|
extern int trace_add_event_call(struct trace_event_call *call);
|
||||||
extern int trace_remove_event_call(struct trace_event_call *call);
|
extern int trace_remove_event_call(struct trace_event_call *call);
|
||||||
extern int trace_event_get_offsets(struct trace_event_call *call);
|
extern int trace_event_get_offsets(struct trace_event_call *call);
|
||||||
|
@@ -2305,7 +2305,8 @@ __trace_early_add_new_event(struct trace_event_call *call,
|
|||||||
struct ftrace_module_file_ops;
|
struct ftrace_module_file_ops;
|
||||||
static void __add_event_to_tracers(struct trace_event_call *call);
|
static void __add_event_to_tracers(struct trace_event_call *call);
|
||||||
|
|
||||||
int trace_add_event_call_nolock(struct trace_event_call *call)
|
/* Add an additional event_call dynamically */
|
||||||
|
int trace_add_event_call(struct trace_event_call *call)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
lockdep_assert_held(&event_mutex);
|
lockdep_assert_held(&event_mutex);
|
||||||
@@ -2320,17 +2321,6 @@ int trace_add_event_call_nolock(struct trace_event_call *call)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Add an additional event_call dynamically */
|
|
||||||
int trace_add_event_call(struct trace_event_call *call)
|
|
||||||
{
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
mutex_lock(&event_mutex);
|
|
||||||
ret = trace_add_event_call_nolock(call);
|
|
||||||
mutex_unlock(&event_mutex);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Must be called under locking of trace_types_lock, event_mutex and
|
* Must be called under locking of trace_types_lock, event_mutex and
|
||||||
* trace_event_sem.
|
* trace_event_sem.
|
||||||
@@ -2376,8 +2366,8 @@ static int probe_remove_event_call(struct trace_event_call *call)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* no event_mutex version */
|
/* Remove an event_call */
|
||||||
int trace_remove_event_call_nolock(struct trace_event_call *call)
|
int trace_remove_event_call(struct trace_event_call *call)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
@@ -2392,18 +2382,6 @@ int trace_remove_event_call_nolock(struct trace_event_call *call)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Remove an event_call */
|
|
||||||
int trace_remove_event_call(struct trace_event_call *call)
|
|
||||||
{
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
mutex_lock(&event_mutex);
|
|
||||||
ret = trace_remove_event_call_nolock(call);
|
|
||||||
mutex_unlock(&event_mutex);
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define for_each_event(event, start, end) \
|
#define for_each_event(event, start, end) \
|
||||||
for (event = start; \
|
for (event = start; \
|
||||||
(unsigned long)event < (unsigned long)end; \
|
(unsigned long)event < (unsigned long)end; \
|
||||||
|
@@ -960,7 +960,7 @@ static int register_synth_event(struct synth_event *event)
|
|||||||
call->data = event;
|
call->data = event;
|
||||||
call->tp = event->tp;
|
call->tp = event->tp;
|
||||||
|
|
||||||
ret = trace_add_event_call_nolock(call);
|
ret = trace_add_event_call(call);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
pr_warn("Failed to register synthetic event: %s\n",
|
pr_warn("Failed to register synthetic event: %s\n",
|
||||||
trace_event_name(call));
|
trace_event_name(call));
|
||||||
@@ -969,7 +969,7 @@ static int register_synth_event(struct synth_event *event)
|
|||||||
|
|
||||||
ret = set_synth_event_print_fmt(call);
|
ret = set_synth_event_print_fmt(call);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
trace_remove_event_call_nolock(call);
|
trace_remove_event_call(call);
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
@@ -984,7 +984,7 @@ static int unregister_synth_event(struct synth_event *event)
|
|||||||
struct trace_event_call *call = &event->call;
|
struct trace_event_call *call = &event->call;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = trace_remove_event_call_nolock(call);
|
ret = trace_remove_event_call(call);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@@ -1353,7 +1353,7 @@ static int register_kprobe_event(struct trace_kprobe *tk)
|
|||||||
kfree(call->print_fmt);
|
kfree(call->print_fmt);
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
}
|
}
|
||||||
ret = trace_add_event_call_nolock(call);
|
ret = trace_add_event_call(call);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
pr_info("Failed to register kprobe event: %s\n",
|
pr_info("Failed to register kprobe event: %s\n",
|
||||||
trace_event_name(call));
|
trace_event_name(call));
|
||||||
@@ -1368,7 +1368,7 @@ static int unregister_kprobe_event(struct trace_kprobe *tk)
|
|||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
/* tp->event is unregistered in trace_remove_event_call() */
|
/* tp->event is unregistered in trace_remove_event_call() */
|
||||||
ret = trace_remove_event_call_nolock(&tk->tp.call);
|
ret = trace_remove_event_call(&tk->tp.call);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
kfree(tk->tp.call.print_fmt);
|
kfree(tk->tp.call.print_fmt);
|
||||||
return ret;
|
return ret;
|
||||||
|
@@ -1320,7 +1320,7 @@ static int register_uprobe_event(struct trace_uprobe *tu)
|
|||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = trace_add_event_call_nolock(call);
|
ret = trace_add_event_call(call);
|
||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
pr_info("Failed to register uprobe event: %s\n",
|
pr_info("Failed to register uprobe event: %s\n",
|
||||||
@@ -1337,7 +1337,7 @@ static int unregister_uprobe_event(struct trace_uprobe *tu)
|
|||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
/* tu->event is unregistered in trace_remove_event_call() */
|
/* tu->event is unregistered in trace_remove_event_call() */
|
||||||
ret = trace_remove_event_call_nolock(&tu->tp.call);
|
ret = trace_remove_event_call(&tu->tp.call);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
kfree(tu->tp.call.print_fmt);
|
kfree(tu->tp.call.print_fmt);
|
||||||
|
Loading…
Reference in New Issue
Block a user