Commit 843f4f4b authored by: Linus Torvalds

Merge tag 'trace-fixes-3.13-rc2' of...

Merge tag 'trace-fixes-3.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fix from Steven Rostedt:
 "A regression showed up that there's a large delay when enabling all
  events.  This was prevalent when FTRACE_SELFTEST was enabled which
  enables all events several times, and caused the system bootup to
  pause for over a minute.

  This was tracked down to an addition of a synchronize_sched()
  performed when system call tracepoints are unregistered.

  The synchronize_sched() is needed between the unregistering of the
  system call tracepoint and a deletion of a tracing instance buffer.
  But placing the synchronize_sched() in the unreg of *every* system
  call tracepoint is a bit overboard.  A single synchronize_sched()
  before the deletion of the instance is sufficient"

* tag 'trace-fixes-3.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Only run synchronize_sched() at instance deletion time
@@ -2314,6 +2314,9 @@ int event_trace_del_tracer(struct trace_array *tr)
 	/* Disable any running events */
 	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
 
+	/* Access to events are within rcu_read_lock_sched() */
+	synchronize_sched();
+
 	down_write(&trace_event_sem);
 	__trace_remove_event_dirs(tr);
 	debugfs_remove_recursive(tr->event_dir);
...
@@ -431,11 +431,6 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_enter)
 		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
 	mutex_unlock(&syscall_trace_lock);
-	/*
-	 * Callers expect the event to be completely disabled on
-	 * return, so wait for current handlers to finish.
-	 */
-	synchronize_sched();
 }
 
 static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -474,11 +469,6 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_exit)
 		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
 	mutex_unlock(&syscall_trace_lock);
-	/*
-	 * Callers expect the event to be completely disabled on
-	 * return, so wait for current handlers to finish.
-	 */
-	synchronize_sched();
 }
 
 static int __init init_syscall_trace(struct ftrace_event_call *call)
...
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册