diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 95a0ad191f1934a2ebec7de0795fcaa103290d30..b0a46f889659ba755275f2c3accff1f3c2c2cfc4 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -182,6 +182,7 @@ config FTRACE_SYSCALLS
 	bool "Trace syscalls"
 	depends on HAVE_FTRACE_SYSCALLS
 	select TRACING
+	select KALLSYMS
 	help
 	  Basic tracer to catch the syscall entry and exit events.
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index efe3202c02092188d14a3eb32f8c9d92024a8dca..ae32d3b99b4b07b55509f7145598bc783b8ace41 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2494,7 +2494,7 @@ static int tracing_set_tracer(const char *buf)
 	if (!ring_buffer_expanded) {
 		ret = tracing_resize_ring_buffer(trace_buf_size);
 		if (ret < 0)
-			return ret;
+			goto out;
 		ret = 0;
 	}
 
@@ -4125,7 +4125,8 @@ __init static int tracer_alloc_buffers(void)
 				       &trace_panic_notifier);
 
 	register_die_notifier(&trace_die_notifier);
-	ret = 0;
+
+	return 0;
 
 out_free_cpumask:
 	free_cpumask_var(tracing_reader_cpumask);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index f907a2b29028a966033a9b5064c43fdad9843c3c..a2ca6f0fef9bb459d99038e2436f3e2a5f0a85bc 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -414,7 +414,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
-		goto out;
+		goto out_no_start;
 	}
 
 	/* reset the max latency */
@@ -432,21 +432,16 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	tracing_stop();
 	/* check both trace buffers */
 	ret = trace_test_buffer(tr, NULL);
-	if (ret) {
-		tracing_start();
+	if (ret)
 		goto out;
-	}
 
 	ret = trace_test_buffer(&max_tr, &count);
-	if (ret) {
-		tracing_start();
+	if (ret)
 		goto out;
-	}
 
 	if (!ret && !count) {
 		printk(KERN_CONT ".. no entries found ..");
 		ret = -1;
-		tracing_start();
 		goto out;
 	}
 
@@ -475,9 +470,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 		goto out;
 	}
 
- out:
-	trace->reset(tr);
+out:
 	tracing_start();
+out_no_start:
+	trace->reset(tr);
 	tracing_max_latency = save_max;
 
 	return ret;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index c72e599230ff58d8a238501a5e734ea29fe65b38..a2a3af29c94337bef68f64608a85977a2fe39ce8 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -5,9 +5,13 @@
 #include "trace_output.h"
 #include "trace.h"
 
-static atomic_t refcount;
+/* Keep a counter of the syscall tracing users */
+static int refcount;
 
-/* Our two options */
+/* Protect the thread flag toggling from races */
+static DEFINE_MUTEX(syscall_trace_lock);
+
+/* Option to display the parameter types */
 enum {
 	TRACE_SYSCALLS_OPT_TYPES = 0x1,
 };
@@ -18,7 +22,7 @@ static struct tracer_opt syscalls_opts[] = {
 };
 
 static struct tracer_flags syscalls_flags = {
-	.val = 0, /* By default: no args types */
+	.val = 0, /* By default: no parameter types */
 	.opts = syscalls_opts
 };
 
@@ -96,8 +100,11 @@ void start_ftrace_syscalls(void)
 	unsigned long flags;
 	struct task_struct *g, *t;
 
-	if (atomic_inc_return(&refcount) != 1)
-		goto out;
+	mutex_lock(&syscall_trace_lock);
+
+	/* Don't enable the flag on the tasks twice */
+	if (++refcount != 1)
+		goto unlock;
 
 	arch_init_ftrace_syscalls();
 	read_lock_irqsave(&tasklist_lock, flags);
@@ -107,8 +114,9 @@ void start_ftrace_syscalls(void)
 	} while_each_thread(g, t);
 
 	read_unlock_irqrestore(&tasklist_lock, flags);
-out:
-	atomic_dec(&refcount);
+
+unlock:
+	mutex_unlock(&syscall_trace_lock);
 }
 
 void stop_ftrace_syscalls(void)
@@ -116,8 +124,11 @@ void stop_ftrace_syscalls(void)
 	unsigned long flags;
 	struct task_struct *g, *t;
 
-	if (atomic_dec_return(&refcount))
-		goto out;
+	mutex_lock(&syscall_trace_lock);
+
+	/* There may still be other users */
+	if (--refcount)
+		goto unlock;
 
 	read_lock_irqsave(&tasklist_lock, flags);
 
@@ -126,8 +137,9 @@
 	} while_each_thread(g, t);
 
 	read_unlock_irqrestore(&tasklist_lock, flags);
-out:
-	atomic_inc(&refcount);
+
+unlock:
+	mutex_unlock(&syscall_trace_lock);
 }
 
 void ftrace_syscall_enter(struct pt_regs *regs)
@@ -137,12 +149,9 @@
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
 	int size;
 	int syscall_nr;
-	int cpu;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	cpu = raw_smp_processor_id();
-
 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
 		return;
@@ -168,12 +177,9 @@ void ftrace_syscall_exit(struct pt_regs *regs)
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
 	int syscall_nr;
-	int cpu;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	cpu = raw_smp_processor_id();
-
 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
 		return;
 
@@ -201,6 +207,7 @@ static int init_syscall_tracer(struct trace_array *tr)
 static void reset_syscall_tracer(struct trace_array *tr)
 {
 	stop_ftrace_syscalls();
+	tracing_reset_online_cpus(tr);
 }
 
 static struct trace_event syscall_enter_event = {
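
A note on the locking change in trace_syscalls.c: start_ftrace_syscalls() and
stop_ftrace_syscalls() now implement a classic mutex-protected enable counter.
Only the first user sets TIF_SYSCALL_FTRACE on every task, and only the last
user clears it again; the mutex makes the counter update and the whole
flag-toggling loop a single critical section. The old bare atomic could not do
that, since the inc/dec and the loop were two separate steps that could
interleave between CPUs. Below is a minimal user-space sketch of the same
pattern, using pthreads and purely illustrative names (trace_lock,
start_trace, ...) that are not part of the patch:

	/*
	 * Minimal user-space sketch of the refcount pattern above.
	 * pthread_mutex_t stands in for syscall_trace_lock, and the
	 * enable/disable helpers stand in for the TIF_SYSCALL_FTRACE
	 * loops; all names here are illustrative only.
	 */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t trace_lock = PTHREAD_MUTEX_INITIALIZER;
	static int refcount;	/* protected by trace_lock */

	static void enable_tracing_flags(void)
	{
		puts("flags set on all tasks");
	}

	static void disable_tracing_flags(void)
	{
		puts("flags cleared on all tasks");
	}

	void start_trace(void)
	{
		pthread_mutex_lock(&trace_lock);
		/* Only the first user toggles the flags */
		if (++refcount == 1)
			enable_tracing_flags();
		pthread_mutex_unlock(&trace_lock);
	}

	void stop_trace(void)
	{
		pthread_mutex_lock(&trace_lock);
		/* Only the last user clears the flags */
		if (--refcount == 0)
			disable_tracing_flags();
		pthread_mutex_unlock(&trace_lock);
	}

	int main(void)
	{
		start_trace();	/* first user: flags set */
		start_trace();	/* nested user: no toggle */
		stop_trace();	/* one user left: no toggle */
		stop_trace();	/* last user: flags cleared */
		return 0;
	}

Holding the mutex across the whole loop is what closes the race: with the old
atomic counter, one CPU could still be setting flags while another, having
already observed the counter, was concurrently clearing them.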