Commit 1fd8df2c authored by Masami Hiramatsu, committed by Steven Rostedt

tracing/kprobes: Fix kprobe-tracer to support stack trace

Fix the kprobe-tracer so that kernel stack traces are recorded correctly.
Since the execution path of kprobe-based dynamic events is different
from that of other tracepoint-based events, the normal ftrace_trace_stack()
does not work correctly for them. To fix that, this patch introduces
ftrace_trace_stack_regs(), which traces the stack via a given pt_regs
instead of the current stack pointer.

e.g.

 # echo p schedule+4 > /sys/kernel/debug/tracing/kprobe_events
 # echo 1 > /sys/kernel/debug/tracing/options/stacktrace
 # echo 1 > /sys/kernel/debug/tracing/events/kprobes/enable
 # head -n 20 /sys/kernel/debug/tracing/trace
            bash-2968  [000] 10297.050245: p_schedule_4: (schedule+0x4/0x4ca)
            bash-2968  [000] 10297.050247: <stack trace>
 => schedule_timeout
 => n_tty_read
 => tty_read
 => vfs_read
 => sys_read
 => system_call_fastpath
     kworker/0:1-2940  [000] 10297.050265: p_schedule_4: (schedule+0x4/0x4ca)
     kworker/0:1-2940  [000] 10297.050266: <stack trace>
 => worker_thread
 => kthread
 => kernel_thread_helper
            sshd-1132  [000] 10297.050365: p_schedule_4: (schedule+0x4/0x4ca)
            sshd-1132  [000] 10297.050365: <stack trace>
 => sysret_careful
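
(Not part of the original description: for completeness, the test probe above
can be torn down again through the same debugfs interface once the traces
have been inspected.)

 # echo 0 > /sys/kernel/debug/tracing/events/kprobes/enable
 # echo 0 > /sys/kernel/debug/tracing/options/stacktrace
 # echo > /sys/kernel/debug/tracing/kprobe_events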

Note: Even with this fix, the first entry will be skipped if the probe
is put in the function entry area before the frame pointer is set up
(usually the first 4 bytes, i.e. push %bp; mov %sp,%bp, on x86), because
the stack unwinder depends on the frame pointer.
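
(An illustrative aside, not from the original note: the prologue boundary can
be checked by disassembling the probed function, e.g. with objdump on an
uncompressed kernel image; the addresses and output below are made up and will
differ per build.)

 # objdump -d vmlinux | grep -A3 '<schedule>:'
 ffffffff8105e8d0 <schedule>:
 ffffffff8105e8d0:	55                	push   %rbp
 ffffffff8105e8d1:	48 89 e5          	mov    %rsp,%rbp

Here the frame pointer is set up after the first 4 bytes, so probing at
schedule+4 (as in the example above) lets the unwinder start from a valid
frame, while a probe at schedule+0, before the push/mov, would drop the
first entry of the trace as described.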
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: yrl.pp-manager.tt@hitachi.com
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Namhyung Kim <namhyung@gmail.com>
Link: http://lkml.kernel.org/r/20110608070934.17777.17116.stgit@fedora15
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Parent c624d33f
@@ -129,6 +129,10 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
 				       struct ring_buffer_event *event,
 				       unsigned long flags, int pc);
+void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+					    struct ring_buffer_event *event,
+					    unsigned long flags, int pc,
+					    struct pt_regs *regs);
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event);
......
@@ -1193,6 +1193,18 @@ void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
 
+void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+					    struct ring_buffer_event *event,
+					    unsigned long flags, int pc,
+					    struct pt_regs *regs)
+{
+	ring_buffer_unlock_commit(buffer, event);
+
+	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
+	ftrace_trace_userstack(buffer, flags, pc);
+}
+EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event)
 {
@@ -1238,7 +1250,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 #ifdef CONFIG_STACKTRACE
 static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
-				 int skip, int pc)
+				 int skip, int pc, struct pt_regs *regs)
 {
 	struct ftrace_event_call *call = &event_kernel_stack;
 	struct ring_buffer_event *event;
@@ -1257,24 +1269,36 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	trace.skip		= skip;
 	trace.entries		= entry->caller;
 
-	save_stack_trace(&trace);
+	if (regs)
+		save_stack_trace_regs(regs, &trace);
+	else
+		save_stack_trace(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 }
 
+void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
+			     int skip, int pc, struct pt_regs *regs)
+{
+	if (!(trace_flags & TRACE_ITER_STACKTRACE))
+		return;
+
+	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
+}
+
 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
 			int skip, int pc)
 {
 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
 		return;
 
-	__ftrace_trace_stack(buffer, flags, skip, pc);
+	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
 }
 
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 		   int pc)
 {
-	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
+	__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
 }
 
 /**
@@ -1290,7 +1314,7 @@ void trace_dump_stack(void)
 	local_save_flags(flags);
 
 	/* skipping 3 traces, seems to get us at the caller of this function */
-	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
 }
 
 static DEFINE_PER_CPU(int, user_stack_count);
......
@@ -389,6 +389,9 @@ void update_max_tr_single(struct trace_array *tr,
 
 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
 			int skip, int pc);
+void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
+			     int skip, int pc, struct pt_regs *regs);
+
 void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
 			    int pc);
@@ -400,6 +403,12 @@ static inline void ftrace_trace_stack(struct ring_buffer *buffer,
 {
 }
 
+static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
+					   unsigned long flags, int skip,
+					   int pc, struct pt_regs *regs)
+{
+}
+
 static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
 					  unsigned long flags, int pc)
 {
......
@@ -1397,7 +1397,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
-		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
+		trace_nowake_buffer_unlock_commit_regs(buffer, event,
+						       irq_flags, pc, regs);
 }
 
 /* Kretprobe handler */
@@ -1429,7 +1430,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
-		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
+		trace_nowake_buffer_unlock_commit_regs(buffer, event,
+						       irq_flags, pc, regs);
 }
 
 /* Event entry printers */
......