diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 2d182f119d1ddd28bc185b9c56d2bdbbdcfab560..58d6a6109040ce0955fb4c43ca1d9d891c51699b 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -605,7 +605,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		return;
 	}
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
+	if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) {
 		*parent = old;
 		return;
 	}
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 82ddfd3a75aff0e60ac3c2bebae964fcf8808477..3e298e64f0dbc26b2d90f9d99996e4c091316855 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -190,7 +190,7 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
 		goto out;
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		goto out;
-	if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
+	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
 		goto out;
 	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
 	/* Only trace if the calling function expects to. */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 356d2ec8e2fbbf0a4e0ccee99c1f261226ed4cde..fcf12af074270a299a6088e323ffcb16d53f512c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -33,6 +33,7 @@ config X86
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_GRAPH_FP_TEST
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
 	select HAVE_FTRACE_SYSCALLS
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index c929add475c9caf073f97c11b15ff6d645226126..0d4b28564c142fe59e6c6c8004c9ebac846f9544 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1154,6 +1154,7 @@ ENTRY(ftrace_graph_caller)
 	pushl %edx
 	movl 0xc(%esp), %edx
 	lea 0x4(%ebp), %eax
+	movl (%ebp), %ecx
 	subl $MCOUNT_INSN_SIZE, %edx
 	call prepare_ftrace_return
 	popl %edx
@@ -1168,6 +1169,7 @@ return_to_handler:
 	pushl %eax
 	pushl %ecx
 	pushl %edx
+	movl %ebp, %eax
 	call ftrace_return_to_handler
 	movl %eax, 0xc(%esp)
 	popl %edx
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index de74f0a3e0ed38a75563baea16934d530a3e68d2..c251be7451079ac9150033b5987f6259902328b0 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -135,6 +135,7 @@ ENTRY(ftrace_graph_caller)
 
 	leaq 8(%rbp), %rdi
 	movq 0x38(%rsp), %rsi
+	movq (%rbp), %rdx
 	subq $MCOUNT_INSN_SIZE, %rsi
 
 	call prepare_ftrace_return
@@ -150,6 +151,7 @@ GLOBAL(return_to_handler)
 	/* Save the return values */
 	movq %rax, (%rsp)
 	movq %rdx, 8(%rsp)
+	movq %rbp, %rdi
 
 	call ftrace_return_to_handler
 
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index b79c5533c421b4aa7e18d753b64dba3c1f055b90..d94e1ea3b9fe03557f99494fd4ba123143103233 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -408,7 +408,8 @@ int ftrace_disable_ftrace_graph_caller(void)
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+			   unsigned long frame_pointer)
 {
 	unsigned long old;
 	int faulted;
@@ -453,7 +454,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		return;
 	}
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
+	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+				     frame_pointer) == -EBUSY) {
 		*parent = old;
 		return;
 	}
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 39b95c56587e8f1f8852aed9318148fcc16aac56..dc3b1328aaeb6146f0593fafbdbfcde9eeaf8bc2 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -362,6 +362,7 @@ struct ftrace_ret_stack {
 	unsigned long func;
 	unsigned long long calltime;
 	unsigned long long subtime;
+	unsigned long fp;
 };
 
 /*
@@ -372,7 +373,8 @@ struct ftrace_ret_stack {
 extern void return_to_handler(void);
 
 extern int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+			 unsigned long frame_pointer);
 
 /*
  * Sometimes we don't want to trace a function with the function
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1eac85253ce96f8feaf13fcdc05a82998ea4b92e..b17ed8787ded741a19067598d332df7de15080c3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -18,6 +18,13 @@ config HAVE_FUNCTION_TRACER
 config HAVE_FUNCTION_GRAPH_TRACER
 	bool
 
+config HAVE_FUNCTION_GRAPH_FP_TEST
+	bool
+	help
+	 An arch may pass in a unique value (frame pointer) to both the
+	 entering and exiting of a function. On exit, the value is compared
+	 and if it does not match, then it will panic the kernel.
+
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	bool
 	help
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 8b592418d8b28953a28e40c2202a50cb2b40dd37..d2249abafb53ed66430678f99d7d7642962e7d90 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -57,7 +57,8 @@ static struct tracer_flags tracer_flags = {
 
 /* Add a function return address to the trace stack on thread info.*/
 int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+			 unsigned long frame_pointer)
 {
 	unsigned long long calltime;
 	int index;
@@ -85,6 +86,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = calltime;
 	current->ret_stack[index].subtime = 0;
+	current->ret_stack[index].fp = frame_pointer;
 	*depth = index;
 
 	return 0;
@@ -92,7 +94,8 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 
 /* Retrieve a function return address to the trace stack on thread info.*/
 static void
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
+			unsigned long frame_pointer)
 {
 	int index;
 
@@ -106,6 +109,31 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 		return;
 	}
 
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+	/*
+	 * The arch may choose to record the frame pointer used
+	 * and check it here to make sure that it is what we expect it
+	 * to be. If gcc does not set the place holder of the return
+	 * address in the frame pointer, and does a copy instead, then
+	 * the function graph trace will fail. This test detects this
+	 * case.
+	 *
+	 * Currently, x86_32 with optimize for size (-Os) makes the latest
+	 * gcc do the above.
+	 */
+	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
+		ftrace_graph_stop();
+		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
+		     "  from func %pF return to %lx\n",
+		     current->ret_stack[index].fp,
+		     frame_pointer,
+		     (void *)current->ret_stack[index].func,
+		     current->ret_stack[index].ret);
+		*ret = (unsigned long)panic;
+		return;
+	}
+#endif
+
 	*ret = current->ret_stack[index].ret;
 	trace->func = current->ret_stack[index].func;
 	trace->calltime = current->ret_stack[index].calltime;
@@ -117,12 +145,12 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
  * Send the trace to the ring-buffer.
  * @return the original return address.
 */
-unsigned long ftrace_return_to_handler(void)
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 {
 	struct ftrace_graph_ret trace;
 	unsigned long ret;
 
-	ftrace_pop_return_trace(&trace, &ret);
+	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
 	trace.rettime = trace_clock_local();
 	ftrace_graph_return(&trace);
 	barrier();
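
The check this patch adds is easiest to see in isolation. Below is a minimal user-space sketch of the same idea, not kernel code: the entry side records the caller's frame pointer next to the saved return address, and the exit side refuses to hand the return address back when the recorded frame pointer does not match, just as ftrace_pop_return_trace() now does. All names here (ret_stack_entry, push_return_trace, pop_return_trace, STACK_DEPTH) are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

#define STACK_DEPTH 64

/* Mirrors the two fields this patch relies on in struct ftrace_ret_stack. */
struct ret_stack_entry {
	unsigned long ret;	/* original return address saved at entry */
	unsigned long fp;	/* frame pointer recorded at entry */
};

static struct ret_stack_entry ret_stack[STACK_DEPTH];
static int depth = -1;

/* Entry hook: save the return address tagged with the frame pointer. */
static int push_return_trace(unsigned long ret, unsigned long frame_pointer)
{
	if (depth + 1 >= STACK_DEPTH)
		return -1;	/* stands in for -EBUSY */
	depth++;
	ret_stack[depth].ret = ret;
	ret_stack[depth].fp = frame_pointer;
	return 0;
}

/* Exit hook: hand back the return address only on a matching frame pointer. */
static unsigned long pop_return_trace(unsigned long frame_pointer)
{
	struct ret_stack_entry *e = &ret_stack[depth--];

	if (e->fp != frame_pointer) {
		/* The kernel WARNs, stops the graph tracer, and redirects
		 * the return to panic(); the sketch just aborts. */
		fprintf(stderr, "Bad frame pointer: expected %lx, received %lx\n",
			e->fp, frame_pointer);
		abort();
	}
	return e->ret;
}

int main(void)
{
	/* Matching frame pointer: the saved address comes back. */
	push_return_trace(0x1234, 0xbeef);
	printf("ret = %lx\n", pop_return_trace(0xbeef));

	/*
	 * Mismatched frame pointer, as when gcc copies the return-address
	 * slot instead of reusing it: the failure path fires.
	 */
	push_return_trace(0x5678, 0xbeef);
	pop_return_trace(0xdead);
	return 0;
}

On x86, the frame_pointer argument fed into this comparison is simply the word at %ebp/%rbp, captured by the movl/movq instructions the patch adds to ftrace_graph_caller and return_to_handler above.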