commit 9eff26ea    Author: Paul Mackerras    Committer: Ingo Molnar

powerpc/perf_events: Fix call-graph recording, add perf_arch_fetch_caller_regs

This implements a powerpc version of perf_arch_fetch_caller_regs
to get correct call-graphs.

It's implemented in assembly because that way we can be sure there isn't
a stack frame for perf_arch_fetch_caller_regs.  If it were in C, gcc might
or might not create a stack frame for it, which would affect the number
of levels we have to skip.
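
As an illustration of that hazard (not part of the patch), a hypothetical C version might look like the sketch below; depending on how gcc compiles it (own stack frame, inlined, frame omitted), the number of back-chain hops between it and the frame the caller asked about changes, so the skip count cannot be pinned down at the source level.

/*
 * Hypothetical C version, shown only to illustrate the off-by-one hazard
 * described above -- it is NOT what the patch does.  Depending on how gcc
 * compiles this function, the walk below may start one level deeper or
 * shallower than intended, which is exactly why the real routine is
 * written in assembly.
 */
static unsigned long nth_caller_frame(int n)
{
	/* On powerpc the frame address is the stack pointer (r1), and the
	 * word at offset 0 of each frame is the caller's frame address. */
	unsigned long sp = (unsigned long)__builtin_frame_address(0);

	while (n-- > 0)
		sp = *(unsigned long *)sp;	/* follow the back chain */

	return sp;
}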

With this, we see results from perf record -e lock:lock_acquire like
this:

 # Samples: 24878
 #
 # Overhead         Command      Shared Object  Symbol
 # ........  ..............  .................  ......
 #
    14.99%            perf  [kernel.kallsyms]  [k] ._raw_spin_lock
                      |
                      --- ._raw_spin_lock
                         |
                         |--25.00%-- .alloc_fd
                         |          (nil)
                         |          |
                         |          |--50.00%-- .anon_inode_getfd
                         |          |          .sys_perf_event_open
                         |          |          syscall_exit
                         |          |          syscall
                         |          |          create_counter
                         |          |          __cmd_record
                         |          |          run_builtin
                         |          |          main
                         |          |          0xfd2e704
                         |          |          0xfd2e8c0
                         |          |          (nil)

... etc.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: anton@samba.org
Cc: linuxppc-dev@ozlabs.org
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20100318050513.GA6575@drongo>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 00909e95
@@ -28,6 +28,7 @@
 #define PPC_LLARX(t, a, b, eh)	PPC_LDARX(t, a, b, eh)
 #define PPC_STLCX	stringify_in_c(stdcx.)
 #define PPC_CNTLZL	stringify_in_c(cntlzd)
+#define PPC_LR_STKOFF	16
 
 /* Move to CR, single-entry optimized version. Only available
  * on POWER4 and later.
@@ -51,6 +52,7 @@
 #define PPC_STLCX	stringify_in_c(stwcx.)
 #define PPC_CNTLZL	stringify_in_c(cntlzw)
 #define PPC_MTOCRF	stringify_in_c(mtcrf)
+#define PPC_LR_STKOFF	4
 
 #endif
@@ -127,3 +127,31 @@ _GLOBAL(__setup_cpu_power7)
 _GLOBAL(__restore_cpu_power7)
 	/* place holder */
 	blr
+
+#ifdef CONFIG_EVENT_TRACING
+/*
+ * Get a minimal set of registers for our caller's nth caller.
+ * r3 = regs pointer, r5 = n.
+ *
+ * We only get R1 (stack pointer), NIP (next instruction pointer)
+ * and LR (link register).  These are all we can get in the
+ * general case without doing complicated stack unwinding, but
+ * fortunately they are enough to do a stack backtrace, which
+ * is all we need them for.
+ */
+_GLOBAL(perf_arch_fetch_caller_regs)
+	mr	r6,r1
+	cmpwi	r5,0
+	mflr	r4
+	ble	2f
+	mtctr	r5
+1:	PPC_LL	r6,0(r6)
+	bdnz	1b
+	PPC_LL	r4,PPC_LR_STKOFF(r6)
+2:	PPC_LL	r7,0(r6)
+	PPC_LL	r7,PPC_LR_STKOFF(r7)
+	PPC_STL	r6,GPR1-STACK_FRAME_OVERHEAD(r3)
+	PPC_STL	r4,_NIP-STACK_FRAME_OVERHEAD(r3)
+	PPC_STL	r7,_LINK-STACK_FRAME_OVERHEAD(r3)
+	blr
+#endif /* CONFIG_EVENT_TRACING */
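
For readers less fluent in powerpc assembly, the walk performed by perf_arch_fetch_caller_regs can be rendered as the C sketch below (illustrative only; as the commit message explains, the real routine must stay in assembly so it is guaranteed not to add a frame of its own). It assumes the usual powerpc stack layout: each frame begins with a back-chain pointer at offset 0, and the LR a callee saved into that frame sits at PPC_LR_STKOFF (16 on 64-bit, 4 on 32-bit, per the defines added above). The __builtin_* calls merely stand in for reading r1 and LR on entry.

#include <asm/ptrace.h>		/* struct pt_regs: gpr[], nip, link */

#ifdef __powerpc64__
#define PPC_LR_STKOFF	16	/* matches the 64-bit define above */
#else
#define PPC_LR_STKOFF	4	/* matches the 32-bit define above */
#endif

/* Illustrative C rendering of the assembly above -- not the actual code. */
static void fetch_caller_regs_sketch(struct pt_regs *regs, long n)
{
	unsigned long sp  = (unsigned long)__builtin_frame_address(0);	 /* r1 on entry */
	unsigned long nip = (unsigned long)__builtin_return_address(0); /* LR on entry */
	long i;

	/* Walk n frames up the stack: offset 0 of each frame holds the
	 * previous frame's address (the back chain). */
	for (i = 0; i < n; i++)
		sp = *(unsigned long *)sp;

	/* After moving up, the resume address of that frame's function is
	 * the LR its callee saved at PPC_LR_STKOFF. */
	if (n > 0)
		nip = *(unsigned long *)(sp + PPC_LR_STKOFF);

	regs->gpr[1] = sp;	/* R1:  stack pointer of the target frame */
	regs->nip    = nip;	/* NIP: where that frame resumes          */
	/* LINK: the LR saved one more frame up, giving the unwinder a
	 * starting link register too. */
	regs->link   = *(unsigned long *)(*(unsigned long *)sp + PPC_LR_STKOFF);
}

The cmpwi r5,0 / ble 2f pair in the assembly corresponds to the "if (n > 0)" guard here and to skipping the loop entirely when n is zero or negative.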