perf trace: Exclude the kernel part of the callchain leading to a syscall

The kernel parts are not that useful:

  # trace -m 512 -e nanosleep --call dwarf  usleep 1
     0.065 ( 0.065 ms): usleep/18732 nanosleep(rqtp: 0x7ffc4ee4e200) = 0
                                       syscall_slow_exit_work ([kernel.kallsyms])
                                       do_syscall_64 ([kernel.kallsyms])
                                       return_from_SYSCALL_64 ([kernel.kallsyms])
                                       __nanosleep (/usr/lib64/libc-2.22.so)
                                       usleep (/usr/lib64/libc-2.22.so)
                                       main (/usr/bin/usleep)
                                       __libc_start_main (/usr/lib64/libc-2.22.so)
                                       _start (/usr/bin/usleep)
  #

So let's just use perf_event_attr.exclude_callchain_kernel to avoid
collecting it in the ring buffer:

  # trace -m 512 -e nanosleep --call dwarf  usleep 1
     0.063 ( 0.063 ms): usleep/19212 nanosleep(rqtp: 0x7ffc3df10fb0) = 0
                                       __nanosleep (/usr/lib64/libc-2.22.so)
                                       usleep (/usr/lib64/libc-2.22.so)
                                       main (/usr/bin/usleep)
                                       __libc_start_main (/usr/lib64/libc-2.22.so)
                                       _start (/usr/bin/usleep)
  #
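
For reference, here is a minimal standalone sketch (not part of the patch) of
what setting perf_event_attr.exclude_callchain_kernel does when opening an
event with callchain sampling. The hardcoded tracepoint id, the helper name and
the bare-bones error handling are illustrative assumptions, and it relies on
frame pointer callchains rather than the DWARF unwinding that '--call dwarf'
sets up:

  /* user_only_callchain.c: hypothetical example, open a tracepoint event
   * with callchain sampling and ask the kernel to drop its own frames. */
  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <string.h>
  #include <stdio.h>
  #include <unistd.h>

  static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                 int cpu, int group_fd, unsigned long flags)
  {
          return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
  }

  int main(void)
  {
          struct perf_event_attr attr;
          int fd;

          memset(&attr, 0, sizeof(attr));
          attr.size          = sizeof(attr);
          attr.type          = PERF_TYPE_TRACEPOINT;
          attr.config        = 318; /* e.g. raw_syscalls:sys_exit id, varies per system */
          attr.sample_period = 1;
          attr.sample_type   = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
          /* The bit this patch sets on the sys_exit event: no kernel frames */
          attr.exclude_callchain_kernel = 1;

          fd = sys_perf_event_open(&attr, 0 /* this thread */, -1, -1, 0);
          if (fd < 0) {
                  perror("perf_event_open");
                  return 1;
          }

          printf("callchains for this event will contain only user space frames\n");
          close(fd);
          return 0;
  }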

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Milian Wolff <milian.wolff@kdab.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-qctu3gqhpim0dfbcp9d86c91@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
@@ -123,6 +123,9 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
 	man pages for details. The ones that are most useful in 'perf trace'
 	are 'dwarf' and 'lbr', where available, try: 'perf trace --call-graph dwarf'.
 
+--kernel-syscall-graph::
+	 Show the kernel callchains on the syscall exit path.
+
 --event::
 	Trace other events, see 'perf list' for a complete list.

@@ -159,6 +159,7 @@ struct trace {
 	bool			show_comm;
 	bool			show_tool_stats;
 	bool			trace_syscalls;
+	bool			kernel_syscallchains;
 	bool			force;
 	bool			vfs_getname;
 	int			trace_pgfaults;

@@ -2661,6 +2662,15 @@ static int trace__add_syscall_newtp(struct trace *trace)
 	perf_evlist__add(evlist, sys_enter);
 	perf_evlist__add(evlist, sys_exit);
 
+	if (trace->opts.callgraph_set && !trace->kernel_syscallchains) {
+		/*
+		 * We're interested only in the user space callchain
+		 * leading to the syscall, allow overriding that for
+		 * debugging reasons using --kernel_syscall_callchains
+		 */
+		sys_exit->attr.exclude_callchain_kernel = 1;
+	}
+
 	trace->syscalls.events.sys_enter = sys_enter;
 	trace->syscalls.events.sys_exit = sys_exit;

@@ -3221,6 +3231,7 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
 		.output = stderr,
 		.show_comm = true,
 		.trace_syscalls = true,
+		.kernel_syscallchains = false,
 	};
 	const char *output_name = NULL;
 	const char *ev_qualifier_str = NULL;

@@ -3269,6 +3280,8 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
 	OPT_CALLBACK(0, "call-graph", &trace.opts,
 		     "record_mode[,record_size]", record_callchain_help,
 		     &record_parse_callchain_opt),
+	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
+		    "Show the kernel callchains on the syscall exit path"),
 	OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
 		     "per thread proc mmap processing timeout in ms"),
 	OPT_END()
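
As a usage note, with the patch applied the kernel side of the callchain can
still be brought back for debugging, as the comment in trace__add_syscall_newtp
mentions, by combining the new option with --call-graph (output omitted here):

  # trace -m 512 -e nanosleep --call-graph dwarf --kernel-syscall-graph usleep 1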