perf evsel: Do not use globals in config()

Instead, receive a callchain_param pointer to configure callchain
aspects, and do no callchain setup if NULL is passed.

This will allow fine-grained control over which evsels in an evlist
get callchains enabled.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Milian Wolff <milian.wolff@kdab.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-2mupip6khc92mh5x4nw9to82@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
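For reference, a minimal, hypothetical caller sketch (not part of this commit), assuming code inside the perf tree where struct perf_evlist, struct record_opts and the callchain_param global are already in scope; the helper name configure_events is made up for illustration:

/*
 * Hypothetical helper (illustration only): with the new signature a tool
 * decides per call whether callchain configuration should be applied.
 * Passing NULL leaves callchains untouched for every evsel in the list.
 */
static void configure_events(struct perf_evlist *evlist,
			     struct record_opts *opts,
			     bool want_callchains)
{
	perf_evlist__config(evlist, opts,
			    want_callchains ? &callchain_param : NULL);
}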
@@ -71,7 +71,7 @@ int test__perf_time_to_tsc(int subtest __maybe_unused)
 	CHECK__(parse_events(evlist, "cycles:u", NULL));
-	perf_evlist__config(evlist, &opts);
+	perf_evlist__config(evlist, &opts, NULL);
 	evsel = perf_evlist__first(evlist);
@@ -982,7 +982,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
 	struct perf_evlist *evlist = kvm->evlist;
 	char sbuf[STRERR_BUFSIZE];
-	perf_evlist__config(evlist, &kvm->opts);
+	perf_evlist__config(evlist, &kvm->opts, NULL);
 	/*
 	 * Note: exclude_{guest,host} do not apply here.
@@ -284,7 +284,7 @@ static int record__open(struct record *rec)
 	struct record_opts *opts = &rec->opts;
 	int rc = 0;
-	perf_evlist__config(evlist, opts);
+	perf_evlist__config(evlist, opts, &callchain_param);
 	evlist__for_each(evlist, pos) {
 try_again:
@@ -886,7 +886,7 @@ static int perf_top__start_counters(struct perf_top *top)
 	struct perf_evlist *evlist = top->evlist;
 	struct record_opts *opts = &top->record_opts;
-	perf_evlist__config(evlist, opts);
+	perf_evlist__config(evlist, opts, &callchain_param);
 	evlist__for_each(evlist, counter) {
 try_again:
@@ -2749,7 +2749,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 		goto out_delete_evlist;
 	}
-	perf_evlist__config(evlist, &trace->opts);
+	perf_evlist__config(evlist, &trace->opts, &callchain_param);
 	signal(SIGCHLD, sig_handler);
 	signal(SIGINT, sig_handler);
@@ -138,7 +138,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
 	perf_evlist__splice_list_tail(evlist, &parse_evlist.list);
 	evlist->nr_groups = parse_evlist.nr_groups;
-	perf_evlist__config(evlist, &opts);
+	perf_evlist__config(evlist, &opts, NULL);
 	err = perf_evlist__open(evlist);
 	if (err < 0) {
@@ -532,7 +532,7 @@ static int do_test_code_reading(bool try_kcore)
 		goto out_put;
 	}
-	perf_evlist__config(evlist, &opts);
+	perf_evlist__config(evlist, &opts, NULL);
 	evsel = perf_evlist__first(evlist);
@@ -80,7 +80,7 @@ int test__keep_tracking(int subtest __maybe_unused)
 	CHECK__(parse_events(evlist, "dummy:u", NULL));
 	CHECK__(parse_events(evlist, "cycles:u", NULL));
-	perf_evlist__config(evlist, &opts);
+	perf_evlist__config(evlist, &opts, NULL);
 	evsel = perf_evlist__first(evlist);
@@ -44,7 +44,7 @@ int test__syscall_openat_tp_fields(int subtest __maybe_unused)
 		goto out_delete_evlist;
 	}
-	perf_evsel__config(evsel, &opts);
+	perf_evsel__config(evsel, &opts, NULL);
 	thread_map__set_pid(evlist->threads, 0, getpid());
@@ -99,7 +99,7 @@ int test__PERF_RECORD(int subtest __maybe_unused)
 	perf_evsel__set_sample_bit(evsel, CPU);
 	perf_evsel__set_sample_bit(evsel, TID);
 	perf_evsel__set_sample_bit(evsel, TIME);
-	perf_evlist__config(evlist, &opts);
+	perf_evlist__config(evlist, &opts, NULL);
 	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
 	if (err < 0) {
@@ -417,7 +417,7 @@ int test__switch_tracking(int subtest __maybe_unused)
 	perf_evsel__set_sample_bit(tracking_evsel, TIME);
 	/* Config events */
-	perf_evlist__config(evlist, &opts);
+	perf_evlist__config(evlist, &opts, NULL);
 	/* Check moved event is still at the front */
 	if (cycles_evsel != perf_evlist__first(evlist)) {
@@ -123,11 +123,14 @@ void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
 int perf_evlist__open(struct perf_evlist *evlist);
 void perf_evlist__close(struct perf_evlist *evlist);
+struct callchain_param;
 void perf_evlist__set_id_pos(struct perf_evlist *evlist);
 bool perf_can_sample_identifier(void);
 bool perf_can_record_switch_events(void);
 bool perf_can_record_cpu_wide(void);
-void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts);
+void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
+			 struct callchain_param *callchain);
 int record_opts__config(struct record_opts *opts);
 int perf_evlist__prepare_workload(struct perf_evlist *evlist,
@@ -737,7 +737,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
  * enable/disable events specifically, as there's no
  * initial traced exec call.
  */
-void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
+void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
+			struct callchain_param *callchain)
 {
 	struct perf_evsel *leader = evsel->leader;
 	struct perf_event_attr *attr = &evsel->attr;
@@ -812,8 +813,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
 	if (perf_evsel__is_function_event(evsel))
 		evsel->attr.exclude_callchain_user = 1;
-	if (callchain_param.enabled && !evsel->no_aux_samples)
-		perf_evsel__config_callgraph(evsel, opts, &callchain_param);
+	if (callchain && callchain->enabled && !evsel->no_aux_samples)
+		perf_evsel__config_callgraph(evsel, opts, callchain);
 	if (opts->sample_intr_regs) {
 		attr->sample_regs_intr = opts->sample_intr_regs;
@@ -178,8 +178,11 @@ void perf_evsel__init(struct perf_evsel *evsel,
 void perf_evsel__exit(struct perf_evsel *evsel);
 void perf_evsel__delete(struct perf_evsel *evsel);
+struct callchain_param;
 void perf_evsel__config(struct perf_evsel *evsel,
-			struct record_opts *opts);
+			struct record_opts *opts,
+			struct callchain_param *callchain);
 int __perf_evsel__sample_size(u64 sample_type);
 void perf_evsel__calc_id_pos(struct perf_evsel *evsel);
@@ -129,7 +129,8 @@ bool perf_can_record_cpu_wide(void)
 	return true;
 }
-void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
+void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
+			 struct callchain_param *callchain)
 {
 	struct perf_evsel *evsel;
 	bool use_sample_identifier = false;
@@ -148,7 +149,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
 		use_comm_exec = perf_can_comm_exec();
 	evlist__for_each(evlist, evsel) {
-		perf_evsel__config(evsel, opts);
+		perf_evsel__config(evsel, opts, callchain);
 		if (evsel->tracking && use_comm_exec)
 			evsel->attr.comm_exec = 1;
 	}