Commit 0df6ade7 authored by Ian Rogers, committed by Arnaldo Carvalho de Melo

perf evlist: Rename cpus to user_requested_cpus

evlist contains cpus and all_cpus. all_cpus is the union of the cpu maps
of all evsels.

For non-task targets, cpus is set to the cpus requested on the command
line, defaulting to all online cpus if no cpus are specified.

For an uncore event, all_cpus may be just CPU 0 or every online CPU.

This causes all_cpus to have fewer values than the cpus variable, which
is confusing given the 'all' in the name.

To try to make the behavior clearer, rename cpus to user_requested_cpus
and add comments on the two struct variables.
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Antonov <alexander.antonov@linux.intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: German Gomez <german.gomez@arm.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: John Garry <john.garry@huawei.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yonghong Song <yhs@fb.com>
Cc: bpf@vger.kernel.org
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: netdev@vger.kernel.org
Link: http://lore.kernel.org/lkml/20220328232648.2127340-3-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Parent d4ff9265
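To illustrate the distinction the commit message describes, here is a minimal libperf sketch (not part of this commit) that builds the two kinds of map side by side; the single-CPU "0" mask stands in for an uncore PMU's cpumask, which a real tool would read from sysfs:

/* Minimal sketch, not from this commit: contrasts the user-requested CPU
 * map with the narrower map an uncore-style event might contribute to
 * all_cpus. Assumes libperf is installed (link with -lperf); the "0"
 * cpumask is a hypothetical stand-in for a real uncore PMU's cpumask. */
#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	/* No CPU list given on the command line: the default is every
	 * online CPU, which is what user_requested_cpus ends up holding. */
	struct perf_cpu_map *user_requested = perf_cpu_map__new(NULL);
	/* An uncore PMU is typically counted on a single CPU, so the union
	 * of all evsel maps (all_cpus) can be smaller than the above. */
	struct perf_cpu_map *uncore = perf_cpu_map__new("0");

	printf("user requested %d CPU(s), uncore evsel covers %d\n",
	       perf_cpu_map__nr(user_requested), perf_cpu_map__nr(uncore));

	perf_cpu_map__put(user_requested);
	perf_cpu_map__put(uncore);
	return 0;
}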
@@ -41,10 +41,10 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
 	 */
 	if (!evsel->own_cpus || evlist->has_user_cpus) {
 		perf_cpu_map__put(evsel->cpus);
-		evsel->cpus = perf_cpu_map__get(evlist->cpus);
-	} else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {
+		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
+	} else if (!evsel->system_wide && perf_cpu_map__empty(evlist->user_requested_cpus)) {
 		perf_cpu_map__put(evsel->cpus);
-		evsel->cpus = perf_cpu_map__get(evlist->cpus);
+		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
 	} else if (evsel->cpus != evsel->own_cpus) {
 		perf_cpu_map__put(evsel->cpus);
 		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
@@ -123,10 +123,10 @@ static void perf_evlist__purge(struct perf_evlist *evlist)

 void perf_evlist__exit(struct perf_evlist *evlist)
 {
-	perf_cpu_map__put(evlist->cpus);
+	perf_cpu_map__put(evlist->user_requested_cpus);
 	perf_cpu_map__put(evlist->all_cpus);
 	perf_thread_map__put(evlist->threads);
-	evlist->cpus = NULL;
+	evlist->user_requested_cpus = NULL;
 	evlist->all_cpus = NULL;
 	evlist->threads = NULL;
 	fdarray__exit(&evlist->pollfd);
@@ -155,9 +155,9 @@ void perf_evlist__set_maps(struct perf_evlist *evlist,
 	 * original reference count of 1. If that is not the case it is up to
 	 * the caller to increase the reference count.
 	 */
-	if (cpus != evlist->cpus) {
-		perf_cpu_map__put(evlist->cpus);
-		evlist->cpus = perf_cpu_map__get(cpus);
+	if (cpus != evlist->user_requested_cpus) {
+		perf_cpu_map__put(evlist->user_requested_cpus);
+		evlist->user_requested_cpus = perf_cpu_map__get(cpus);
 	}

 	if (threads != evlist->threads) {
@@ -294,7 +294,7 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
-	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
 	int nr_threads = perf_thread_map__nr(evlist->threads);
 	int nfds = 0;
 	struct perf_evsel *evsel;
@@ -426,7 +426,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 	       int idx, struct perf_mmap_param *mp, int cpu_idx,
 	       int thread, int *_output, int *_output_overwrite)
 {
-	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
+	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->user_requested_cpus, cpu_idx);
 	struct perf_evsel *evsel;
 	int revent;
@@ -536,7 +536,7 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 	     struct perf_mmap_param *mp)
 {
 	int nr_threads = perf_thread_map__nr(evlist->threads);
-	int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
 	int cpu, thread;

 	for (cpu = 0; cpu < nr_cpus; cpu++) {
@@ -564,8 +564,8 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
 {
 	int nr_mmaps;

-	nr_mmaps = perf_cpu_map__nr(evlist->cpus);
-	if (perf_cpu_map__empty(evlist->cpus))
+	nr_mmaps = perf_cpu_map__nr(evlist->user_requested_cpus);
+	if (perf_cpu_map__empty(evlist->user_requested_cpus))
 		nr_mmaps = perf_thread_map__nr(evlist->threads);

 	return nr_mmaps;
@@ -576,7 +576,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
 			  struct perf_mmap_param *mp)
 {
 	struct perf_evsel *evsel;
-	const struct perf_cpu_map *cpus = evlist->cpus;
+	const struct perf_cpu_map *cpus = evlist->user_requested_cpus;
 	const struct perf_thread_map *threads = evlist->threads;

 	if (!ops || !ops->get || !ops->mmap)
...
@@ -19,7 +19,12 @@ struct perf_evlist {
 	int			 nr_entries;
 	int			 nr_groups;
 	bool			 has_user_cpus;
-	struct perf_cpu_map	*cpus;
+	/**
+	 * The cpus passed from the command line or all online CPUs by
+	 * default.
+	 */
+	struct perf_cpu_map	*user_requested_cpus;
+	/** The union of all evsel cpu maps. */
 	struct perf_cpu_map	*all_cpus;
 	struct perf_thread_map	*threads;
 	int			 nr_mmaps;
...
@@ -199,7 +199,7 @@ static int cs_etm_set_option(struct auxtrace_record *itr,
 			     struct evsel *evsel, u32 option)
 {
 	int i, err = -EINVAL;
-	struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus;
+	struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
 	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);

 	/* Set option of each CPU we have */
@@ -299,7 +299,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
 				container_of(itr, struct cs_etm_recording, itr);
 	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
 	struct evsel *evsel, *cs_etm_evsel = NULL;
-	struct perf_cpu_map *cpus = evlist->core.cpus;
+	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 	bool privileged = perf_event_paranoid_check(-1);
 	int err = 0;
@@ -522,7 +522,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
 {
 	int i;
 	int etmv3 = 0, etmv4 = 0, ete = 0;
-	struct perf_cpu_map *event_cpus = evlist->core.cpus;
+	struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
 	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);

 	/* cpu map is not empty, we have specific CPUs to work with */
@@ -713,7 +713,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
 	u32 offset;
 	u64 nr_cpu, type;
 	struct perf_cpu_map *cpu_map;
-	struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
+	struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
 	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
 	struct cs_etm_recording *ptr =
 			container_of(itr, struct cs_etm_recording, itr);
...
@@ -144,7 +144,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
 			container_of(itr, struct arm_spe_recording, itr);
 	struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
 	struct evsel *evsel, *arm_spe_evsel = NULL;
-	struct perf_cpu_map *cpus = evlist->core.cpus;
+	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 	bool privileged = perf_event_paranoid_check(-1);
 	struct evsel *tracking_evsel;
 	int err;
...
@@ -110,7 +110,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
 			container_of(itr, struct intel_bts_recording, itr);
 	struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
 	struct evsel *evsel, *intel_bts_evsel = NULL;
-	const struct perf_cpu_map *cpus = evlist->core.cpus;
+	const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 	bool privileged = perf_event_paranoid_check(-1);

 	if (opts->auxtrace_sample_mode) {
...
@@ -382,7 +382,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
 			ui__warning("Intel Processor Trace: TSC not available\n");
 	}

-	per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus);
+	per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);

 	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
 	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -632,7 +632,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
 	bool have_timing_info, need_immediate = false;
 	struct evsel *evsel, *intel_pt_evsel = NULL;
-	const struct perf_cpu_map *cpus = evlist->core.cpus;
+	const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 	bool privileged = perf_event_paranoid_check(-1);
 	u64 tsc_bit;
 	int err;
...
@@ -151,7 +151,7 @@ static int bench_evlist_open_close__run(char *evstr)
 	init_stats(&time_stats);

-	printf("  Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.cpus));
+	printf("  Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.user_requested_cpus));
 	printf("  Number of threads:\t%d\n", evlist->core.threads->nr);
 	printf("  Number of events:\t%d (%d fds)\n",
 	       evlist->core.nr_entries, evlist__count_evsel_fds(evlist));
...
@@ -301,7 +301,7 @@ static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
 static int set_tracing_cpu(struct perf_ftrace *ftrace)
 {
-	struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;
+	struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus;

 	if (!target__has_cpu(&ftrace->target))
 		return 0;
...
@@ -987,7 +987,7 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
 	int m, tm, nr_mmaps = evlist->core.nr_mmaps;
 	struct mmap *mmap = evlist->mmap;
 	struct mmap *overwrite_mmap = evlist->overwrite_mmap;
-	struct perf_cpu_map *cpus = evlist->core.cpus;
+	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;

 	thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
 					      thread_data->mask->maps.nbits);
@@ -1881,7 +1881,7 @@ static int record__synthesize(struct record *rec, bool tail)
 		return err;
 	}

-	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
+	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.user_requested_cpus,
 					     process_synthesized_event, NULL);
 	if (err < 0) {
 		pr_err("Couldn't synthesize cpu map.\n");
@@ -3675,7 +3675,7 @@ static int record__init_thread_default_masks(struct record *rec, struct perf_cpu
 static int record__init_thread_masks(struct record *rec)
 {
 	int ret = 0;
-	struct perf_cpu_map *cpus = rec->evlist->core.cpus;
+	struct perf_cpu_map *cpus = rec->evlist->core.user_requested_cpus;

 	if (!record__threads_enabled(rec))
 		return record__init_thread_default_masks(rec, cpus);
...
@@ -804,7 +804,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	if (group)
 		evlist__set_leader(evsel_list);

-	if (!cpu_map__is_dummy(evsel_list->core.cpus)) {
+	if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
 		if (affinity__setup(&saved_affinity) < 0)
 			return -1;
 		affinity = &saved_affinity;
@@ -1458,7 +1458,7 @@ static int perf_stat_init_aggr_mode(void)
 	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);

 	if (get_id) {
-		stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus,
+		stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
 							 get_id, /*data=*/NULL);
 		if (!stat_config.aggr_map) {
 			pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
@@ -1472,8 +1472,8 @@ static int perf_stat_init_aggr_mode(void)
 	 * taking the highest cpu number to be the size of
 	 * the aggregation translate cpumap.
 	 */
-	if (evsel_list->core.cpus)
-		nr = perf_cpu_map__max(evsel_list->core.cpus).cpu;
+	if (evsel_list->core.user_requested_cpus)
+		nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
 	else
 		nr = 0;
 	stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
@@ -1630,7 +1630,7 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
 	if (!get_id)
 		return 0;

-	stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus, get_id, env);
+	stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, get_id, env);
 	if (!stat_config.aggr_map) {
 		pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
 		return -1;
...
@@ -1021,7 +1021,7 @@ static int perf_top__start_counters(struct perf_top *top)
 	evlist__for_each_entry(evlist, counter) {
 try_again:
-		if (evsel__open(counter, top->evlist->core.cpus,
+		if (evsel__open(counter, top->evlist->core.user_requested_cpus,
 				top->evlist->core.threads) < 0) {

 			/*
...
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
 	mp->idx = idx;

 	if (per_cpu) {
-		mp->cpu = perf_cpu_map__cpu(evlist->core.cpus, idx);
+		mp->cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, idx);
 		if (evlist->core.threads)
 			mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
 		else
...
@@ -38,7 +38,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
 	/* don't need to set cpu filter for system-wide mode */
 	if (ftrace->target.cpu_list) {
-		ncpus = perf_cpu_map__nr(ftrace->evlist->core.cpus);
+		ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
 		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
 	}
@@ -63,7 +63,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
 		fd = bpf_map__fd(skel->maps.cpu_filter);

 		for (i = 0; i < ncpus; i++) {
-			cpu = perf_cpu_map__cpu(ftrace->evlist->core.cpus, i).cpu;
+			cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
 			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
 		}
 	}
...
@@ -440,7 +440,7 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
 	bool has_imm = false;

 	// See explanation in evlist__close()
-	if (!cpu_map__is_dummy(evlist->core.cpus)) {
+	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
 		if (affinity__setup(&saved_affinity) < 0)
 			return;
 		affinity = &saved_affinity;
@@ -500,7 +500,7 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
 	struct affinity saved_affinity, *affinity = NULL;

 	// See explanation in evlist__close()
-	if (!cpu_map__is_dummy(evlist->core.cpus)) {
+	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
 		if (affinity__setup(&saved_affinity) < 0)
 			return;
 		affinity = &saved_affinity;
@@ -565,7 +565,7 @@ static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel,
 static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
 {
 	int cpu;
-	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);

 	if (!evsel->core.fd)
 		return -EINVAL;
@@ -580,7 +580,7 @@ static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evse
 int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
 {
-	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
+	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);

 	if (per_cpu_mmaps)
 		return evlist__enable_event_cpu(evlist, evsel, idx);
@@ -1301,10 +1301,11 @@ void evlist__close(struct evlist *evlist)
 	struct affinity affinity;

 	/*
-	 * With perf record core.cpus is usually NULL.
+	 * With perf record core.user_requested_cpus is usually NULL.
 	 * Use the old method to handle this for now.
 	 */
-	if (!evlist->core.cpus || cpu_map__is_dummy(evlist->core.cpus)) {
+	if (!evlist->core.user_requested_cpus ||
+	    cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
 		evlist__for_each_entry_reverse(evlist, evsel)
 			evsel__close(evsel);
 		return;
@@ -1367,7 +1368,7 @@ int evlist__open(struct evlist *evlist)
 	 * Default: one fd per CPU, all threads, aka systemwide
 	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
 	 */
-	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
+	if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
 		err = evlist__create_syswide_maps(evlist);
 		if (err < 0)
 			goto out_err;
...
@@ -106,7 +106,7 @@ void evlist__config(struct evlist *evlist, struct record_opts *opts, struct call
 	if (opts->group)
 		evlist__set_leader(evlist);

-	if (perf_cpu_map__cpu(evlist->core.cpus, 0).cpu < 0)
+	if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0)
 		opts->no_inherit = true;

 	use_comm_exec = perf_can_comm_exec();
@@ -244,7 +244,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 	evsel = evlist__last(temp_evlist);

-	if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
+	if (!evlist || perf_cpu_map__empty(evlist->core.user_requested_cpus)) {
 		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);

 		if (cpus)
@@ -252,7 +252,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 		perf_cpu_map__put(cpus);
 	} else {
-		cpu = perf_cpu_map__cpu(evlist->core.cpus, 0);
+		cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0);
 	}

 	while (1) {
...
@@ -114,7 +114,8 @@ int evlist__start_sb_thread(struct evlist *evlist, struct target *target)
 	}

 	evlist__for_each_entry(evlist, counter) {
-		if (evsel__open(counter, evlist->core.cpus, evlist->core.threads) < 0)
+		if (evsel__open(counter, evlist->core.user_requested_cpus,
+				evlist->core.threads) < 0)
 			goto out_delete_evlist;
 	}
...
@@ -929,7 +929,7 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
 	int all_idx;
 	struct perf_cpu cpu;

-	perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.cpus) {
+	perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
 		struct evsel *counter;
 		bool first = true;
...
@@ -2127,7 +2127,7 @@ int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct p
 		return err;
 	}

-	err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
+	err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
 	if (err < 0) {
 		pr_err("Couldn't synthesize thread map.\n");
 		return err;
...
@@ -95,15 +95,17 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
 	if (target->cpu_list)
 		ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
-				perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "",
+				perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
+				? "s" : "",
 				target->cpu_list);
 	else {
 		if (target->tid)
 			ret += SNPRINTF(bf + ret, size - ret, ")");
 		else
 			ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
-					perf_cpu_map__nr(top->evlist->core.cpus),
-					perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "");
+					perf_cpu_map__nr(top->evlist->core.user_requested_cpus),
+					perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
+					? "s" : "");
 	}

 	perf_top__reset_sample_counters(top);
...