author     Ian Rogers <irogers@google.com>              2022-03-28 16:26:44 -0700
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2022-04-01 16:19:35 -0300
commit     0df6ade7119daa40904b0c18871169e753663e14
tree       6794e8ea61c31084b4728f4f88a340c7e1ab23f6 /tools/perf/util
parent     d4ff92659244a4783e424fa41b6f83645d8920c1
perf evlist: Rename cpus to user_requested_cpus
evlist contains cpus and all_cpus. all_cpus is the union of the cpu maps
of all evsels.
For non-task targets, cpus is set to the cpus requested on the command
line, defaulting to all online cpus if none are specified.
For an uncore event, all_cpus may be just CPU 0 or every online CPU.
This causes all_cpus to have fewer values than the cpus variable, which
is confusing given the 'all' in the name.
To try to make the behavior clearer, rename cpus to user_requested_cpus
and add comments on the two struct variables.
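
As a rough illustration of the two maps being described, here is a minimal
sketch of the renamed members. The struct definition itself is not part of
this diffstat (it lives in libperf rather than tools/perf/util), so the
struct name, layout and comment wording below are illustrative assumptions,
not the upstream definition.

/*
 * Illustrative sketch only: the real fields live in libperf's evlist,
 * outside this diffstat. Only the two cpu-map members named by this
 * commit are shown; everything else is simplified away.
 */
struct perf_cpu_map;			/* opaque CPU map type */

struct evlist_cpu_maps_sketch {
	/*
	 * CPUs requested on the command line, or all online CPUs for a
	 * non-task target when none are specified.
	 */
	struct perf_cpu_map *user_requested_cpus;
	/*
	 * Union of the cpu maps of all evsels; for an uncore event this
	 * may be just CPU 0, i.e. fewer CPUs than user_requested_cpus
	 * despite the 'all' in the name.
	 */
	struct perf_cpu_map *all_cpus;
};

The call-site changes in the diff below all switch from the old cpus member
to user_requested_cpus.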
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Antonov <alexander.antonov@linux.intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: German Gomez <german.gomez@arm.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: John Garry <john.garry@huawei.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yonghong Song <yhs@fb.com>
Cc: bpf@vger.kernel.org
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: netdev@vger.kernel.org
Link: http://lore.kernel.org/lkml/20220328232648.2127340-3-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/util')
 tools/perf/util/auxtrace.c         |  2
 tools/perf/util/bpf_ftrace.c       |  4
 tools/perf/util/evlist.c           | 15
 tools/perf/util/record.c           |  6
 tools/perf/util/sideband_evlist.c  |  3
 tools/perf/util/stat-display.c     |  2
 tools/perf/util/synthetic-events.c |  2
 tools/perf/util/top.c              |  8
8 files changed, 23 insertions, 19 deletions
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 9e48652662d4..df1c5bbbaa0d 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
 	mp->idx = idx;
 
 	if (per_cpu) {
-		mp->cpu = perf_cpu_map__cpu(evlist->core.cpus, idx);
+		mp->cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, idx);
 		if (evlist->core.threads)
 			mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
 		else
diff --git a/tools/perf/util/bpf_ftrace.c b/tools/perf/util/bpf_ftrace.c
index 4f4d3aaff37c..7a4297d8fd2c 100644
--- a/tools/perf/util/bpf_ftrace.c
+++ b/tools/perf/util/bpf_ftrace.c
@@ -38,7 +38,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
 
 	/* don't need to set cpu filter for system-wide mode */
 	if (ftrace->target.cpu_list) {
-		ncpus = perf_cpu_map__nr(ftrace->evlist->core.cpus);
+		ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
 		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
 	}
 
@@ -63,7 +63,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
 		fd = bpf_map__fd(skel->maps.cpu_filter);
 
 		for (i = 0; i < ncpus; i++) {
-			cpu = perf_cpu_map__cpu(ftrace->evlist->core.cpus, i).cpu;
+			cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
 			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
 		}
 	}
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 9bb79e049957..cb2cf4463c08 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -440,7 +440,7 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
 	bool has_imm = false;
 
 	// See explanation in evlist__close()
-	if (!cpu_map__is_dummy(evlist->core.cpus)) {
+	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
 		if (affinity__setup(&saved_affinity) < 0)
 			return;
 		affinity = &saved_affinity;
@@ -500,7 +500,7 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
 	struct affinity saved_affinity, *affinity = NULL;
 
 	// See explanation in evlist__close()
-	if (!cpu_map__is_dummy(evlist->core.cpus)) {
+	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
 		if (affinity__setup(&saved_affinity) < 0)
 			return;
 		affinity = &saved_affinity;
@@ -565,7 +565,7 @@ static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel,
 static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
 {
 	int cpu;
-	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
 
 	if (!evsel->core.fd)
 		return -EINVAL;
@@ -580,7 +580,7 @@ static int evlist__enable_event_thread(struct evlist *evlist, struct evse
 
 int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
 {
-	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
+	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
 
 	if (per_cpu_mmaps)
 		return evlist__enable_event_cpu(evlist, evsel, idx);
@@ -1301,10 +1301,11 @@ void evlist__close(struct evlist *evlist)
 	struct affinity affinity;
 
 	/*
-	 * With perf record core.cpus is usually NULL.
+	 * With perf record core.user_requested_cpus is usually NULL.
 	 * Use the old method to handle this for now.
 	 */
-	if (!evlist->core.cpus || cpu_map__is_dummy(evlist->core.cpus)) {
+	if (!evlist->core.user_requested_cpus ||
+	    cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
 		evlist__for_each_entry_reverse(evlist, evsel)
 			evsel__close(evsel);
 		return;
@@ -1367,7 +1368,7 @@ int evlist__open(struct evlist *evlist)
 	 * Default: one fd per CPU, all threads, aka systemwide
 	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
 	 */
-	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
+	if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
 		err = evlist__create_syswide_maps(evlist);
 		if (err < 0)
 			goto out_err;
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 007a64681416..5b09ecbb05dc 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -106,7 +106,7 @@ void evlist__config(struct evlist *evlist, struct record_opts *opts, struct call
 	if (opts->group)
 		evlist__set_leader(evlist);
 
-	if (perf_cpu_map__cpu(evlist->core.cpus, 0).cpu < 0)
+	if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0)
 		opts->no_inherit = true;
 
 	use_comm_exec = perf_can_comm_exec();
@@ -244,7 +244,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 
 	evsel = evlist__last(temp_evlist);
 
-	if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
+	if (!evlist || perf_cpu_map__empty(evlist->core.user_requested_cpus)) {
 		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 
 		if (cpus)
@@ -252,7 +252,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 
 		perf_cpu_map__put(cpus);
 	} else {
-		cpu = perf_cpu_map__cpu(evlist->core.cpus, 0);
+		cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0);
 	}
 
 	while (1) {
diff --git a/tools/perf/util/sideband_evlist.c b/tools/perf/util/sideband_evlist.c
index 748371ac22be..388846f17bc1 100644
--- a/tools/perf/util/sideband_evlist.c
+++ b/tools/perf/util/sideband_evlist.c
@@ -114,7 +114,8 @@ int evlist__start_sb_thread(struct evlist *evlist, struct target *target)
 	}
 
 	evlist__for_each_entry(evlist, counter) {
-		if (evsel__open(counter, evlist->core.cpus, evlist->core.threads) < 0)
+		if (evsel__open(counter, evlist->core.user_requested_cpus,
+				evlist->core.threads) < 0)
 			goto out_delete_evlist;
 	}
 
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 9cbe351b141f..138e3ab9d638 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -929,7 +929,7 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
 	int all_idx;
 	struct perf_cpu cpu;
 
-	perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.cpus) {
+	perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
 		struct evsel *counter;
 		bool first = true;
 
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index b654de0841f8..27acdc5e5723 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -2127,7 +2127,7 @@ int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct p
 		return err;
 	}
 
-	err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
+	err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
 	if (err < 0) {
 		pr_err("Couldn't synthesize thread map.\n");
 		return err;
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index c1ebfc5d2e0c..b8b32431d2f7 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -95,15 +95,17 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
 
 	if (target->cpu_list)
 		ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
-				perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "",
+				perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
+				? "s" : "",
 				target->cpu_list);
 	else {
 		if (target->tid)
 			ret += SNPRINTF(bf + ret, size - ret, ")");
 		else
 			ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
-					perf_cpu_map__nr(top->evlist->core.cpus),
-					perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "");
+					perf_cpu_map__nr(top->evlist->core.user_requested_cpus),
+					perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
+					? "s" : "");
 	}
 
 	perf_top__reset_sample_counters(top);