Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r--   tools/perf/util/evlist.c   32
1 file changed, 17 insertions, 15 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c4489a1ad6bc..ff415680fe0a 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -37,6 +37,8 @@
 #include <perf/evsel.h>
 #include <perf/cpumap.h>
 
+#include <internal/xyarray.h>
+
 #ifdef LACKS_SIGQUEUE_PROTOTYPE
 int sigqueue(pid_t pid, int sig, const union sigval value);
 #endif
@@ -314,7 +316,7 @@ static int perf_evlist__nr_threads(struct evlist *evlist,
 	if (evsel->system_wide)
 		return 1;
 	else
-		return thread_map__nr(evlist->core.threads);
+		return perf_thread_map__nr(evlist->core.threads);
 }
 
 void evlist__disable(struct evlist *evlist)
@@ -370,7 +372,7 @@ static int perf_evlist__enable_event_thread(struct evlist *evlist,
 					    int thread)
 {
 	int cpu;
-	int nr_cpus = cpu_map__nr(evlist->core.cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
 
 	if (!evsel->core.fd)
 		return -EINVAL;
@@ -386,7 +388,7 @@ static int perf_evlist__enable_event_thread(struct evlist *evlist,
 int perf_evlist__enable_event_idx(struct evlist *evlist,
 				  struct evsel *evsel, int idx)
 {
-	bool per_cpu_mmaps = !cpu_map__empty(evlist->core.cpus);
+	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
 
 	if (per_cpu_mmaps)
 		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
@@ -396,8 +398,8 @@ int perf_evlist__enable_event_idx(struct evlist *evlist,
 
 int perf_evlist__alloc_pollfd(struct evlist *evlist)
 {
-	int nr_cpus = cpu_map__nr(evlist->core.cpus);
-	int nr_threads = thread_map__nr(evlist->core.threads);
+	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
+	int nr_threads = perf_thread_map__nr(evlist->core.threads);
 	int nfds = 0;
 	struct evsel *evsel;
 
@@ -529,7 +531,7 @@ static void perf_evlist__set_sid_idx(struct evlist *evlist,
 	else
 		sid->cpu = -1;
 	if (!evsel->system_wide && evlist->core.threads && thread >= 0)
-		sid->tid = thread_map__pid(evlist->core.threads, thread);
+		sid->tid = perf_thread_map__pid(evlist->core.threads, thread);
 	else
 		sid->tid = -1;
 }
@@ -692,9 +694,9 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist,
 	int i;
 	struct perf_mmap *map;
 
-	evlist->nr_mmaps = cpu_map__nr(evlist->core.cpus);
-	if (cpu_map__empty(evlist->core.cpus))
-		evlist->nr_mmaps = thread_map__nr(evlist->core.threads);
+	evlist->nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
+	if (perf_cpu_map__empty(evlist->core.cpus))
+		evlist->nr_mmaps = perf_thread_map__nr(evlist->core.threads);
 	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
 	if (!map)
 		return NULL;
@@ -758,7 +760,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
 		if (evsel->system_wide && thread)
 			continue;
 
-		cpu = cpu_map__idx(evsel->core.cpus, evlist_cpu);
+		cpu = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);
 		if (cpu == -1)
 			continue;
 
@@ -807,8 +809,8 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
 				     struct mmap_params *mp)
 {
 	int cpu, thread;
-	int nr_cpus = cpu_map__nr(evlist->core.cpus);
-	int nr_threads = thread_map__nr(evlist->core.threads);
+	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
+	int nr_threads = perf_thread_map__nr(evlist->core.threads);
 
 	pr_debug2("perf event ring buffer mmapped per cpu\n");
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
@@ -836,7 +838,7 @@ static int perf_evlist__mmap_per_thread(struct evlist *evlist,
 					struct mmap_params *mp)
 {
 	int thread;
-	int nr_threads = thread_map__nr(evlist->core.threads);
+	int nr_threads = perf_thread_map__nr(evlist->core.threads);
 
 	pr_debug2("perf event ring buffer mmapped per thread\n");
 	for (thread = 0; thread < nr_threads; thread++) {
@@ -1014,11 +1016,11 @@ int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
 	evlist__for_each_entry(evlist, evsel) {
 		if ((evsel->core.attr.read_format & PERF_FORMAT_ID) &&
 		    evsel->sample_id == NULL &&
-		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
+		    perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
 			return -ENOMEM;
 	}
 
-	if (cpu_map__empty(cpus))
+	if (perf_cpu_map__empty(cpus))
 		return perf_evlist__mmap_per_thread(evlist, &mp);
 
 	return perf_evlist__mmap_per_cpu(evlist, &mp);
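The change is purely mechanical: the perf-internal cpu_map__*/thread_map__* helpers are swapped for their libperf counterparts (perf_cpu_map__nr, perf_cpu_map__empty, perf_cpu_map__idx, perf_thread_map__nr, perf_thread_map__pid), and a direct include of <internal/xyarray.h> is added; behaviour is unchanged. As a rough, standalone sketch of the libperf accessors being adopted here (not part of the patch), the program below builds a cpu map and a thread map and prints the counts these helpers return. It assumes a libperf built from tools/lib/perf with its public headers installed (<perf/cpumap.h>, <perf/threadmap.h>) and linked via -lperf; exact header contents can differ between libperf versions.

/*
 * Minimal sketch of the libperf map accessors used above; not part of
 * the patch.  Assumes libperf from tools/lib/perf is installed.
 */
#include <stdio.h>
#include <unistd.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>

int main(void)
{
	/* NULL cpu list asks libperf for all online CPUs. */
	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
	/* A dummy thread map has a single slot (pid -1 by default). */
	struct perf_thread_map *threads = perf_thread_map__new_dummy();

	if (!cpus || !threads)
		return 1;

	/* Point the single thread-map slot at the current process. */
	perf_thread_map__set_pid(threads, 0, getpid());

	printf("cpus=%d empty=%d threads=%d tid[0]=%d\n",
	       perf_cpu_map__nr(cpus),
	       perf_cpu_map__empty(cpus),
	       perf_thread_map__nr(threads),
	       (int)perf_thread_map__pid(threads, 0));

	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	return 0;
}

Build with something like gcc sketch.c -lperf (or via libperf.pc if your install provides it); the printed counts are exactly what the renamed helpers in evlist.c are used for when sizing pollfd arrays and per-cpu/per-thread mmaps.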