Diffstat (limited to 'tools/perf/util')
 tools/perf/util/Build                       |  2
 tools/perf/util/affinity.c                  |  8
 tools/perf/util/arm-spe.c                   |  2
 tools/perf/util/bpf_counter_cgroup.c        | 10
 tools/perf/util/bpf_skel/bperf_cgroup.bpf.c | 13
 tools/perf/util/bpf_skel/off_cpu.bpf.c      | 18
 tools/perf/util/genelf.c                    | 34
 tools/perf/util/genelf.h                    |  4
 tools/perf/util/metricgroup.c               |  3
 tools/perf/util/parse-events-hybrid.c       | 21
 tools/perf/util/parse-events.c              | 39
 tools/perf/util/parse-events.h              |  1
 tools/perf/util/print-events.c              | 39
 tools/perf/util/scripting-engines/Build     |  2
 tools/perf/util/stat-shadow.c               | 24
 tools/perf/util/symbol-elf.c                |  7
 tools/perf/util/synthetic-events.c          | 17
 17 files changed, 157 insertions(+), 87 deletions(-)
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 9dfae1bda9cc..485e1a343165 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -269,7 +269,7 @@ CFLAGS_expr-flex.o += $(flex_flags)
 bison_flags := -DYYENABLE_NLS=0
 BISON_GE_35 := $(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\)/\1\2/g') \>\= 35)
 ifeq ($(BISON_GE_35),1)
-  bison_flags += -Wno-unused-parameter -Wno-nested-externs -Wno-implicit-function-declaration -Wno-switch-enum
+  bison_flags += -Wno-unused-parameter -Wno-nested-externs -Wno-implicit-function-declaration -Wno-switch-enum -Wno-unused-but-set-variable -Wno-unknown-warning-option
 else
   bison_flags += -w
 endif
diff --git a/tools/perf/util/affinity.c b/tools/perf/util/affinity.c
index 4d216c0dc425..4ee96b3c755b 100644
--- a/tools/perf/util/affinity.c
+++ b/tools/perf/util/affinity.c
@@ -49,8 +49,14 @@ void affinity__set(struct affinity *a, int cpu)
 {
 	int cpu_set_size = get_cpu_set_size();
 
-	if (cpu == -1)
+	/*
+	 * Return:
+	 * - if cpu is -1
+	 * - restrict out of bound access to sched_cpus
+	 */
+	if (cpu == -1 || ((cpu >= (cpu_set_size * 8))))
 		return;
+
 	a->changed = true;
 	set_bit(cpu, a->sched_cpus);
 	/*
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 22dcfe07e886..906476a839e1 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -498,7 +498,7 @@ static void arm_spe__synth_data_source_generic(const struct arm_spe_record *reco
 static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
 {
 	union perf_mem_data_src	data_src = { 0 };
-	bool is_neoverse = is_midr_in_range(midr, neoverse_spe);
+	bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);
 
 	if (record->op == ARM_SPE_LD)
 		data_src.mem_op = PERF_MEM_OP_LOAD;
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index 63b9db657442..3c2df7522f6f 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -95,7 +95,7 @@ static int bperf_load_program(struct evlist *evlist)
 
 	perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
 		link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
-						      FD(cgrp_switch, cpu.cpu));
+						      FD(cgrp_switch, i));
 		if (IS_ERR(link)) {
 			pr_err("Failed to attach cgroup program\n");
 			err = PTR_ERR(link);
@@ -115,15 +115,15 @@ static int bperf_load_program(struct evlist *evlist)
 			evsel->cgrp = NULL;
 
 			/* open single copy of the events w/o cgroup */
-			err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1);
+			err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
 			if (err) {
 				pr_err("Failed to open first cgroup events\n");
 				goto out;
 			}
 
 			map_fd = bpf_map__fd(skel->maps.events);
-			perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) {
-				int fd = FD(evsel, cpu.cpu);
+			perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
+				int fd = FD(evsel, j);
 				__u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
 
 				err = bpf_map_update_elem(map_fd, &idx, &fd,
@@ -269,7 +269,7 @@ static int bperf_cgrp__read(struct evsel *evsel)
 		goto out;
 	}
 
-	perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
+	perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
 		counts = perf_counts(evsel->counts, i, 0);
 		counts->val = values[cpu.cpu].counter;
 		counts->ena = values[cpu.cpu].enabled;
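
A note on the two fixes above. The affinity.c check guards set_bit() against writing past the sched_cpus bitmask, which holds cpu_set_size bytes, i.e. cpu_set_size * 8 bits. The bpf_counter_cgroup.c change hinges on the difference between a CPU map index and a CPU number: perf_cpu_map__for_each_cpu() yields both, and they only coincide when every CPU is in the map. Per-event FD arrays have one slot per map entry, so they must be indexed by position, while the BPF "events" map stays keyed by cpu.cpu. A minimal sketch of that distinction, with a plain array standing in for perf's perf_cpu_map (not part of the patch):

#include <stdio.h>

/* Hypothetical stand-in for a perf_cpu_map: a sparse, ordered CPU list. */
static const int cpus[] = { 0, 2, 4, 6 };	/* e.g. one SMT sibling per core */
#define NR_MAP_CPUS (int)(sizeof(cpus) / sizeof(cpus[0]))

int main(void)
{
	/*
	 * fds[] has one slot per map entry, so it must be indexed by the
	 * iteration index (0..3), not by the CPU number (0,2,4,6);
	 * indexing fds[cpu] would read past the array for cpu == 4 or 6.
	 */
	int fds[NR_MAP_CPUS];

	for (int idx = 0; idx < NR_MAP_CPUS; idx++) {
		fds[idx] = 100 + cpus[idx];	/* pretend perf_event_open() fd */
		printf("map index %d -> cpu %d -> fd %d\n",
		       idx, cpus[idx], fds[idx]);
	}
	return 0;
}
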
diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
index 292c430768b5..9aa8cdd93de4 100644
--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
@@ -48,6 +48,7 @@ const volatile __u32 num_cpus = 1;
 
 int enabled = 0;
 int use_cgroup_v2 = 0;
+int perf_subsys_id = -1;
 
 static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
 {
@@ -58,7 +59,15 @@ static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
 	int level;
 	int cnt;
 
-	cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_event_cgrp_id], cgroup);
+	if (perf_subsys_id == -1) {
+#if __has_builtin(__builtin_preserve_enum_value)
+		perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
+						     perf_event_cgrp_id);
+#else
+		perf_subsys_id = perf_event_cgrp_id;
+#endif
+	}
+	cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup);
 	level = BPF_CORE_READ(cgrp, level);
 
 	for (cnt = 0; i < MAX_LEVELS; i++) {
@@ -176,7 +185,7 @@ static int bperf_cgroup_count(void)
 }
 
 // This will be attached to cgroup-switches event for each cpu
-SEC("perf_events")
+SEC("perf_event")
 int BPF_PROG(on_cgrp_switch)
 {
 	return bperf_cgroup_count();
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
index c4ba2bcf179f..38e3b287dbb2 100644
--- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -94,6 +94,8 @@ const volatile bool has_prev_state = false;
 const volatile bool needs_cgroup = false;
 const volatile bool uses_cgroup_v1 = false;
 
+int perf_subsys_id = -1;
+
 /*
  * Old kernel used to call it task_struct->state and now it's '__state'.
  * Use BPF CO-RE "ignored suffix rule" to deal with it like below:
@@ -119,11 +121,19 @@ static inline __u64 get_cgroup_id(struct task_struct *t)
 {
 	struct cgroup *cgrp;
 
-	if (uses_cgroup_v1)
-		cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_event_cgrp_id], cgroup);
-	else
-		cgrp = BPF_CORE_READ(t, cgroups, dfl_cgrp);
+	if (!uses_cgroup_v1)
+		return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);
+
+	if (perf_subsys_id == -1) {
+#if __has_builtin(__builtin_preserve_enum_value)
+		perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
+						     perf_event_cgrp_id);
+#else
+		perf_subsys_id = perf_event_cgrp_id;
+#endif
+	}
 
+	cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);
 	return BPF_CORE_READ(cgrp, kn, id);
 }
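
Both skeleton changes above apply the same CO-RE idiom: rather than baking in the perf_event_cgrp_id value from the build-time headers (which can drift from the running kernel when other cgroup subsystems are configured out), the enum value is resolved against the target kernel's BTF when the program is loaded, with the header value kept as a fallback. A stripped-down sketch of the idiom, assuming vmlinux.h and libbpf's CO-RE helpers are available:

/* Sketch only; mirrors the pattern used in the two skeletons above. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

int perf_subsys_id = -1;	/* cached after the first resolution */

static inline int resolve_perf_subsys_id(void)
{
	if (perf_subsys_id == -1) {
#if __has_builtin(__builtin_preserve_enum_value)
		/* Resolved against the running kernel's BTF at load time. */
		perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
						     perf_event_cgrp_id);
#else
		/* Fallback: trust the build-time header value. */
		perf_subsys_id = perf_event_cgrp_id;
#endif
	}
	return perf_subsys_id;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";
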
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index 953338b9e887..d81b54563e96 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -30,10 +30,6 @@
 
 #define BUILD_ID_URANDOM /* different uuid for each run */
 
-// FIXME, remove this and fix the deprecation warnings before its removed and
-// We'll break for good here...
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-
 #ifdef HAVE_LIBCRYPTO_SUPPORT
 
 #define BUILD_ID_MD5
@@ -45,6 +41,7 @@
 #endif
 
 #ifdef BUILD_ID_MD5
+#include <openssl/evp.h>
 #include <openssl/md5.h>
 #endif
 #endif
@@ -142,15 +139,20 @@ gen_build_id(struct buildid_note *note,
 static void
 gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *code, size_t csize)
 {
-	MD5_CTX context;
+	EVP_MD_CTX *mdctx;
 
 	if (sizeof(note->build_id) < 16)
 		errx(1, "build_id too small for MD5");
 
-	MD5_Init(&context);
-	MD5_Update(&context, &load_addr, sizeof(load_addr));
-	MD5_Update(&context, code, csize);
-	MD5_Final((unsigned char *)note->build_id, &context);
+	mdctx = EVP_MD_CTX_new();
+	if (!mdctx)
+		errx(2, "failed to create EVP_MD_CTX");
+
+	EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
+	EVP_DigestUpdate(mdctx, &load_addr, sizeof(load_addr));
+	EVP_DigestUpdate(mdctx, code, csize);
+	EVP_DigestFinal_ex(mdctx, (unsigned char *)note->build_id, NULL);
+	EVP_MD_CTX_free(mdctx);
 }
 #endif
 
@@ -251,6 +253,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
 	Elf_Data *d;
 	Elf_Scn *scn;
 	Elf_Ehdr *ehdr;
+	Elf_Phdr *phdr;
 	Elf_Shdr *shdr;
 	uint64_t eh_frame_base_offset;
 	char *strsym = NULL;
@@ -286,6 +289,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
 	ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
 
 	/*
+	 * setup program header
+	 */
+	phdr = elf_newphdr(e, 1);
+	phdr[0].p_type = PT_LOAD;
+	phdr[0].p_offset = 0;
+	phdr[0].p_vaddr = 0;
+	phdr[0].p_paddr = 0;
+	phdr[0].p_filesz = csize;
+	phdr[0].p_memsz = csize;
+	phdr[0].p_flags = PF_X | PF_R;
+	phdr[0].p_align = 8;
+
+	/*
	 * setup text section
 	 */
 	scn = elf_newscn(e);
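
The gen_build_id() conversion above replaces OpenSSL's MD5_Init()/MD5_Update()/MD5_Final(), which are deprecated as of OpenSSL 3.0, with the EVP digest interface, which is why the #pragma silencing deprecation warnings can go away. A standalone sketch of the same call sequence (compile with -lcrypto; the payload is a placeholder for the load address plus the JIT code bytes):

#include <stdio.h>
#include <openssl/evp.h>

int main(void)
{
	/* Placeholder payload standing in for load_addr + code bytes. */
	const char payload[] = "example payload";
	unsigned char digest[EVP_MAX_MD_SIZE];
	unsigned int len = 0;

	EVP_MD_CTX *mdctx = EVP_MD_CTX_new();
	if (!mdctx)
		return 1;

	/* Same sequence genelf.c now uses instead of MD5_Init/Update/Final. */
	EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
	EVP_DigestUpdate(mdctx, payload, sizeof(payload) - 1);
	EVP_DigestFinal_ex(mdctx, digest, &len);
	EVP_MD_CTX_free(mdctx);

	for (unsigned int i = 0; i < len; i++)
		printf("%02x", digest[i]);
	putchar('\n');
	return 0;
}
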
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index ae138afe6c56..b5c909546e3f 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -53,8 +53,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
 
 #if GEN_ELF_CLASS == ELFCLASS64
 #define elf_newehdr	elf64_newehdr
+#define elf_newphdr	elf64_newphdr
 #define elf_getshdr	elf64_getshdr
 #define Elf_Ehdr	Elf64_Ehdr
+#define Elf_Phdr	Elf64_Phdr
 #define Elf_Shdr	Elf64_Shdr
 #define Elf_Sym		Elf64_Sym
 #define ELF_ST_TYPE(a)	ELF64_ST_TYPE(a)
@@ -62,8 +64,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
 #define ELF_ST_VIS(a)	ELF64_ST_VISIBILITY(a)
 #else
 #define elf_newehdr	elf32_newehdr
+#define elf_newphdr	elf32_newphdr
 #define elf_getshdr	elf32_getshdr
 #define Elf_Ehdr	Elf32_Ehdr
+#define Elf_Phdr	Elf32_Phdr
 #define Elf_Shdr	Elf32_Shdr
 #define Elf_Sym		Elf32_Sym
 #define ELF_ST_TYPE(a)	ELF32_ST_TYPE(a)
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 464475fd6b9a..c93bcaf6d55d 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -1655,6 +1655,9 @@ int metricgroup__parse_groups(const struct option *opt,
 	struct evlist *perf_evlist = *(struct evlist **)opt->value;
 	const struct pmu_events_table *table = pmu_events_table__find();
 
+	if (!table)
+		return -EINVAL;
+
 	return parse_groups(perf_evlist, str, metric_no_group,
 			    metric_no_merge, NULL, metric_events, table);
 }
diff --git a/tools/perf/util/parse-events-hybrid.c b/tools/perf/util/parse-events-hybrid.c
index 284f8eabd3b9..7c9f9150bad5 100644
--- a/tools/perf/util/parse-events-hybrid.c
+++ b/tools/perf/util/parse-events-hybrid.c
@@ -33,7 +33,8 @@ static void config_hybrid_attr(struct perf_event_attr *attr,
 	 * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
 	 */
 	attr->type = type;
-	attr->config = attr->config | ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
+	attr->config = (attr->config & PERF_HW_EVENT_MASK) |
+		       ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
 }
 
 static int create_event_hybrid(__u32 config_type, int *idx,
@@ -48,13 +49,25 @@ static int create_event_hybrid(__u32 config_type, int *idx,
 	__u64 config = attr->config;
 
 	config_hybrid_attr(attr, config_type, pmu->type);
+
+	/*
+	 * Some hybrid hardware cache events are only available on one CPU
+	 * PMU. For example, the 'L1-dcache-load-misses' is only available
+	 * on cpu_core, while the 'L1-icache-loads' is only available on
+	 * cpu_atom. We need to remove "not supported" hybrid cache events.
+	 */
+	if (attr->type == PERF_TYPE_HW_CACHE
+	    && !is_event_supported(attr->type, attr->config))
+		return 0;
+
 	evsel = parse_events__add_event_hybrid(list, idx, attr, name, metric_id,
 					       pmu, config_terms);
-	if (evsel)
+	if (evsel) {
 		evsel->pmu_name = strdup(pmu->name);
-	else
+		if (!evsel->pmu_name)
+			return -ENOMEM;
+	} else
 		return -ENOMEM;
 
-	attr->type = type;
 	attr->config = config;
 	return 0;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index f05e15acd33f..f3b2c2a87456 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -28,6 +28,7 @@
 #include "util/parse-events-hybrid.h"
 #include "util/pmu-hybrid.h"
 #include "tracepoint.h"
+#include "thread_map.h"
 
 #define MAX_NAME_LEN 100
 
@@ -157,6 +158,44 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
 #define PERF_EVENT_TYPE(config)		__PERF_EVENT_FIELD(config, TYPE)
 #define PERF_EVENT_ID(config)		__PERF_EVENT_FIELD(config, EVENT)
 
+bool is_event_supported(u8 type, u64 config)
+{
+	bool ret = true;
+	int open_return;
+	struct evsel *evsel;
+	struct perf_event_attr attr = {
+		.type = type,
+		.config = config,
+		.disabled = 1,
+	};
+	struct perf_thread_map *tmap = thread_map__new_by_tid(0);
+
+	if (tmap == NULL)
+		return false;
+
+	evsel = evsel__new(&attr);
+	if (evsel) {
+		open_return = evsel__open(evsel, NULL, tmap);
+		ret = open_return >= 0;
+
+		if (open_return == -EACCES) {
+			/*
+			 * This happens if the paranoid value
+			 * /proc/sys/kernel/perf_event_paranoid is set to 2
+			 * Re-run with exclude_kernel set; we don't do that
+			 * by default as some ARM machines do not support it.
+			 *
+			 */
+			evsel->core.attr.exclude_kernel = 1;
+			ret = evsel__open(evsel, NULL, tmap) >= 0;
+		}
+		evsel__delete(evsel);
+	}
+
+	perf_thread_map__put(tmap);
+	return ret;
+}
+
 const char *event_type(int type)
 {
 	switch (type) {
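
The probe that parse-events-hybrid.c now relies on is worth spelling out: is_event_supported() opens the event disabled on the current thread, and if the kernel refuses with EACCES (perf_event_paranoid set to 2), it retries with exclude_kernel before concluding the event is unsupported. The same probe can be written against the raw syscall; a minimal sketch with no perf-internal helpers:

#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static bool probe_event(__u32 type, __u64 config)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = config;
	attr.disabled = 1;

	/* pid = 0, cpu = -1: measure the calling thread on any CPU. */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0 && errno == EACCES) {
		/* perf_event_paranoid == 2: retry excluding kernel samples. */
		attr.exclude_kernel = 1;
		fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	}
	if (fd < 0)
		return false;
	close(fd);
	return true;
}

int main(void)
{
	return probe_event(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES) ? 0 : 1;
}
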
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 7e6a601d9cd0..07df7bb7b042 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -19,6 +19,7 @@ struct option;
 struct perf_pmu;
 
 bool have_tracepoints(struct list_head *evlist);
+bool is_event_supported(u8 type, u64 config);
 
 const char *event_type(int type);
 
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index ba1ab5134685..c4d5d87fae2f 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -22,7 +22,6 @@
 #include "probe-file.h"
 #include "string2.h"
 #include "strlist.h"
-#include "thread_map.h"
 #include "tracepoint.h"
 #include "pfm.h"
 #include "pmu-hybrid.h"
@@ -239,44 +238,6 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
 	strlist__delete(sdtlist);
 }
 
-static bool is_event_supported(u8 type, unsigned int config)
-{
-	bool ret = true;
-	int open_return;
-	struct evsel *evsel;
-	struct perf_event_attr attr = {
-		.type = type,
-		.config = config,
-		.disabled = 1,
-	};
-	struct perf_thread_map *tmap = thread_map__new_by_tid(0);
-
-	if (tmap == NULL)
-		return false;
-
-	evsel = evsel__new(&attr);
-	if (evsel) {
-		open_return = evsel__open(evsel, NULL, tmap);
-		ret = open_return >= 0;
-
-		if (open_return == -EACCES) {
-			/*
-			 * This happens if the paranoid value
-			 * /proc/sys/kernel/perf_event_paranoid is set to 2
-			 * Re-run with exclude_kernel set; we don't do that
-			 * by default as some ARM machines do not support it.
-			 *
-			 */
-			evsel->core.attr.exclude_kernel = 1;
-			ret = evsel__open(evsel, NULL, tmap) >= 0;
-		}
-		evsel__delete(evsel);
-	}
-
-	perf_thread_map__put(tmap);
-	return ret;
-}
-
 int print_hwcache_events(const char *event_glob, bool name_only)
 {
 	unsigned int type, op, i, evt_i = 0, evt_num = 0, npmus = 0;
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index c92326c2233a..0f5ba28339cf 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -3,4 +3,4 @@ perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
 
 CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum
 
-CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-error=deprecated-declarations
+CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-deprecated-declarations
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 979c8cb918f7..788ce5e46470 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -1193,7 +1193,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 						&rsd);
 		if (retiring > 0.7)
 			color = PERF_COLOR_GREEN;
-		print_metric(config, ctxp, color, "%8.1f%%", "retiring",
+		print_metric(config, ctxp, color, "%8.1f%%", "Retiring",
 				retiring * 100.);
 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
 		   full_td(cpu_map_idx, st, &rsd)) {
@@ -1202,7 +1202,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 						&rsd);
 		if (fe_bound > 0.2)
 			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
+		print_metric(config, ctxp, color, "%8.1f%%", "Frontend Bound",
 				fe_bound * 100.);
 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
 		   full_td(cpu_map_idx, st, &rsd)) {
@@ -1211,7 +1211,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 						&rsd);
 		if (be_bound > 0.2)
 			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "backend bound",
+		print_metric(config, ctxp, color, "%8.1f%%", "Backend Bound",
 				be_bound * 100.);
 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
 		   full_td(cpu_map_idx, st, &rsd)) {
@@ -1220,7 +1220,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 						&rsd);
 		if (bad_spec > 0.1)
 			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
+		print_metric(config, ctxp, color, "%8.1f%%", "Bad Speculation",
 				bad_spec * 100.);
 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_HEAVY_OPS) &&
 		   full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1234,13 +1234,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 
 		if (retiring > 0.7 && heavy_ops > 0.1)
 			color = PERF_COLOR_GREEN;
-		print_metric(config, ctxp, color, "%8.1f%%", "heavy operations",
+		print_metric(config, ctxp, color, "%8.1f%%", "Heavy Operations",
 				heavy_ops * 100.);
 		if (retiring > 0.7 && light_ops > 0.6)
 			color = PERF_COLOR_GREEN;
 		else
 			color = NULL;
-		print_metric(config, ctxp, color, "%8.1f%%", "light operations",
+		print_metric(config, ctxp, color, "%8.1f%%", "Light Operations",
 				light_ops * 100.);
 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BR_MISPREDICT) &&
 		   full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1254,13 +1254,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 
 		if (bad_spec > 0.1 && br_mis > 0.05)
 			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "branch mispredict",
+		print_metric(config, ctxp, color, "%8.1f%%", "Branch Mispredict",
 				br_mis * 100.);
 		if (bad_spec > 0.1 && m_clears > 0.05)
 			color = PERF_COLOR_RED;
 		else
 			color = NULL;
-		print_metric(config, ctxp, color, "%8.1f%%", "machine clears",
+		print_metric(config, ctxp, color, "%8.1f%%", "Machine Clears",
 				m_clears * 100.);
 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_LAT) &&
 		   full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1274,13 +1274,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 
 		if (fe_bound > 0.2 && fetch_lat > 0.15)
 			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "fetch latency",
+		print_metric(config, ctxp, color, "%8.1f%%", "Fetch Latency",
 				fetch_lat * 100.);
 		if (fe_bound > 0.2 && fetch_bw > 0.1)
 			color = PERF_COLOR_RED;
 		else
 			color = NULL;
-		print_metric(config, ctxp, color, "%8.1f%%", "fetch bandwidth",
+		print_metric(config, ctxp, color, "%8.1f%%", "Fetch Bandwidth",
 				fetch_bw * 100.);
 	} else if (perf_stat_evsel__is(evsel, TOPDOWN_MEM_BOUND) &&
 		   full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1294,13 +1294,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 
 		if (be_bound > 0.2 && mem_bound > 0.2)
 			color = PERF_COLOR_RED;
-		print_metric(config, ctxp, color, "%8.1f%%", "memory bound",
+		print_metric(config, ctxp, color, "%8.1f%%", "Memory Bound",
 				mem_bound * 100.);
 		if (be_bound > 0.2 && core_bound > 0.1)
 			color = PERF_COLOR_RED;
 		else
 			color = NULL;
-		print_metric(config, ctxp, color, "%8.1f%%", "Core bound",
+		print_metric(config, ctxp, color, "%8.1f%%", "Core Bound",
 				core_bound * 100.);
 	} else if (evsel->metric_expr) {
 		generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 75bec32d4f57..647b7dff8ef3 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -2102,8 +2102,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
 * is not the same for the kernel map and the modules map. That happens because
 * the data is copied adjacently whereas the original kcore has gaps. Finally,
- * kallsyms and modules files are compared with their copies to check that
- * modules have not been loaded or unloaded while the copies were taking place.
+ * kallsyms file is compared with its copy to check that modules have not been
+ * loaded or unloaded while the copies were taking place.
 *
 * Return: %0 on success, %-1 on failure.
 */
@@ -2166,9 +2166,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
 			goto out_extract_close;
 	}
 
-	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
-		goto out_extract_close;
-
 	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
 		goto out_extract_close;
 
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 812424dbf2d5..538790758e24 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -367,13 +367,24 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
 					     bool is_kernel)
 {
 	struct build_id bid;
+	struct nsinfo *nsi;
+	struct nscookie nc;
 	int rc;
 
-	if (is_kernel)
+	if (is_kernel) {
 		rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
-	else
-		rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+		goto out;
+	}
+
+	nsi = nsinfo__new(event->pid);
+	nsinfo__mountns_enter(nsi, &nc);
+
+	rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+
+	nsinfo__mountns_exit(&nc);
+	nsinfo__put(nsi);
+
+out:
 	if (rc == 0) {
 		memcpy(event->build_id, bid.data, sizeof(bid.data));
 		event->build_id_size = (u8) bid.size;
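
The synthetic-events.c fix wraps the build-id read in nsinfo__mountns_enter()/nsinfo__mountns_exit() so that event->filename is resolved inside the target process's mount namespace, where the path is actually valid for containerized processes. The underlying mechanism is setns(2) on /proc/PID/ns/mnt; a bare-bones sketch of that enter/read/exit pattern (requires CAP_SYS_ADMIN and a single-threaded caller; error handling trimmed, pid and path are placeholders):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <sched.h>
#include <unistd.h>

/* Read the first line of 'path' as seen from 'pid's mount namespace. */
static int read_in_mntns(pid_t pid, const char *path)
{
	char nspath[64], buf[256];
	int oldns, newns;
	FILE *f;

	/* Keep a handle on our own namespace so we can return to it. */
	oldns = open("/proc/self/ns/mnt", O_RDONLY);
	snprintf(nspath, sizeof(nspath), "/proc/%d/ns/mnt", (int)pid);
	newns = open(nspath, O_RDONLY);
	if (oldns < 0 || newns < 0)
		return -1;

	setns(newns, CLONE_NEWNS);		/* enter the target namespace */

	f = fopen(path, "r");			/* path resolved in target ns */
	if (f && fgets(buf, sizeof(buf), f))
		printf("%s", buf);
	if (f)
		fclose(f);

	setns(oldns, CLONE_NEWNS);		/* return to our own namespace */
	close(newns);
	close(oldns);
	return 0;
}
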