author     Linus Torvalds <torvalds@linux-foundation.org>  2023-05-07 11:32:18 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-05-07 11:32:18 -0700
commit     f085df1be60abf670315c11036261cfaec16b2eb (patch)
tree       c02c07ad31578b90c3cc99be6b5ba680d163bca7 /tools/perf/util/machine.c
parent     17784de648be93b4eef0ef8fe28a16ff04feecc7 (diff)
parent     9a2d5178b9d51e1c5f9e08989ff97fc8d4893f31 (diff)
Merge tag 'perf-tools-for-v6.4-3-2023-05-06' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux
Pull perf tool updates from Arnaldo Carvalho de Melo:
 "Third version of perf tool updates, with the build problems with
  using a 'vmlinux.h' generated from the main build fixed, and the bpf
  skeleton build disabled by default.

  Build:

   - Require libtraceevent to build; one can disable it using
     NO_LIBTRACEEVENT=1. It is required for tools like 'perf sched',
     'perf kvm', 'perf trace', etc.

     libtraceevent is available in most distros, so installing
     'libtraceevent-devel' should be a one-time event to continue
     building perf as usual.

     Using NO_LIBTRACEEVENT=1 produces tooling that is functional and
     sufficient for lots of users not interested in those
     libtraceevent-dependent features.

   - Allow Python support in 'perf script' when libtraceevent isn't
     linked, as not all features require it; for instance, Intel PT
     does not use tracepoints.

   - Error out if the python interpreter needed for jevents to work
     isn't available and NO_JEVENTS=1 isn't set, preventing a build
     without support for JSON vendor events, which is a rare but
     possible condition. The two check error messages:

       $(error ERROR: No python interpreter needed for jevents generation. Install python or build with NO_JEVENTS=1.)
       $(error ERROR: Python interpreter needed for jevents generation too old (older than 3.6). Install a newer python or build with NO_JEVENTS=1.)

   - Make libbpf 1.0 the minimum required when building with an
     out-of-tree, distro-provided libbpf.

   - Use libstdc++'s and LLVM libcxx's __cxa_demangle, a portable C++
     demangler; add a 'perf test' entry for it.

   - Make the binutils libraries opt-in, as distros disable building
     with them due to licensing; they were used for C++ demangling,
     for instance.

   - Switch libpfm4 to opt-out rather than opt-in; if libpfm-devel (or
     equivalent) isn't installed, we'll just have a build warning:

       Makefile.config:1144: libpfm4 not found, disables libpfm4 support. Please install libpfm4-dev

   - Add a feature test for scandirat(), which is not implemented so
     far in musl and uclibc, disabling features that need it, such as
     scanning for tracepoints in /sys/kernel/tracing/events.
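  For instance, with the new knob set, a plain in-tree build without
  the libtraceevent-dependent tools would look like this (illustrative
  invocation, not taken from the commit message):

    $ make -C tools/perf NO_LIBTRACEEVENT=1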
  perf BPF filters:

   - New feature where BPF can be used to filter samples, for instance:

       $ sudo ./perf record -e cycles --filter 'period > 1000' true
       $ sudo ./perf script
       perf-exec 2273949 546850.708501:   5029 cycles: ffffffff826f9e25 finish_wait+0x5 ([kernel.kallsyms])
       perf-exec 2273949 546850.708508:  32409 cycles: ffffffff826f9e25 finish_wait+0x5 ([kernel.kallsyms])
       perf-exec 2273949 546850.708526: 143369 cycles: ffffffff82b4cdbf xas_start+0x5f ([kernel.kallsyms])
       perf-exec 2273949 546850.708600: 372650 cycles: ffffffff8286b8f7 __pagevec_lru_add+0x117 ([kernel.kallsyms])
       perf-exec 2273949 546850.708791: 482953 cycles: ffffffff829190de __mod_memcg_lruvec_state+0x4e ([kernel.kallsyms])
       true      2273949 546850.709036: 501985 cycles: ffffffff828add7c tlb_gather_mmu+0x4c ([kernel.kallsyms])
       true      2273949 546850.709292: 503065 cycles:     7f2446d97c03 _dl_map_object_deps+0x973 (/usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2)

   - In addition to 'period' (PERF_SAMPLE_PERIOD), the other
     PERF_SAMPLE_ values can be used for filtering, as well as some
     other sample-accessible values; from
     tools/perf/Documentation/perf-record.txt:

       Essentially the BPF filter expression is:

         <term> <operator> <value> (("," | "||") <term> <operator> <value>)*

       The <term> can be one of:
         ip, id, tid, pid, cpu, time, addr, period, txn, weight,
         phys_addr, code_pgsz, data_pgsz, weight1, weight2, weight3,
         ins_lat, retire_lat, p_stage_cyc, mem_op, mem_lvl, mem_snoop,
         mem_remote, mem_lock, mem_dtlb, mem_blk, mem_hops

       The <operator> can be one of:
         ==, !=, >, >=, <, <=, &

       The <value> can be one of:
         <number> (for any term)
         na, load, store, pfetch, exec (for mem_op)
         l1, l2, l3, l4, cxl, io, any_cache, lfb, ram, pmem (for mem_lvl)
         na, none, hit, miss, hitm, fwd, peer (for mem_snoop)
         remote (for mem_remote)
         na, locked (for mem_lock)
         na, l1_hit, l1_miss, l2_hit, l2_miss, any_hit, any_miss, walk, fault (for mem_dtlb)
         na, by_data, by_addr (for mem_blk)
         hops0, hops1, hops2, hops3 (for mem_hops)
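  As an illustration of the grammar above (a constructed expression,
  not an example from this series, and assuming an event that actually
  supplies the memory sample fields), keeping only load samples that
  were satisfied beyond the L1 cache would look something like:

    $ sudo ./perf record -e <mem-event> --filter 'mem_op == load, mem_lvl > l1' -- true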
  perf lock contention:

   - Show lock type with address.

   - Track and show mmap_lock, siglock and per-cpu rq_lock with
     address. This is done for mmap_lock by following the current->mm
     pointer:

       $ sudo ./perf lock con -abl -- sleep 10
        contended   total wait     max wait     avg wait            address   symbol
        ...
            16344    312.30 ms      2.22 ms     19.11 us   ffff8cc702595640
            17686    310.08 ms      1.49 ms     17.53 us   ffff8cc7025952c0
                3     84.14 ms     45.79 ms     28.05 ms   ffff8cc78114c478   mmap_lock
             3557     76.80 ms     68.75 us     21.59 us   ffff8cc77ca3af58
                1     68.27 ms     68.27 ms     68.27 ms   ffff8cda745dfd70
                9     54.53 ms      7.96 ms      6.06 ms   ffff8cc7642a48b8   mmap_lock
            14629     44.01 ms     60.00 us      3.01 us   ffff8cc7625f9ca0
             3481     42.63 ms    140.71 us     12.24 us   ffffffff937906ac   vmap_area_lock
            16194     38.73 ms     42.15 us      2.39 us   ffff8cd397cbc560
               11     38.44 ms     10.39 ms      3.49 ms   ffff8ccd6d12fbb8   mmap_lock
                1      5.43 ms      5.43 ms      5.43 ms   ffff8cd70018f0d8
             1674      5.38 ms    422.93 us      3.21 us   ffffffff92e06080   tasklist_lock
              581      4.51 ms    130.68 us      7.75 us   ffff8cc9b1259058
                5      3.52 ms      1.27 ms    703.23 us   ffff8cc754510070
              112      3.47 ms     56.47 us     31.02 us   ffff8ccee38b3120
              381      3.31 ms     73.44 us      8.69 us   ffffffff93790690   purge_vmap_area_lock
              255      3.19 ms     36.35 us     12.49 us   ffff8d053ce30c80

   - Update the default map size to 16384.

   - Allocate the single letter option -M for --map-nr-entries, as it
     is proving to be frequently used.

   - Fix struct rq lock access for older kernels with BPF's CO-RE
     (Compile once, run everywhere).

   - Fix problems found with MSan.

  perf report/top:

   - Add inline information when using --call-graph=fp or lbr, as was
     already done for the --call-graph=dwarf callchain mode.

   - Improve the 'srcfile' sort key performance by really using an
     optimization introduced in 6.2 for the 'srcline' sort key that
     avoids calling addr2line for comparison with each sample.

  perf sched:

   - Make 'perf sched latency/map/replay' use "sched:sched_waking"
     instead of "sched:sched_wakeup", consistent with 'perf record'
     since d566a9c2d482 ("perf sched: Prefer sched_waking event when
     it exists").

  perf ftrace:

   - Make system-wide the default target for the latency subcommand;
     run the following command, then generate some network traffic and
     press control+C:

       # perf ftrace latency -T __kfree_skb
       ^C
       DURATION        |      COUNT | GRAPH          |
          0 - 1    us  |         27 | ############# |
          1 - 2    us  |         22 | ###########   |
          2 - 4    us  |          8 | ####          |
          4 - 8    us  |          5 | ##            |
          8 - 16   us  |         24 | ############  |
         16 - 32   us  |          2 | #             |
         32 - 64   us  |          1 |               |
         64 - 128  us  |          0 |               |
        128 - 256  us  |          0 |               |
        256 - 512  us  |          0 |               |
        512 - 1024 us  |          0 |               |
          1 - 2    ms  |          0 |               |
          2 - 4    ms  |          0 |               |
          4 - 8    ms  |          0 |               |
          8 - 16   ms  |          0 |               |
         16 - 32   ms  |          0 |               |
         32 - 64   ms  |          0 |               |
         64 - 128  ms  |          0 |               |
        128 - 256  ms  |          0 |               |
        256 - 512  ms  |          0 |               |
        512 - 1024 ms  |          0 |               |
          1 - ...   s  |          0 |               |

  perf top:

   - Add the --branch-history (LBR: Last Branch Record) option, just
     like already available for 'perf record'.

   - Fix segfault in thread__comm_len() where thread->comm was being
     used outside thread->comm_lock.

  perf annotate:

   - Allow configuring objdump and addr2line in ~/.perfconfig, so that
     you can use alternative binaries, such as llvm's.

  perf kvm:

   - Add TUI mode for 'perf kvm stat report'.

  Reference counting:

   - Add reference count checking infrastructure to check for use
     after free, done to the 'cpumap', 'namespaces', 'maps' and 'map'
     structs, more to come. To build with it, use -DREFCNT_CHECKING=1
     in the make command line to build tools/perf. Documented at:

       https://perf.wiki.kernel.org/index.php/Reference_Count_Checking

   - The above caught, for instance, this fix, present in this series:

     - Fix maps use after put in 'perf test "Share thread maps"':

       'maps' is copied from leader, but the leader is put on line 79
       and then 'maps' is used to read the reference count below - so
       a use after put, with the put of maps happening within
       thread__put. Fixed by reversing the order of puts so that the
       leader is put last.

   - Also several fixes were made to places where reference counts
     were not being held.

   - Make this one of the tests in 'make -C tools/perf build-test' to
     regularly build test it and to make sure no direct accesses to
     the reference counted structs are made, doing that via accessors
     to check the validity of the struct pointer.
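  The machine.c changes below adopt this discipline throughout: struct
  fields are reached via accessors (map__dso(), map__start(), ...),
  maps__insert() can now fail and takes its own reference, and every
  map__get() is paired with a map__put(). Here is a minimal sketch of
  that get/put pattern in plain C - not perf's actual refcount_t /
  RC_CHK machinery; the names only mirror the patch for readability:

    #include <stdlib.h>

    struct dso;                        /* opaque for this sketch */

    struct map {
            int refcnt;                /* perf wraps this in refcount checking */
            unsigned long start;
            struct dso *dso;
    };

    /* Accessors instead of direct field access, as in the patch. */
    static unsigned long map__start(const struct map *map) { return map->start; }
    static struct dso *map__dso(const struct map *map) { return map->dso; }

    static struct map *map__get(struct map *map)
    {
            if (map)
                    map->refcnt++;     /* checked builds verify get/put pairing */
            return map;
    }

    static void map__put(struct map *map)
    {
            if (map && --map->refcnt == 0)
                    free(map);
    }

    int main(void)
    {
            struct map *map = calloc(1, sizeof(*map));

            if (!map)
                    return 1;
            map->refcnt = 1;                  /* creation reference */
            struct map *held = map__get(map); /* e.g. what maps__insert() takes */

            (void)map__start(held);           /* access only through accessors */
            (void)map__dso(held);

            map__put(map);  /* creator drops its reference when done */
            map__put(held); /* removal from the maps tree drops the last one */
            return 0;
    }

  Building tools/perf with -DREFCNT_CHECKING=1 in the make command
  line (e.g. via EXTRA_CFLAGS) makes the real implementation aim to
  flag accesses through stale pointers and gets without matching puts.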
  ARM64:

   - Fix 'perf report' segfault when filtering coresight traces by
     sparse lists of CPUs.

   - Add support for 'simd' as a sort field for 'perf report', to show
     ARM's NEON SIMD's predicate flags: "partial" and "empty".

  arm64 vendor events:

   - Add N1 metrics.

  Intel vendor events:

   - Add graniterapids, grandridge and sierraforest events.

   - Refresh events for: alderlake, alderlaken, broadwell,
     broadwellde, broadwellx, cascadelakex, haswell, haswellx,
     icelake, icelakex, jaketown, meteorlake, knightslanding,
     sandybridge, sapphirerapids, silvermont, skylake, tigerlake and
     westmereep-dp.

   - Refresh metrics for alderlake-n, broadwell, broadwellde,
     broadwellx, haswell, haswellx, icelakex, ivybridge, ivytown and
     skylakex.

  perf stat:

   - Implement --topdown using JSON metrics.

   - Add the TopdownL1 JSON metric as a default if present, but
     disable it for now for some Intel hybrid architectures; a series
     of patches addressing this is being reviewed and will be
     submitted for v6.5.

   - Use metrics for --smi-cost.

   - Update topdown documentation.

  Vendor events (JSON) infrastructure:

   - Add support for computing and printing metric threshold values.
     For instance, here is one found in the sapphirerapids json file:

       {
           "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
           "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
           "MetricGroup": "smi",
           "MetricName": "smi_cycles",
           "MetricThreshold": "smi_cycles > 0.1",
           "ScaleUnit": "100%"
       },

   - Test parsing metric thresholds with the fake PMU in 'perf test
     pmu-events'.

   - Support printing metric thresholds in 'perf list'.

   - Add a --metric-no-threshold option to 'perf stat'.

   - Add rand (reverse and) and has_pmem (optane memory) support to
     metrics.

   - Sort the list of input files to avoid depending on the order from
     readdir(), helping in obtaining reproducible builds.

  S/390:

   - Add common metrics: CPI (cycles per instruction), prbstate (ratio
     of instructions executed in problem state compared to total
     number of instructions), l1mp (Level one instruction and data
     cache misses per 100 instructions).

   - Add cache metrics for z13, z14, z15 and z16.

   - Add metric for TLB and cache.

  ARM:

   - Add raw decoding for SPE (Statistical Profiling Extension) v1.3
     MTE (Memory Tagging Extension) and MOPS (Memory Operations)
     load/store.

  Intel PT hardware tracing:

   - Add event type names UINTR (User interrupt delivered) and UIRET
     (Exiting from user interrupt routine), documented in table 32-50
     "CFE Packet Type and Vector Fields Details" in the Intel
     Processor Trace chapter of The Intel SDM Volume 3 version 078.

   - Add support for new branch instructions ERETS and ERETU.

   - Fix CYC timestamps after standalone CBR.

  ARM CoreSight hardware tracing:

   - Allow the user to override timestamp and contextid settings.

   - Fix segfault in dso lookup.

   - Fix timeless decode mode detection.

   - Add separate decode paths for timeless and per-thread modes.

  auxtrace:

   - Fix address filter entire kernel size.

  Miscellaneous:

   - Fix use-after-free and unaligned bugs in the PLT handling
     routines.

   - Use zfree() to reduce chances of use after free.

   - Add missing 0x prefix for addresses printed in hexadecimal in
     'perf probe'.

   - Suppress massive unsupported target platform errors in the unwind
     code.

   - Fix the incorrect build_id size returned by elf_read_build_id().

   - Fix 'perf scripts intel-pt-events.py' IPC output for Python 2.

   - Add the missing new kfree_skb tracepoint parameter to the python
     scripts using it.

   - Add 'perf bench syscall fork' benchmark.

   - Add support for printing PERF_MEM_LVLNUM_UNC (Uncached access) in
     'perf mem'.

   - Fix the wrong size expectation for perf test 'Setup struct
     perf_event_attr' caused by the patch adding
     perf_event_attr::config3.
   - Fix some spelling mistakes"

* tag 'perf-tools-for-v6.4-3-2023-05-06' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux: (365 commits)
  Revert "perf build: Make BUILD_BPF_SKEL default, rename to NO_BPF_SKEL"
  Revert "perf build: Warn for BPF skeletons if endian mismatches"
  perf metrics: Fix SEGV with --for-each-cgroup
  perf bpf skels: Stop using vmlinux.h generated from BTF, use subset of used structs + CO-RE
  perf stat: Separate bperf from bpf_profiler
  perf test record+probe_libc_inet_pton: Fix call chain match on x86_64
  perf test record+probe_libc_inet_pton: Fix call chain match on s390
  perf tracepoint: Fix memory leak in is_valid_tracepoint()
  perf cs-etm: Add fix for coresight trace for any range of CPUs
  perf build: Fix unescaped # in perf build-test
  perf unwind: Suppress massive unsupported target platform errors
  perf script: Add new parameter in kfree_skb tracepoint to the python scripts using it
  perf script: Print raw ip instead of binary offset for callchain
  perf symbols: Fix return incorrect build_id size in elf_read_build_id()
  perf list: Modify the warning message about scandirat(3)
  perf list: Fix memory leaks in print_tracepoint_events()
  perf lock contention: Rework offset calculation with BPF CO-RE
  perf lock contention: Fix struct rq lock access
  perf stat: Disable TopdownL1 on hybrid
  perf stat: Avoid SEGV on counter->name
  ...
Diffstat (limited to 'tools/perf/util/machine.c')
-rw-r--r--  tools/perf/util/machine.c | 257
 1 file changed, 157 insertions(+), 100 deletions(-)
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 803c9d1803dd..9e02e19c1b7a 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -44,10 +44,11 @@
#include <linux/zalloc.h>
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
+static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip);
static struct dso *machine__kernel_dso(struct machine *machine)
{
- return machine->vmlinux_map->dso;
+ return map__dso(machine->vmlinux_map);
}
static void dsos__init(struct dsos *dsos)
@@ -434,7 +435,7 @@ static struct thread *findnew_guest_code(struct machine *machine,
return NULL;
/* Assume maps are set up if there are any */
- if (thread->maps->nr_maps)
+ if (maps__nr_maps(thread->maps))
return thread;
host_thread = machine__find_thread(host_machine, -1, pid);
@@ -878,46 +879,67 @@ static int machine__process_ksymbol_register(struct machine *machine,
struct perf_sample *sample __maybe_unused)
{
struct symbol *sym;
+ struct dso *dso;
struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
+ bool put_map = false;
+ int err = 0;
if (!map) {
- struct dso *dso = dso__new(event->ksymbol.name);
+ dso = dso__new(event->ksymbol.name);
- if (dso) {
- dso->kernel = DSO_SPACE__KERNEL;
- map = map__new2(0, dso);
- dso__put(dso);
+ if (!dso) {
+ err = -ENOMEM;
+ goto out;
}
-
- if (!dso || !map) {
- return -ENOMEM;
+ dso->kernel = DSO_SPACE__KERNEL;
+ map = map__new2(0, dso);
+ dso__put(dso);
+ if (!map) {
+ err = -ENOMEM;
+ goto out;
}
-
+ /*
+ * The inserted map has a get on it, we need to put to release
+ * the reference count here, but do it after all accesses are
+ * done.
+ */
+ put_map = true;
if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
- map->dso->binary_type = DSO_BINARY_TYPE__OOL;
- map->dso->data.file_size = event->ksymbol.len;
- dso__set_loaded(map->dso);
+ dso->binary_type = DSO_BINARY_TYPE__OOL;
+ dso->data.file_size = event->ksymbol.len;
+ dso__set_loaded(dso);
+ }
+
+ map__set_start(map, event->ksymbol.addr);
+ map__set_end(map, map__start(map) + event->ksymbol.len);
+ err = maps__insert(machine__kernel_maps(machine), map);
+ if (err) {
+ err = -ENOMEM;
+ goto out;
}
- map->start = event->ksymbol.addr;
- map->end = map->start + event->ksymbol.len;
- maps__insert(machine__kernel_maps(machine), map);
- map__put(map);
dso__set_loaded(dso);
if (is_bpf_image(event->ksymbol.name)) {
dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
dso__set_long_name(dso, "", false);
}
+ } else {
+ dso = map__dso(map);
}
- sym = symbol__new(map->map_ip(map, map->start),
+ sym = symbol__new(map__map_ip(map, map__start(map)),
event->ksymbol.len,
0, 0, event->ksymbol.name);
- if (!sym)
- return -ENOMEM;
- dso__insert_symbol(map->dso, sym);
- return 0;
+ if (!sym) {
+ err = -ENOMEM;
+ goto out;
+ }
+ dso__insert_symbol(dso, sym);
+out:
+ if (put_map)
+ map__put(map);
+ return err;
}
static int machine__process_ksymbol_unregister(struct machine *machine,
@@ -931,12 +953,14 @@ static int machine__process_ksymbol_unregister(struct machine *machine,
if (!map)
return 0;
- if (map != machine->vmlinux_map)
+ if (RC_CHK_ACCESS(map) != RC_CHK_ACCESS(machine->vmlinux_map))
maps__remove(machine__kernel_maps(machine), map);
else {
- sym = dso__find_symbol(map->dso, map->map_ip(map, map->start));
+ struct dso *dso = map__dso(map);
+
+ sym = dso__find_symbol(dso, map__map_ip(map, map__start(map)));
if (sym)
- dso__delete_symbol(map->dso, sym);
+ dso__delete_symbol(dso, sym);
}
return 0;
@@ -960,6 +984,7 @@ int machine__process_text_poke(struct machine *machine, union perf_event *event,
{
struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct dso *dso = map ? map__dso(map) : NULL;
if (dump_trace)
perf_event__fprintf_text_poke(event, machine, stdout);
@@ -972,7 +997,7 @@ int machine__process_text_poke(struct machine *machine, union perf_event *event,
return 0;
}
- if (map && map->dso) {
+ if (dso) {
u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
int ret;
@@ -981,7 +1006,7 @@ int machine__process_text_poke(struct machine *machine, union perf_event *event,
* must be done prior to using kernel maps.
*/
map__load(map);
- ret = dso__data_write_cache_addr(map->dso, map, machine,
+ ret = dso__data_write_cache_addr(dso, map, machine,
event->text_poke.addr,
new_bytes,
event->text_poke.new_len);
@@ -1002,6 +1027,7 @@ static struct map *machine__addnew_module_map(struct machine *machine, u64 start
struct map *map = NULL;
struct kmod_path m;
struct dso *dso;
+ int err;
if (kmod_path__parse_name(&m, filename))
return NULL;
@@ -1014,10 +1040,12 @@ static struct map *machine__addnew_module_map(struct machine *machine, u64 start
if (map == NULL)
goto out;
- maps__insert(machine__kernel_maps(machine), map);
-
- /* Put the map here because maps__insert already got it */
- map__put(map);
+ err = maps__insert(machine__kernel_maps(machine), map);
+ /* If maps__insert failed, return NULL. */
+ if (err) {
+ map__put(map);
+ map = NULL;
+ }
out:
/* put the dso here, corresponding to machine__findnew_module_dso */
dso__put(dso);
@@ -1184,26 +1212,29 @@ int machine__create_extra_kernel_map(struct machine *machine,
{
struct kmap *kmap;
struct map *map;
+ int err;
map = map__new2(xm->start, kernel);
if (!map)
- return -1;
+ return -ENOMEM;
- map->end = xm->end;
- map->pgoff = xm->pgoff;
+ map__set_end(map, xm->end);
+ map__set_pgoff(map, xm->pgoff);
kmap = map__kmap(map);
strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
- maps__insert(machine__kernel_maps(machine), map);
+ err = maps__insert(machine__kernel_maps(machine), map);
- pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
- kmap->name, map->start, map->end);
+ if (!err) {
+ pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
+ kmap->name, map__start(map), map__end(map));
+ }
map__put(map);
- return 0;
+ return err;
}
static u64 find_entry_trampoline(struct dso *dso)
@@ -1244,23 +1275,23 @@ int machine__map_x86_64_entry_trampolines(struct machine *machine,
struct maps *kmaps = machine__kernel_maps(machine);
int nr_cpus_avail, cpu;
bool found = false;
- struct map *map;
+ struct map_rb_node *rb_node;
u64 pgoff;
/*
* In the vmlinux case, pgoff is a virtual address which must now be
* mapped to a vmlinux offset.
*/
- maps__for_each_entry(kmaps, map) {
+ maps__for_each_entry(kmaps, rb_node) {
+ struct map *dest_map, *map = rb_node->map;
struct kmap *kmap = __map__kmap(map);
- struct map *dest_map;
if (!kmap || !is_entry_trampoline(kmap->name))
continue;
- dest_map = maps__find(kmaps, map->pgoff);
+ dest_map = maps__find(kmaps, map__pgoff(map));
if (dest_map != map)
- map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
+ map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
found = true;
}
if (found || machine->trampolines_mapped)
@@ -1306,13 +1337,14 @@ __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
/* In case of renewal the kernel map, destroy previous one */
machine__destroy_kernel_maps(machine);
+ map__put(machine->vmlinux_map);
machine->vmlinux_map = map__new2(0, kernel);
if (machine->vmlinux_map == NULL)
- return -1;
+ return -ENOMEM;
- machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
- maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
- return 0;
+ map__set_map_ip(machine->vmlinux_map, identity__map_ip);
+ map__set_unmap_ip(machine->vmlinux_map, identity__map_ip);
+ return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
}
void machine__destroy_kernel_maps(struct machine *machine)
@@ -1410,10 +1442,11 @@ int machines__create_kernel_maps(struct machines *machines, pid_t pid)
int machine__load_kallsyms(struct machine *machine, const char *filename)
{
struct map *map = machine__kernel_map(machine);
- int ret = __dso__load_kallsyms(map->dso, filename, map, true);
+ struct dso *dso = map__dso(map);
+ int ret = __dso__load_kallsyms(dso, filename, map, true);
if (ret > 0) {
- dso__set_loaded(map->dso);
+ dso__set_loaded(dso);
/*
* Since /proc/kallsyms will have multiple sessions for the
* kernel, with modules between them, fixup the end of all
@@ -1428,10 +1461,11 @@ int machine__load_kallsyms(struct machine *machine, const char *filename)
int machine__load_vmlinux_path(struct machine *machine)
{
struct map *map = machine__kernel_map(machine);
- int ret = dso__load_vmlinux_path(map->dso, map);
+ struct dso *dso = map__dso(map);
+ int ret = dso__load_vmlinux_path(dso, map);
if (ret > 0)
- dso__set_loaded(map->dso);
+ dso__set_loaded(dso);
return ret;
}
@@ -1473,6 +1507,7 @@ static bool is_kmod_dso(struct dso *dso)
static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
{
char *long_name;
+ struct dso *dso;
struct map *map = maps__find_by_name(maps, m->name);
if (map == NULL)
@@ -1482,16 +1517,17 @@ static int maps__set_module_path(struct maps *maps, const char *path, struct kmo
if (long_name == NULL)
return -ENOMEM;
- dso__set_long_name(map->dso, long_name, true);
- dso__kernel_module_get_build_id(map->dso, "");
+ dso = map__dso(map);
+ dso__set_long_name(dso, long_name, true);
+ dso__kernel_module_get_build_id(dso, "");
/*
* Full name could reveal us kmod compression, so
* we need to update the symtab_type if needed.
*/
- if (m->comp && is_kmod_dso(map->dso)) {
- map->dso->symtab_type++;
- map->dso->comp = m->comp;
+ if (m->comp && is_kmod_dso(dso)) {
+ dso->symtab_type++;
+ dso->comp = m->comp;
}
return 0;
@@ -1588,10 +1624,10 @@ static int machine__create_module(void *arg, const char *name, u64 start,
map = machine__addnew_module_map(machine, start, name);
if (map == NULL)
return -1;
- map->end = start + size;
-
- dso__kernel_module_get_build_id(map->dso, machine->root_dir);
+ map__set_end(map, start + size);
+ dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
+ map__put(map);
return 0;
}
@@ -1624,35 +1660,38 @@ static int machine__create_modules(struct machine *machine)
static void machine__set_kernel_mmap(struct machine *machine,
u64 start, u64 end)
{
- machine->vmlinux_map->start = start;
- machine->vmlinux_map->end = end;
+ map__set_start(machine->vmlinux_map, start);
+ map__set_end(machine->vmlinux_map, end);
/*
* Be a bit paranoid here, some perf.data file came with
* a zero sized synthesized MMAP event for the kernel.
*/
if (start == 0 && end == 0)
- machine->vmlinux_map->end = ~0ULL;
+ map__set_end(machine->vmlinux_map, ~0ULL);
}
-static void machine__update_kernel_mmap(struct machine *machine,
+static int machine__update_kernel_mmap(struct machine *machine,
u64 start, u64 end)
{
- struct map *map = machine__kernel_map(machine);
+ struct map *orig, *updated;
+ int err;
- map__get(map);
- maps__remove(machine__kernel_maps(machine), map);
+ orig = machine->vmlinux_map;
+ updated = map__get(orig);
+ machine->vmlinux_map = updated;
machine__set_kernel_mmap(machine, start, end);
+ maps__remove(machine__kernel_maps(machine), orig);
+ err = maps__insert(machine__kernel_maps(machine), updated);
+ map__put(orig);
- maps__insert(machine__kernel_maps(machine), map);
- map__put(map);
+ return err;
}
int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
const char *name = NULL;
- struct map *map;
u64 start = 0, end = ~0ULL;
int ret;
@@ -1684,7 +1723,9 @@ int machine__create_kernel_maps(struct machine *machine)
* we have a real start address now, so re-order the kmaps
* assume it's the last in the kmaps
*/
- machine__update_kernel_mmap(machine, start, end);
+ ret = machine__update_kernel_mmap(machine, start, end);
+ if (ret < 0)
+ goto out_put;
}
if (machine__create_extra_kernel_maps(machine, kernel))
@@ -1692,9 +1733,12 @@ int machine__create_kernel_maps(struct machine *machine)
if (end == ~0ULL) {
/* update end address of the kernel map using adjacent module address */
- map = map__next(machine__kernel_map(machine));
- if (map)
- machine__set_kernel_mmap(machine, start, map->start);
+ struct map_rb_node *rb_node = maps__find_node(machine__kernel_maps(machine),
+ machine__kernel_map(machine));
+ struct map_rb_node *next = map_rb_node__next(rb_node);
+
+ if (next)
+ machine__set_kernel_mmap(machine, start, map__start(next->map));
}
out_put:
@@ -1767,10 +1811,10 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
if (map == NULL)
goto out_problem;
- map->end = map->start + xm->end - xm->start;
+ map__set_end(map, map__start(map) + xm->end - xm->start);
if (build_id__is_defined(bid))
- dso__set_build_id(map->dso, bid);
+ dso__set_build_id(map__dso(map), bid);
} else if (is_kernel_mmap) {
const char *symbol_name = xm->name + strlen(mmap_name);
@@ -1827,7 +1871,10 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
if (strstr(kernel->long_name, "vmlinux"))
dso__set_short_name(kernel, "[kernel.vmlinux]", false);
- machine__update_kernel_mmap(machine, xm->start, xm->end);
+ if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
+ dso__put(kernel);
+ goto out_problem;
+ }
if (build_id__is_defined(bid))
dso__set_build_id(kernel, bid);
@@ -2227,18 +2274,20 @@ static char *callchain_srcline(struct map_symbol *ms, u64 ip)
{
struct map *map = ms->map;
char *srcline = NULL;
+ struct dso *dso;
if (!map || callchain_param.key == CCKEY_FUNCTION)
return srcline;
- srcline = srcline__tree_find(&map->dso->srclines, ip);
+ dso = map__dso(map);
+ srcline = srcline__tree_find(&dso->srclines, ip);
if (!srcline) {
bool show_sym = false;
bool show_addr = callchain_param.key == CCKEY_ADDRESS;
- srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
+ srcline = get_srcline(dso, map__rip_2objdump(map, ip),
ms->sym, show_sym, show_addr, ip);
- srcline__tree_insert(&map->dso->srclines, ip, srcline);
+ srcline__tree_insert(&dso->srclines, ip, srcline);
}
return srcline;
@@ -2262,7 +2311,7 @@ static int add_callchain_ip(struct thread *thread,
{
struct map_symbol ms;
struct addr_location al;
- int nr_loop_iter = 0;
+ int nr_loop_iter = 0, err;
u64 iter_cycles = 0;
const char *srcline = NULL;
@@ -2322,10 +2371,16 @@ static int add_callchain_ip(struct thread *thread,
ms.maps = al.maps;
ms.map = al.map;
ms.sym = al.sym;
+
+ if (!branch && append_inlines(cursor, &ms, ip) == 0)
+ return 0;
+
srcline = callchain_srcline(&ms, al.addr);
- return callchain_cursor_append(cursor, ip, &ms,
- branch, flags, nr_loop_iter,
- iter_cycles, branch_from, srcline);
+ err = callchain_cursor_append(cursor, ip, &ms,
+ branch, flags, nr_loop_iter,
+ iter_cycles, branch_from, srcline);
+ map__put(al.map);
+ return err;
}
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
@@ -2822,7 +2877,7 @@ static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
static u64 get_leaf_frame_caller(struct perf_sample *sample,
struct thread *thread, int usr_idx)
{
- if (machine__normalized_is(thread->maps->machine, "arm64"))
+ if (machine__normalized_is(maps__machine(thread->maps), "arm64"))
return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
else
return 0;
@@ -3014,21 +3069,23 @@ static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms
struct map *map = ms->map;
struct inline_node *inline_node;
struct inline_list *ilist;
+ struct dso *dso;
u64 addr;
int ret = 1;
if (!symbol_conf.inline_name || !map || !sym)
return ret;
- addr = map__map_ip(map, ip);
+ addr = map__dso_map_ip(map, ip);
addr = map__rip_2objdump(map, addr);
+ dso = map__dso(map);
- inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
+ inline_node = inlines__tree_find(&dso->inlined_nodes, addr);
if (!inline_node) {
- inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
+ inline_node = dso__parse_addr_inlines(dso, addr, sym);
if (!inline_node)
return ret;
- inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
+ inlines__tree_insert(&dso->inlined_nodes, inline_node);
}
list_for_each_entry(ilist, &inline_node->val, list) {
@@ -3064,7 +3121,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
* its corresponding binary.
*/
if (entry->ms.map)
- addr = map__map_ip(entry->ms.map, entry->ip);
+ addr = map__dso_map_ip(entry->ms.map, entry->ip);
srcline = callchain_srcline(&entry->ms, addr);
return callchain_cursor_append(cursor, entry->ip, &entry->ms,
@@ -3254,7 +3311,7 @@ int machine__get_kernel_start(struct machine *machine)
* kernel_start = 1ULL << 63 for x86_64.
*/
if (!err && !machine__is(machine, "x86_64"))
- machine->kernel_start = map->start;
+ machine->kernel_start = map__start(map);
}
return err;
}
@@ -3305,8 +3362,8 @@ char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, ch
if (sym == NULL)
return NULL;
- *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
- *addrp = map->unmap_ip(map, sym->start);
+ *modp = __map__is_kmodule(map) ? (char *)map__dso(map)->short_name : NULL;
+ *addrp = map__unmap_ip(map, sym->start);
return sym->name;
}
@@ -3325,11 +3382,11 @@ int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv
int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
{
struct maps *maps = machine__kernel_maps(machine);
- struct map *map;
+ struct map_rb_node *pos;
int err = 0;
- for (map = maps__first(maps); map != NULL; map = map__next(map)) {
- err = fn(map, priv);
+ maps__for_each_entry(maps, pos) {
+ err = fn(pos->map, priv);
if (err != 0) {
break;
}
@@ -3349,17 +3406,17 @@ bool machine__is_lock_function(struct machine *machine, u64 addr)
return false;
}
- machine->sched.text_start = kmap->unmap_ip(kmap, sym->start);
+ machine->sched.text_start = map__unmap_ip(kmap, sym->start);
/* should not fail from here */
sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
- machine->sched.text_end = kmap->unmap_ip(kmap, sym->start);
+ machine->sched.text_end = map__unmap_ip(kmap, sym->start);
sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
- machine->lock.text_start = kmap->unmap_ip(kmap, sym->start);
+ machine->lock.text_start = map__unmap_ip(kmap, sym->start);
sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
- machine->lock.text_end = kmap->unmap_ip(kmap, sym->start);
+ machine->lock.text_end = map__unmap_ip(kmap, sym->start);
}
/* failed to get kernel symbols */