Diffstat (limited to 'tools/lib')
-rw-r--r--  tools/lib/bpf/bpf_helpers.h                        |  2
-rw-r--r--  tools/lib/bpf/btf.c                                | 83
-rw-r--r--  tools/lib/bpf/btf.h                                |  2
-rw-r--r--  tools/lib/bpf/btf_dump.c                           | 39
-rw-r--r--  tools/lib/bpf/libbpf.c                             | 34
-rw-r--r--  tools/lib/bpf/libbpf.map                           |  2
-rw-r--r--  tools/lib/perf/Documentation/libperf-counting.txt  | 14
-rw-r--r--  tools/lib/perf/Documentation/libperf-sampling.txt  | 13
-rw-r--r--  tools/lib/perf/Documentation/libperf.txt           |  4
9 files changed, 161 insertions, 32 deletions
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index bc14db706b88..e9a4ecddb7a5 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -40,7 +40,7 @@
  * Helper macro to manipulate data structures
  */
 #ifndef offsetof
-#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
+#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
 #endif
 #ifndef container_of
 #define container_of(ptr, type, member) \
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 4843e44916f7..7dfca7016aaa 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -41,6 +41,7 @@ struct btf {
         __u32 types_size;
         __u32 data_size;
         int fd;
+        int ptr_sz;
 };
 
 static inline __u64 ptr_to_u64(const void *ptr)
@@ -221,6 +222,70 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
         return btf->types[type_id];
 }
 
+static int determine_ptr_size(const struct btf *btf)
+{
+        const struct btf_type *t;
+        const char *name;
+        int i;
+
+        for (i = 1; i <= btf->nr_types; i++) {
+                t = btf__type_by_id(btf, i);
+                if (!btf_is_int(t))
+                        continue;
+
+                name = btf__name_by_offset(btf, t->name_off);
+                if (!name)
+                        continue;
+
+                if (strcmp(name, "long int") == 0 ||
+                    strcmp(name, "long unsigned int") == 0) {
+                        if (t->size != 4 && t->size != 8)
+                                continue;
+                        return t->size;
+                }
+        }
+
+        return -1;
+}
+
+static size_t btf_ptr_sz(const struct btf *btf)
+{
+        if (!btf->ptr_sz)
+                ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
+        return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
+}
+
+/* Return pointer size this BTF instance assumes. The size is heuristically
+ * determined by looking for 'long' or 'unsigned long' integer type and
+ * recording its size in bytes. If BTF type information doesn't have any such
+ * type, this function returns 0. In the latter case, native architecture's
+ * pointer size is assumed, so will be either 4 or 8, depending on
+ * architecture that libbpf was compiled for. It's possible to override
+ * guessed value by using btf__set_pointer_size() API.
+ */
+size_t btf__pointer_size(const struct btf *btf)
+{
+        if (!btf->ptr_sz)
+                ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
+
+        if (btf->ptr_sz < 0)
+                /* not enough BTF type info to guess */
+                return 0;
+
+        return btf->ptr_sz;
+}
+
+/* Override or set pointer size in bytes. Only values of 4 and 8 are
+ * supported.
+ */
+int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
+{
+        if (ptr_sz != 4 && ptr_sz != 8)
+                return -EINVAL;
+        btf->ptr_sz = ptr_sz;
+        return 0;
+}
+
 static bool btf_type_is_void(const struct btf_type *t)
 {
         return t == &btf_void || btf_is_fwd(t);
@@ -253,7 +318,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
                 size = t->size;
                 goto done;
         case BTF_KIND_PTR:
-                size = sizeof(void *);
+                size = btf_ptr_sz(btf);
                 goto done;
         case BTF_KIND_TYPEDEF:
         case BTF_KIND_VOLATILE:
@@ -293,9 +358,9 @@ int btf__align_of(const struct btf *btf, __u32 id)
         switch (kind) {
         case BTF_KIND_INT:
         case BTF_KIND_ENUM:
-                return min(sizeof(void *), (size_t)t->size);
+                return min(btf_ptr_sz(btf), (size_t)t->size);
         case BTF_KIND_PTR:
-                return sizeof(void *);
+                return btf_ptr_sz(btf);
         case BTF_KIND_TYPEDEF:
         case BTF_KIND_VOLATILE:
         case BTF_KIND_CONST:
@@ -533,6 +598,18 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
         if (IS_ERR(btf))
                 goto done;
 
+        switch (gelf_getclass(elf)) {
+        case ELFCLASS32:
+                btf__set_pointer_size(btf, 4);
+                break;
+        case ELFCLASS64:
+                btf__set_pointer_size(btf, 8);
+                break;
+        default:
+                pr_warn("failed to get ELF class (bitness) for %s\n", path);
+                break;
+        }
+
         if (btf_ext && btf_ext_data) {
                 *btf_ext = btf_ext__new(btf_ext_data->d_buf,
                                         btf_ext_data->d_size);
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index f4a1a1d2b9a3..1ca14448df4c 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -76,6 +76,8 @@ LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
 LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
 LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
                                                   __u32 id);
+LIBBPF_API size_t btf__pointer_size(const struct btf *btf);
+LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz);
 LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
 LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
 LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index cf711168d34a..57c00fa63932 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -13,6 +13,7 @@
 #include <errno.h>
 #include <linux/err.h>
 #include <linux/btf.h>
+#include <linux/kernel.h>
 #include "btf.h"
 #include "hashmap.h"
 #include "libbpf.h"
@@ -60,6 +61,7 @@ struct btf_dump {
         const struct btf_ext *btf_ext;
         btf_dump_printf_fn_t printf_fn;
         struct btf_dump_opts opts;
+        int ptr_sz;
         bool strip_mods;
 
         /* per-type auxiliary state */
@@ -138,6 +140,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
         d->btf_ext = btf_ext;
         d->printf_fn = printf_fn;
         d->opts.ctx = opts ? opts->ctx : NULL;
+        d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
 
         d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
         if (IS_ERR(d->type_names)) {
@@ -549,6 +552,9 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
         }
 }
 
+static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
+                                          const struct btf_type *t);
+
 static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
                                      const struct btf_type *t);
 static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
@@ -671,6 +677,9 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
 
         switch (kind) {
         case BTF_KIND_INT:
+                /* Emit type alias definitions if necessary */
+                btf_dump_emit_missing_aliases(d, id, t);
+
                 tstate->emit_state = EMITTED;
                 break;
         case BTF_KIND_ENUM:
@@ -797,7 +806,7 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
                                       int align, int lvl)
 {
         int off_diff = m_off - cur_off;
-        int ptr_bits = sizeof(void *) * 8;
+        int ptr_bits = d->ptr_sz * 8;
 
         if (off_diff <= 0)
                 /* no gap */
@@ -870,7 +879,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
                         btf_dump_printf(d, ": %d", m_sz);
                         off = m_off + m_sz;
                 } else {
-                        m_sz = max(0, btf__resolve_size(d->btf, m->type));
+                        m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type));
                         off = m_off + m_sz * 8;
                 }
                 btf_dump_printf(d, ";");
@@ -890,6 +899,32 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
                 btf_dump_printf(d, " __attribute__((packed))");
 }
 
+static const char *missing_base_types[][2] = {
+        /*
+         * GCC emits typedefs to its internal __PolyX_t types when compiling Arm
+         * SIMD intrinsics. Alias them to standard base types.
+         */
+        { "__Poly8_t",          "unsigned char" },
+        { "__Poly16_t",         "unsigned short" },
+        { "__Poly64_t",         "unsigned long long" },
+        { "__Poly128_t",        "unsigned __int128" },
+};
+
+static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
+                                          const struct btf_type *t)
+{
+        const char *name = btf_dump_type_name(d, id);
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) {
+                if (strcmp(name, missing_base_types[i][0]) == 0) {
+                        btf_dump_printf(d, "typedef %s %s;\n\n",
+                                        missing_base_types[i][1], name);
+                        break;
+                }
+        }
+}
+
 static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
                                    const struct btf_type *t)
 {
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 0a06124f7999..0ad0b0491e1f 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2264,7 +2264,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
         data = elf_getdata(scn, NULL);
         if (!scn || !data) {
                 pr_warn("failed to get Elf_Data from map section %d (%s)\n",
-                        obj->efile.maps_shndx, MAPS_ELF_SEC);
+                        obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
                 return -EINVAL;
         }
 
@@ -2434,6 +2434,8 @@ static int bpf_object__init_btf(struct bpf_object *obj,
                                 BTF_ELF_SEC, err);
                         goto out;
                 }
+                /* enforce 8-byte pointers for BPF-targeted BTFs */
+                btf__set_pointer_size(obj->btf, 8);
                 err = 0;
         }
         if (btf_ext_data) {
@@ -2542,6 +2544,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
                 if (IS_ERR(kern_btf))
                         return PTR_ERR(kern_btf);
 
+                /* enforce 8-byte pointers for BPF-targeted BTFs */
+                btf__set_pointer_size(obj->btf, 8);
                 bpf_object__sanitize_btf(obj, kern_btf);
         }
 
@@ -3478,10 +3482,11 @@ bpf_object__probe_global_data(struct bpf_object *obj)
 
         map = bpf_create_map_xattr(&map_attr);
         if (map < 0) {
-                cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+                ret = -errno;
+                cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
                 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
-                        __func__, cp, errno);
-                return -errno;
+                        __func__, cp, -ret);
+                return ret;
         }
 
         insns[0].imm = map;
@@ -5194,7 +5199,8 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
 static int bpf_object__collect_map_relos(struct bpf_object *obj,
                                          GElf_Shdr *shdr, Elf_Data *data)
 {
-        int i, j, nrels, new_sz, ptr_sz = sizeof(void *);
+        const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
+        int i, j, nrels, new_sz;
         const struct btf_var_secinfo *vi = NULL;
         const struct btf_type *sec, *var, *def;
         const struct btf_member *member;
@@ -5243,7 +5249,7 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
 
                         vi = btf_var_secinfos(sec) + map->btf_var_idx;
                         if (vi->offset <= rel.r_offset &&
-                            rel.r_offset + sizeof(void *) <= vi->offset + vi->size)
+                            rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
                                 break;
                 }
                 if (j == obj->nr_maps) {
@@ -5279,17 +5285,20 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
                         return -EINVAL;
 
                 moff = rel.r_offset - vi->offset - moff;
-                if (moff % ptr_sz)
+                /* here we use BPF pointer size, which is always 64 bit, as we
+                 * are parsing ELF that was built for BPF target
+                 */
+                if (moff % bpf_ptr_sz)
                         return -EINVAL;
-                moff /= ptr_sz;
+                moff /= bpf_ptr_sz;
                 if (moff >= map->init_slots_sz) {
                         new_sz = moff + 1;
-                        tmp = realloc(map->init_slots, new_sz * ptr_sz);
+                        tmp = realloc(map->init_slots, new_sz * host_ptr_sz);
                         if (!tmp)
                                 return -ENOMEM;
                         map->init_slots = tmp;
                         memset(map->init_slots + map->init_slots_sz, 0,
-                               (new_sz - map->init_slots_sz) * ptr_sz);
+                               (new_sz - map->init_slots_sz) * host_ptr_sz);
                         map->init_slots_sz = new_sz;
                 }
                 map->init_slots[moff] = targ_map;
@@ -6012,9 +6021,10 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
         }
 
         if (bpf_obj_pin(prog->instances.fds[instance], path)) {
-                cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+                err = -errno;
+                cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
                 pr_warn("failed to pin program: %s\n", cp);
-                return -errno;
+                return err;
         }
 
         pr_debug("pinned program '%s'\n", path);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 0c4722bfdd0a..e35bd6cdbdbf 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -295,5 +295,7 @@ LIBBPF_0.1.0 {
                 bpf_program__set_sk_lookup;
                 btf__parse;
                 btf__parse_raw;
+                btf__pointer_size;
                 btf__set_fd;
+                btf__set_pointer_size;
 } LIBBPF_0.0.9;
diff --git a/tools/lib/perf/Documentation/libperf-counting.txt b/tools/lib/perf/Documentation/libperf-counting.txt
index cae9757f49c1..8b75efcd67ce 100644
--- a/tools/lib/perf/Documentation/libperf-counting.txt
+++ b/tools/lib/perf/Documentation/libperf-counting.txt
@@ -7,13 +7,13 @@ libperf-counting - counting interface
 
 DESCRIPTION
 -----------
-The counting interface provides API to meassure and get count for specific perf events.
+The counting interface provides API to measure and get count for specific perf events.
 
 The following test tries to explain count on `counting.c` example.
 It is by no means complete guide to counting, but shows libperf basic API
 for counting.
 
-The `counting.c` comes with libbperf package and can be compiled and run like:
+The `counting.c` comes with libperf package and can be compiled and run like:
 
 [source,bash]
 --
@@ -26,7 +26,8 @@ count 176242, enabled 176242, run 176242
 It requires root access, because of the `PERF_COUNT_SW_CPU_CLOCK` event,
 which is available only for root.
 
-The `counting.c` example monitors two events on the current process and displays their count, in a nutshel it:
+The `counting.c` example monitors two events on the current process and displays
+their count, in a nutshell it:
 
 * creates events
 * adds them to the event list
@@ -152,7 +153,7 @@ Configure event list with the thread map and open events:
 --
 
 Both events are created as disabled (note the `disabled = 1` assignment above),
-so we need to enable the whole list explicitely (both events).
+so we need to enable the whole list explicitly (both events).
 
 From this moment events are counting and we can do our workload.
 
@@ -167,7 +168,8 @@ When we are done we disable the events list.
  79         perf_evlist__disable(evlist);
 --
 
-Now we need to get the counts from events, following code iterates throught the events list and read counts:
+Now we need to get the counts from events, following code iterates through the
+events list and read counts:
 
 [source,c]
 --
@@ -178,7 +180,7 @@ Now we need to get the counts from events, following code iterates throught the
  85         }
 --
 
-And finaly cleanup.
+And finally cleanup.
 
 We close the whole events list (both events) and remove it together with the
 threads map:
diff --git a/tools/lib/perf/Documentation/libperf-sampling.txt b/tools/lib/perf/Documentation/libperf-sampling.txt
index d71a7b4fcf5f..d6ca24f6ef78 100644
--- a/tools/lib/perf/Documentation/libperf-sampling.txt
+++ b/tools/lib/perf/Documentation/libperf-sampling.txt
@@ -8,13 +8,13 @@ libperf-sampling - sampling interface
 
 DESCRIPTION
 -----------
-The sampling interface provides API to meassure and get count for specific perf events.
+The sampling interface provides API to measure and get count for specific perf events.
 
 The following test tries to explain count on `sampling.c` example.
 It is by no means complete guide to sampling, but shows libperf basic API
 for sampling.
 
-The `sampling.c` comes with libbperf package and can be compiled and run like:
+The `sampling.c` comes with libperf package and can be compiled and run like:
 
 [source,bash]
 --
@@ -33,7 +33,8 @@ cpu 0, pid 4465, tid 4470, ip 7f84fe0ebebf, period 176
 
 It requires root access, because it uses hardware cycles event.
 
-The `sampling.c` example profiles/samples all CPUs with hardware cycles, in a nutshel it:
+The `sampling.c` example profiles/samples all CPUs with hardware cycles, in a
+nutshell it:
 
 - creates events
 - adds them to the event list
@@ -90,7 +91,7 @@ Once the setup is complete we start by defining cycles event using the `struct p
  36 };
 --
 
-Next step is to prepare cpus map.
+Next step is to prepare CPUs map.
 
 In this case we will monitor all the available CPUs:
 
@@ -152,7 +153,7 @@ Once the events list is open, we can create memory maps AKA perf ring buffers:
 --
 
 The event is created as disabled (note the `disabled = 1` assignment above),
-so we need to enable the events list explicitely.
+so we need to enable the events list explicitly.
 
 From this moment the cycles event is sampling.
 
@@ -212,7 +213,7 @@ Each sample needs to get parsed:
 106                         cpu, pid, tid, ip, period);
 --
 
-And finaly cleanup.
+And finally cleanup.
 
 We close the whole events list (both events) and remove it together with the
 threads map:
diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt
index 5a6bb512789d..0c74c30ed23a 100644
--- a/tools/lib/perf/Documentation/libperf.txt
+++ b/tools/lib/perf/Documentation/libperf.txt
@@ -29,7 +29,7 @@ SYNOPSIS
   void libperf_init(libperf_print_fn_t fn);
 --
 
-*API to handle cpu maps:*
+*API to handle CPU maps:*
 
 [source,c]
 --
@@ -217,7 +217,7 @@ Following objects are key to the libperf interface:
 
 [horizontal]
 
-struct perf_cpu_map:: Provides a cpu list abstraction.
+struct perf_cpu_map:: Provides a CPU list abstraction.
 
 struct perf_thread_map:: Provides a thread list abstraction.
 
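The bpf_helpers.h hunk drops __builtin_offsetof() in favor of the classic null-pointer-cast definition. A minimal sketch of what that open-coded form computes; the macro name and `struct sample` below are invented for the illustration, to avoid clashing with the offsetof that stddef.h may already provide:

[source,c]
--
#include <stdio.h>

/* mirrors the definition bpf_helpers.h returns to */
#define local_offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)

struct sample {         /* invented struct, for illustration only */
        int pid;
        long ts;
};

int main(void)
{
        /* the open-coded form and the compiler builtin agree */
        printf("open-coded: %lu, builtin: %zu\n",
               local_offsetof(struct sample, ts),
               __builtin_offsetof(struct sample, ts));
        return 0;
}
--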
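btf__pointer_size() and btf__set_pointer_size() are new public APIs (exported in libbpf.map above). A minimal sketch of how a caller might use them when sizing types from a BTF blob; the object path is a placeholder:

[source,c]
--
#include <stdio.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

int main(void)
{
        /* "vmlinux.o" is a placeholder for any ELF carrying a .BTF section */
        struct btf *btf = btf__parse_elf("vmlinux.o", NULL);
        long err = libbpf_get_error(btf);

        if (err) {
                fprintf(stderr, "failed to parse BTF: %ld\n", err);
                return 1;
        }

        /* 0 means neither the ELF class nor the 'long' heuristic settled it */
        if (btf__pointer_size(btf) == 0)
                btf__set_pointer_size(btf, 8); /* assume 64-bit layout */

        printf("pointer size: %zu bytes\n", btf__pointer_size(btf));

        btf__free(btf);
        return 0;
}
--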
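The btf_dump change matters when re-compiling dumped headers on Arm: GCC describes NEON polynomial types via internal __PolyX_t base types that no header defines. A rough sketch of what a dumped header now looks like around such a type; `struct pkt` and its member are invented, real output comes from btf_dump__dump_type() or `bpftool btf dump format c`:

[source,c]
--
/* alias emitted by btf_dump_emit_missing_aliases() before first use */
typedef unsigned short __Poly16_t;

/* typedef carried over from the original GCC-compiled object */
typedef __Poly16_t poly16_t;

struct pkt {
        poly16_t crc;   /* invented member, for illustration */
};
--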
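Two libbpf.c hunks switch to capturing errno before reporting it, since the logging calls can themselves clobber errno. A self-contained sketch of the same pattern outside libbpf; open_checked() is a stand-in helper, not a libbpf function:

[source,c]
--
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

static int open_checked(const char *path)
{
        int fd = open(path, O_RDONLY);

        if (fd < 0) {
                int err = -errno;       /* snapshot before any other call */

                fprintf(stderr, "open(%s) failed: %s\n",
                        path, strerror(-err));
                return err;             /* negative errno, libbpf convention */
        }
        return fd;
}

int main(void)
{
        return open_checked("/nonexistent/path") < 0 ? 1 : 0;
}
--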
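The bpf_object__collect_map_relos() change separates two pointer sizes: offsets inside the BPF-targeted ELF always assume 8-byte pointers, while the init_slots[] array libbpf allocates lives in host memory. A worked sketch of the index math with invented offsets:

[source,c]
--
#include <stdio.h>

int main(void)
{
        const unsigned long bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
        unsigned long rel_off = 0x130;   /* relocation offset in .maps (invented) */
        unsigned long var_off = 0x100;   /* start of outer map variable (invented) */
        unsigned long values_off = 0x10; /* offset of its 'values' member (invented) */
        unsigned long moff = rel_off - var_off - values_off;    /* 0x20 */

        /* slot index comes from the BPF ELF layout: 0x20 / 8 = 4 ... */
        printf("slot index      : %lu\n", moff / bpf_ptr_sz);
        /* ... but the shadow array is sized with host pointers */
        printf("init_slots bytes: %lu\n",
               (moff / bpf_ptr_sz + 1) * host_ptr_sz);
        return 0;
}
--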
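The libperf documentation fixes are text-only, but for context, the flow that libperf-counting.txt walks through condenses to roughly the following sketch, modeled on the in-tree counting.c with error handling trimmed; details may differ from the shipped example:

[source,c]
--
#include <linux/perf_event.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/threadmap.h>
#include <stdio.h>

int main(void)
{
        struct perf_event_attr attr = {
                .type        = PERF_TYPE_SOFTWARE,
                .config      = PERF_COUNT_SW_CPU_CLOCK,
                .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                               PERF_FORMAT_TOTAL_TIME_RUNNING,
                .disabled    = 1,
        };
        struct perf_counts_values counts = { .val = 0 };
        struct perf_thread_map *threads;
        struct perf_evlist *evlist;
        struct perf_evsel *evsel;

        /* monitor the current process */
        threads = perf_thread_map__new_dummy();
        perf_thread_map__set_pid(threads, 0, 0);

        evlist = perf_evlist__new();
        evsel = perf_evsel__new(&attr);
        perf_evlist__add(evlist, evsel);
        perf_evlist__set_maps(evlist, NULL, threads);

        if (perf_evlist__open(evlist))
                return 1;       /* this event typically needs root */

        perf_evlist__enable(evlist);
        /* ... workload under measurement goes here ... */
        perf_evlist__disable(evlist);

        perf_evlist__for_each_evsel(evlist, evsel) {
                perf_evsel__read(evsel, 0, 0, &counts);
                printf("count %llu, enabled %llu, run %llu\n",
                       (unsigned long long)counts.val,
                       (unsigned long long)counts.ena,
                       (unsigned long long)counts.run);
        }

        perf_evlist__close(evlist);
        perf_evlist__delete(evlist);
        perf_thread_map__put(threads);
        return 0;
}
--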