author    Alexei Starovoitov <[email protected]>    2024-07-09 19:00:25 -0700
committer Alexei Starovoitov <[email protected]>    2024-07-09 19:03:36 -0700
commit    02779af2419a91f847c22e488f8d13ec301cf833 (patch)
tree      e4a4e77fe98c18d7efda89eabd3de9fd9d8bc962
parent    746d684ea579927015cde53cff8fc365caaf93b7 (diff)
parent    a459f4bb27f2e2730039c57786b82288742c8c74 (diff)
Merge branch 'fix-libbpf-bpf-skeleton-forward-backward-compat'
Andrii Nakryiko says:

====================
Fix libbpf BPF skeleton forward/backward compat

Fix a recently identified (but long-standing) bug in the handling of BPF
skeleton forward and backward compatibility. On the libbpf side, even
though the BPF skeleton was always designed to be forward and backward
compatible through recording the actual sizes of the constituents of the
BPF skeleton itself (map/prog/var skeleton definitions), the libbpf
implementation implicitly hard-coded those sizes by virtue of using
trivial array access syntax.

This issue only affects libbpf used as a shared library. A statically
compiled libbpf is always in sync with the BPF skeleton, bypassing this
problem altogether.

This patch set fixes libbpf, but also mitigates the problem for old
libbpf versions by teaching bpftool to generate a more conservative BPF
skeleton, if possible (i.e., if there are no struct_ops maps defined).

v1->v2:
  - fix SOB, add acks, typo fixes (Quentin, Eduard);
  - improve reporting of skipped map auto-attachment (Alan, Eduard).
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
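The core of the libbpf-side fix, visible in the libbpf.c hunks below, is to stop indexing skeleton arrays with the consumer's compile-time element size and instead advance by the element size recorded in the skeleton itself. Here is a minimal, self-contained sketch of that pattern; struct rec, walk(), and their members are illustrative stand-ins, not libbpf API:

#include <stddef.h>
#include <stdio.h>

/* illustrative record type: newer producers may append members at the
 * end, older producers emit only a prefix of this layout
 */
struct rec {
	const char *name;
	int value;	/* imagine this member was appended in a later version */
};

/* Walk records using the element size the *producer* recorded. Plain
 * recs[i] would hard-code sizeof(struct rec) at compile time, which is
 * exactly the incompatibility this series fixes.
 */
static void walk(const void *recs, size_t cnt, size_t rec_sz)
{
	size_t i;

	for (i = 0; i < cnt; i++) {
		const struct rec *r = (const void *)((const char *)recs + i * rec_sz);

		printf("%s\n", r->name);
		/* only touch members the producer actually emitted */
		if (rec_sz >= offsetof(struct rec, value) + sizeof(r->value))
			printf("  value=%d\n", r->value);
	}
}

int main(void)
{
	struct rec rs[] = { { "a", 1 }, { "b", 2 } };

	walk(rs, sizeof(rs) / sizeof(rs[0]), sizeof(struct rec));
	return 0;
}

The same stride-by-recorded-size loop appears in populate_skeleton_maps(), populate_skeleton_progs(), and the attach/detach paths in the patch below.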
-rw-r--r--  tools/bpf/bpftool/gen.c  46
-rw-r--r--  tools/lib/bpf/libbpf.c   71
2 files changed, 72 insertions(+), 45 deletions(-)
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 51eaed76db97..5a4d3240689e 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -852,24 +852,41 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool
{
struct bpf_map *map;
char ident[256];
- size_t i;
+ size_t i, map_sz;
if (!map_cnt)
return;
+ /* for backward compatibility with old libbpf versions that don't
+ * handle new BPF skeleton with new struct bpf_map_skeleton definition
+ * that includes link field, avoid specifying new increased size,
+ * unless we absolutely have to (i.e., if there are struct_ops maps
+ * present)
+ */
+ map_sz = offsetof(struct bpf_map_skeleton, link);
+ if (populate_links) {
+ bpf_object__for_each_map(map, obj) {
+ if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
+ map_sz = sizeof(struct bpf_map_skeleton);
+ break;
+ }
+ }
+ }
+
codegen("\
\n\
- \n\
+ \n\
/* maps */ \n\
s->map_cnt = %zu; \n\
- s->map_skel_sz = sizeof(*s->maps); \n\
- s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
+ s->map_skel_sz = %zu; \n\
+ s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt,\n\
+ sizeof(*s->maps) > %zu ? sizeof(*s->maps) : %zu);\n\
if (!s->maps) { \n\
err = -ENOMEM; \n\
goto err; \n\
} \n\
",
- map_cnt
+ map_cnt, map_sz, map_sz, map_sz
);
i = 0;
bpf_object__for_each_map(map, obj) {
@@ -878,23 +895,22 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool
codegen("\
\n\
- \n\
- s->maps[%zu].name = \"%s\"; \n\
- s->maps[%zu].map = &obj->maps.%s; \n\
+ \n\
+ map = (struct bpf_map_skeleton *)((char *)s->maps + %zu * s->map_skel_sz);\n\
+ map->name = \"%s\"; \n\
+ map->map = &obj->maps.%s; \n\
",
- i, bpf_map__name(map), i, ident);
+ i, bpf_map__name(map), ident);
/* memory-mapped internal maps */
if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) {
- printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
- i, ident);
+ printf("\tmap->mmaped = (void **)&obj->%s;\n", ident);
}
if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
codegen("\
\n\
- s->maps[%zu].link = &obj->links.%s;\n\
- ",
- i, ident);
+ map->link = &obj->links.%s; \n\
+ ", ident);
}
i++;
}
@@ -1463,6 +1479,7 @@ static int do_skeleton(int argc, char **argv)
%1$s__create_skeleton(struct %1$s *obj) \n\
{ \n\
struct bpf_object_skeleton *s; \n\
+ struct bpf_map_skeleton *map __attribute__((unused));\n\
int err; \n\
\n\
s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
@@ -1753,6 +1770,7 @@ static int do_subskeleton(int argc, char **argv)
{ \n\
struct %1$s *obj; \n\
struct bpf_object_subskeleton *s; \n\
+ struct bpf_map_skeleton *map __attribute__((unused));\n\
int err; \n\
\n\
obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
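The bpftool side of the fix, shown in the gen.c hunks above, reduces to a sizing decision: advertise the old pre-link prefix size via offsetof(), and only claim the full struct when links must be populated for struct_ops maps. A hedged sketch of that decision follows; struct map_skel is a simplified stand-in for the real struct bpf_map_skeleton:

#include <stdbool.h>
#include <stddef.h>

struct bpf_map;
struct bpf_link;

/* simplified stand-in; 'link' is the member that was appended after
 * older libbpf versions shipped
 */
struct map_skel {
	const char *name;
	struct bpf_map **map;
	void **mmaped;
	struct bpf_link **link;	/* newest member */
};

/* size the generated skeleton advertises: keep the old layout's size,
 * which old shared libbpf copies expect, unless link is really needed
 */
static size_t pick_map_skel_sz(bool populate_links, bool has_struct_ops)
{
	if (populate_links && has_struct_ops)
		return sizeof(struct map_skel);
	return offsetof(struct map_skel, link);
}

Note also that the generated calloc() call above reserves the larger of sizeof(*s->maps) and the advertised size per element, so in the common mismatch cases an old libbpf that (buggily) strides by its own sizeof(struct bpf_map_skeleton) still stays within the allocation.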
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 30f121754d83..a3be6f8fac09 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -13712,14 +13712,15 @@ int libbpf_num_possible_cpus(void)
static int populate_skeleton_maps(const struct bpf_object *obj,
struct bpf_map_skeleton *maps,
- size_t map_cnt)
+ size_t map_cnt, size_t map_skel_sz)
{
int i;
for (i = 0; i < map_cnt; i++) {
- struct bpf_map **map = maps[i].map;
- const char *name = maps[i].name;
- void **mmaped = maps[i].mmaped;
+ struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz;
+ struct bpf_map **map = map_skel->map;
+ const char *name = map_skel->name;
+ void **mmaped = map_skel->mmaped;
*map = bpf_object__find_map_by_name(obj, name);
if (!*map) {
@@ -13736,13 +13737,14 @@ static int populate_skeleton_maps(const struct bpf_object *obj,
static int populate_skeleton_progs(const struct bpf_object *obj,
struct bpf_prog_skeleton *progs,
- size_t prog_cnt)
+ size_t prog_cnt, size_t prog_skel_sz)
{
int i;
for (i = 0; i < prog_cnt; i++) {
- struct bpf_program **prog = progs[i].prog;
- const char *name = progs[i].name;
+ struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz;
+ struct bpf_program **prog = prog_skel->prog;
+ const char *name = prog_skel->name;
*prog = bpf_object__find_program_by_name(obj, name);
if (!*prog) {
@@ -13783,13 +13785,13 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
}
*s->obj = obj;
- err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
+ err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz);
if (err) {
pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
return libbpf_err(err);
}
- err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
+ err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz);
if (err) {
pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
return libbpf_err(err);
@@ -13819,20 +13821,20 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
return libbpf_err(-errno);
}
- err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
+ err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz);
if (err) {
pr_warn("failed to populate subskeleton maps: %d\n", err);
return libbpf_err(err);
}
- err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
+ err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz);
if (err) {
pr_warn("failed to populate subskeleton maps: %d\n", err);
return libbpf_err(err);
}
for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
- var_skel = &s->vars[var_idx];
+ var_skel = (void *)s->vars + var_idx * s->var_skel_sz;
map = *var_skel->map;
map_type_id = bpf_map__btf_value_type_id(map);
map_type = btf__type_by_id(btf, map_type_id);
@@ -13879,10 +13881,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
}
for (i = 0; i < s->map_cnt; i++) {
- struct bpf_map *map = *s->maps[i].map;
+ struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+ struct bpf_map *map = *map_skel->map;
size_t mmap_sz = bpf_map_mmap_sz(map);
int prot, map_fd = map->fd;
- void **mmaped = s->maps[i].mmaped;
+ void **mmaped = map_skel->mmaped;
if (!mmaped)
continue;
@@ -13930,8 +13933,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
int i, err;
for (i = 0; i < s->prog_cnt; i++) {
- struct bpf_program *prog = *s->progs[i].prog;
- struct bpf_link **link = s->progs[i].link;
+ struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
+ struct bpf_program *prog = *prog_skel->prog;
+ struct bpf_link **link = prog_skel->link;
if (!prog->autoload || !prog->autoattach)
continue;
@@ -13963,31 +13967,34 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
*/
}
- /* Skeleton is created with earlier version of bpftool
- * which does not support auto-attachment
- */
- if (s->map_skel_sz < sizeof(struct bpf_map_skeleton))
- return 0;
for (i = 0; i < s->map_cnt; i++) {
- struct bpf_map *map = *s->maps[i].map;
- struct bpf_link **link = s->maps[i].link;
+ struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+ struct bpf_map *map = *map_skel->map;
+ struct bpf_link **link;
if (!map->autocreate || !map->autoattach)
continue;
- if (*link)
- continue;
-
/* only struct_ops maps can be attached */
if (!bpf_map__is_struct_ops(map))
continue;
- *link = bpf_map__attach_struct_ops(map);
+ /* skeleton is created with earlier version of bpftool, notify user */
+ if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) {
+ pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n",
+ bpf_map__name(map));
+ continue;
+ }
+
+ link = map_skel->link;
+ if (*link)
+ continue;
+
+ *link = bpf_map__attach_struct_ops(map);
if (!*link) {
err = -errno;
- pr_warn("map '%s': failed to auto-attach: %d\n",
- bpf_map__name(map), err);
+ pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err);
return libbpf_err(err);
}
}
@@ -14000,7 +14007,8 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
int i;
for (i = 0; i < s->prog_cnt; i++) {
- struct bpf_link **link = s->progs[i].link;
+ struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
+ struct bpf_link **link = prog_skel->link;
bpf_link__destroy(*link);
*link = NULL;
@@ -14010,7 +14018,8 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
return;
for (i = 0; i < s->map_cnt; i++) {
- struct bpf_link **link = s->maps[i].link;
+ struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+ struct bpf_link **link = map_skel->link;
if (link) {
bpf_link__destroy(*link);
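Finally, the attach path above replaces the old all-or-nothing early return with a per-map runtime probe: the link pointer is only dereferenced if the recorded element size covers that member in full, and older skeletons now get a warning per skipped map instead of silent success. A self-contained sketch of the probe; offsetofend() is defined locally here because it is a libbpf-internal helper, and struct map_skel is again a stand-in:

#include <stdbool.h>
#include <stddef.h>

/* local copy of the offsetofend() idiom libbpf uses internally */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct bpf_map;
struct bpf_link;

struct map_skel {	/* simplified stand-in */
	const char *name;
	struct bpf_map **map;
	void **mmaped;
	struct bpf_link **link;
};

/* old skeletons advertise an element size that stops short of 'link';
 * comparing against offsetofend() guarantees the whole member is
 * present, not just its first byte
 */
static bool skel_elem_has_link(size_t map_skel_sz)
{
	return map_skel_sz >= offsetofend(struct map_skel, link);
}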