Diffstat (limited to 'tools/lib/bpf')
 tools/lib/bpf/Build             |   2
 tools/lib/bpf/btf.c             | 696
 tools/lib/bpf/btf.h             |  36
 tools/lib/bpf/btf_dump.c        |   8
 tools/lib/bpf/btf_iter.c        | 177
 tools/lib/bpf/btf_relocate.c    | 519
 tools/lib/bpf/libbpf.c          | 136
 tools/lib/bpf/libbpf.h          |  23
 tools/lib/bpf/libbpf.map        |   4
 tools/lib/bpf/libbpf_internal.h |  39
 tools/lib/bpf/linker.c          |  69
 11 files changed, 1441 insertions(+), 268 deletions(-)
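For orientation before the full diff: the headline user-facing additions in this series are btf__distill_base() and btf__relocate(). The first derives, from a split BTF object, a new split BTF parented to a minimal "distilled" base that contains just enough named-type information to re-match against a future base; the second re-parents such split BTF onto an actual (possibly changed) base BTF, rewriting type IDs and string offsets. A minimal caller-side sketch of how the two compose, assuming vmlinux/module BTF inputs; the paths, function name, and error handling are illustrative, not part of this commit:

	#include <bpf/btf.h>
	#include <stdio.h>

	static int distill_then_relocate(void)
	{
		struct btf *vmlinux, *module, *newer_vmlinux = NULL;
		struct btf *dist_base = NULL, *dist_split = NULL;
		int err;

		vmlinux = btf__parse("/sys/kernel/btf/vmlinux", NULL);
		module = btf__parse_split("/sys/kernel/btf/some_module", vmlinux);
		if (!vmlinux || !module)
			return -1;

		/* Create a new split BTF parented to a minimal distilled base;
		 * both new objects are owned by, and must be freed by, the caller.
		 */
		err = btf__distill_base(module, &dist_base, &dist_split);
		if (err)
			goto out;

		/* Later, with a (possibly changed) base BTF in hand, re-parent
		 * the distilled split BTF onto it; type IDs and string offsets
		 * are adjusted to match the new base.
		 */
		newer_vmlinux = btf__parse("/sys/kernel/btf/vmlinux", NULL);
		err = newer_vmlinux ? btf__relocate(dist_split, newer_vmlinux) : -1;
		if (err)
			fprintf(stderr, "btf__relocate failed: %d\n", err);
	out:
		/* In real use, keep a base alive as long as its split BTF is in use. */
		btf__free(dist_split);
		btf__free(dist_base);
		btf__free(newer_vmlinux);
		btf__free(module);
		btf__free(vmlinux);
		return err;
	}

On-disk, the distilled base travels in the new ".BTF.base" ELF section (BTF_BASE_ELF_SEC below), and btf_parse_elf() relocates automatically when both that section and a base BTF are supplied.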
| diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index b6619199a706..e2cd558ca0b4 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1,4 +1,4 @@  libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \  	    netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \  	    btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \ -	    usdt.o zip.o elf.o features.o +	    usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 2d0840ef599a..32c00db3b91b 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -116,6 +116,9 @@ struct btf {  	/* whether strings are already deduplicated */  	bool strs_deduped; +	/* whether base_btf should be freed in btf_free for this instance */ +	bool owns_base; +  	/* BTF object FD, if loaded into kernel */  	int fd; @@ -598,7 +601,7 @@ static int btf_sanity_check(const struct btf *btf)  	__u32 i, n = btf__type_cnt(btf);  	int err; -	for (i = 1; i < n; i++) { +	for (i = btf->start_id; i < n; i++) {  		t = btf_type_by_id(btf, i);  		err = btf_validate_type(btf, t, i);  		if (err) @@ -969,6 +972,8 @@ void btf__free(struct btf *btf)  	free(btf->raw_data);  	free(btf->raw_data_swapped);  	free(btf->type_offs); +	if (btf->owns_base) +		btf__free(btf->base_btf);  	free(btf);  } @@ -1084,53 +1089,38 @@ struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)  	return libbpf_ptr(btf_new(data, size, base_btf));  } -static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, -				 struct btf_ext **btf_ext) +struct btf_elf_secs { +	Elf_Data *btf_data; +	Elf_Data *btf_ext_data; +	Elf_Data *btf_base_data; +}; + +static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs)  { -	Elf_Data *btf_data = NULL, *btf_ext_data = NULL; -	int err = 0, fd = -1, idx = 0; -	struct btf *btf = NULL;  	Elf_Scn *scn = NULL; -	Elf *elf = NULL; +	Elf_Data *data;  	GElf_Ehdr ehdr;  	size_t shstrndx; +	int idx = 0; -	if (elf_version(EV_CURRENT) == EV_NONE) { -		pr_warn("failed to init libelf for %s\n", path); -		return ERR_PTR(-LIBBPF_ERRNO__LIBELF); -	} - -	fd = open(path, O_RDONLY | O_CLOEXEC); -	if (fd < 0) { -		err = -errno; -		pr_warn("failed to open %s: %s\n", path, strerror(errno)); -		return ERR_PTR(err); -	} - -	err = -LIBBPF_ERRNO__FORMAT; - -	elf = elf_begin(fd, ELF_C_READ, NULL); -	if (!elf) { -		pr_warn("failed to open %s as ELF file\n", path); -		goto done; -	}  	if (!gelf_getehdr(elf, &ehdr)) {  		pr_warn("failed to get EHDR from %s\n", path); -		goto done; +		goto err;  	}  	if (elf_getshdrstrndx(elf, &shstrndx)) {  		pr_warn("failed to get section names section index for %s\n",  			path); -		goto done; +		goto err;  	}  	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {  		pr_warn("failed to get e_shstrndx from %s\n", path); -		goto done; +		goto err;  	}  	while ((scn = elf_nextscn(elf, scn)) != NULL) { +		Elf_Data **field;  		GElf_Shdr sh;  		char *name; @@ -1138,42 +1128,102 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,  		if (gelf_getshdr(scn, &sh) != &sh) {  			pr_warn("failed to get section(%d) header from %s\n",  				idx, path); -			goto done; +			goto err;  		}  		name = elf_strptr(elf, shstrndx, sh.sh_name);  		if (!name) {  			pr_warn("failed to get section(%d) name from %s\n",  				idx, path); -			goto done; +			goto err;  		} -		if (strcmp(name, BTF_ELF_SEC) == 0) { -			btf_data = elf_getdata(scn, 0); -			if (!btf_data) { -				pr_warn("failed to get section(%d, %s) 
data from %s\n", -					idx, name, path); -				goto done; -			} -			continue; -		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) { -			btf_ext_data = elf_getdata(scn, 0); -			if (!btf_ext_data) { -				pr_warn("failed to get section(%d, %s) data from %s\n", -					idx, name, path); -				goto done; -			} + +		if (strcmp(name, BTF_ELF_SEC) == 0) +			field = &secs->btf_data; +		else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) +			field = &secs->btf_ext_data; +		else if (strcmp(name, BTF_BASE_ELF_SEC) == 0) +			field = &secs->btf_base_data; +		else  			continue; + +		data = elf_getdata(scn, 0); +		if (!data) { +			pr_warn("failed to get section(%d, %s) data from %s\n", +				idx, name, path); +			goto err;  		} +		*field = data;  	} -	if (!btf_data) { +	return 0; + +err: +	return -LIBBPF_ERRNO__FORMAT; +} + +static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, +				 struct btf_ext **btf_ext) +{ +	struct btf_elf_secs secs = {}; +	struct btf *dist_base_btf = NULL; +	struct btf *btf = NULL; +	int err = 0, fd = -1; +	Elf *elf = NULL; + +	if (elf_version(EV_CURRENT) == EV_NONE) { +		pr_warn("failed to init libelf for %s\n", path); +		return ERR_PTR(-LIBBPF_ERRNO__LIBELF); +	} + +	fd = open(path, O_RDONLY | O_CLOEXEC); +	if (fd < 0) { +		err = -errno; +		pr_warn("failed to open %s: %s\n", path, strerror(errno)); +		return ERR_PTR(err); +	} + +	elf = elf_begin(fd, ELF_C_READ, NULL); +	if (!elf) { +		pr_warn("failed to open %s as ELF file\n", path); +		goto done; +	} + +	err = btf_find_elf_sections(elf, path, &secs); +	if (err) +		goto done; + +	if (!secs.btf_data) {  		pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);  		err = -ENODATA;  		goto done;  	} -	btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf); -	err = libbpf_get_error(btf); -	if (err) + +	if (secs.btf_base_data) { +		dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size, +					NULL); +		if (IS_ERR(dist_base_btf)) { +			err = PTR_ERR(dist_base_btf); +			dist_base_btf = NULL; +			goto done; +		} +	} + +	btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size, +		      dist_base_btf ?: base_btf); +	if (IS_ERR(btf)) { +		err = PTR_ERR(btf);  		goto done; +	} +	if (dist_base_btf && base_btf) { +		err = btf__relocate(btf, base_btf); +		if (err) +			goto done; +		btf__free(dist_base_btf); +		dist_base_btf = NULL; +	} + +	if (dist_base_btf) +		btf->owns_base = true;  	switch (gelf_getclass(elf)) {  	case ELFCLASS32: @@ -1187,11 +1237,12 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,  		break;  	} -	if (btf_ext && btf_ext_data) { -		*btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); -		err = libbpf_get_error(*btf_ext); -		if (err) +	if (btf_ext && secs.btf_ext_data) { +		*btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size); +		if (IS_ERR(*btf_ext)) { +			err = PTR_ERR(*btf_ext);  			goto done; +		}  	} else if (btf_ext) {  		*btf_ext = NULL;  	} @@ -1205,6 +1256,7 @@ done:  	if (btf_ext)  		btf_ext__free(*btf_ext); +	btf__free(dist_base_btf);  	btf__free(btf);  	return ERR_PTR(err); @@ -1739,9 +1791,8 @@ struct btf_pipe {  	struct hashmap *str_off_map; /* map string offsets from src to dst */  }; -static int btf_rewrite_str(__u32 *str_off, void *ctx) +static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)  { -	struct btf_pipe *p = ctx;  	long mapped_off;  	int off, err; @@ -1771,10 +1822,11 @@ static int btf_rewrite_str(__u32 *str_off, void *ctx)  	return 0;  } -int btf__add_type(struct btf 
*btf, const struct btf *src_btf, const struct btf_type *src_type) +static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type)  { -	struct btf_pipe p = { .src = src_btf, .dst = btf }; +	struct btf_field_iter it;  	struct btf_type *t; +	__u32 *str_off;  	int sz, err;  	sz = btf_type_size(src_type); @@ -1782,35 +1834,33 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t  		return libbpf_err(sz);  	/* deconstruct BTF, if necessary, and invalidate raw_data */ -	if (btf_ensure_modifiable(btf)) +	if (btf_ensure_modifiable(p->dst))  		return libbpf_err(-ENOMEM); -	t = btf_add_type_mem(btf, sz); +	t = btf_add_type_mem(p->dst, sz);  	if (!t)  		return libbpf_err(-ENOMEM);  	memcpy(t, src_type, sz); -	err = btf_type_visit_str_offs(t, btf_rewrite_str, &p); +	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);  	if (err)  		return libbpf_err(err); -	return btf_commit_type(btf, sz); +	while ((str_off = btf_field_iter_next(&it))) { +		err = btf_rewrite_str(p, str_off); +		if (err) +			return libbpf_err(err); +	} + +	return btf_commit_type(p->dst, sz);  } -static int btf_rewrite_type_ids(__u32 *type_id, void *ctx) +int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)  { -	struct btf *btf = ctx; - -	if (!*type_id) /* nothing to do for VOID references */ -		return 0; +	struct btf_pipe p = { .src = src_btf, .dst = btf }; -	/* we haven't updated btf's type count yet, so -	 * btf->start_id + btf->nr_types - 1 is the type ID offset we should -	 * add to all newly added BTF types -	 */ -	*type_id += btf->start_id + btf->nr_types - 1; -	return 0; +	return btf_add_type(&p, src_type);  }  static size_t btf_dedup_identity_hash_fn(long key, void *ctx); @@ -1858,6 +1908,9 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)  	memcpy(t, src_btf->types_data, data_sz);  	for (i = 0; i < cnt; i++) { +		struct btf_field_iter it; +		__u32 *type_id, *str_off; +  		sz = btf_type_size(t);  		if (sz < 0) {  			/* unlikely, has to be corrupted src_btf */ @@ -1869,15 +1922,31 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)  		*off = t - btf->types_data;  		/* add, dedup, and remap strings referenced by this BTF type */ -		err = btf_type_visit_str_offs(t, btf_rewrite_str, &p); +		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);  		if (err)  			goto err_out; +		while ((str_off = btf_field_iter_next(&it))) { +			err = btf_rewrite_str(&p, str_off); +			if (err) +				goto err_out; +		}  		/* remap all type IDs referenced from this BTF type */ -		err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf); +		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);  		if (err)  			goto err_out; +		while ((type_id = btf_field_iter_next(&it))) { +			if (!*type_id) /* nothing to do for VOID references */ +				continue; + +			/* we haven't updated btf's type count yet, so +			 * btf->start_id + btf->nr_types - 1 is the type ID offset we should +			 * add to all newly added BTF types +			 */ +			*type_id += btf->start_id + btf->nr_types - 1; +		} +  		/* go to next type data and type offset index entry */  		t += sz;  		off++; @@ -3453,11 +3522,19 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *  	int i, r;  	for (i = 0; i < d->btf->nr_types; i++) { +		struct btf_field_iter it;  		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); +		__u32 *str_off; -		r = btf_type_visit_str_offs(t, fn, ctx); +		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);  		if (r)  			
return r; + +		while ((str_off = btf_field_iter_next(&it))) { +			r = fn(str_off, ctx); +			if (r) +				return r; +		}  	}  	if (!d->btf_ext) @@ -4919,10 +4996,23 @@ static int btf_dedup_remap_types(struct btf_dedup *d)  	for (i = 0; i < d->btf->nr_types; i++) {  		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); +		struct btf_field_iter it; +		__u32 *type_id; -		r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d); +		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);  		if (r)  			return r; + +		while ((type_id = btf_field_iter_next(&it))) { +			__u32 resolved_id, new_id; + +			resolved_id = resolve_type_id(d, *type_id); +			new_id = d->hypot_map[resolved_id]; +			if (new_id > BTF_MAX_NR_TYPES) +				return -EINVAL; + +			*type_id = new_id; +		}  	}  	if (!d->btf_ext) @@ -5003,136 +5093,6 @@ struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_bt  	return btf__parse_split(path, vmlinux_btf);  } -int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx) -{ -	int i, n, err; - -	switch (btf_kind(t)) { -	case BTF_KIND_INT: -	case BTF_KIND_FLOAT: -	case BTF_KIND_ENUM: -	case BTF_KIND_ENUM64: -		return 0; - -	case BTF_KIND_FWD: -	case BTF_KIND_CONST: -	case BTF_KIND_VOLATILE: -	case BTF_KIND_RESTRICT: -	case BTF_KIND_PTR: -	case BTF_KIND_TYPEDEF: -	case BTF_KIND_FUNC: -	case BTF_KIND_VAR: -	case BTF_KIND_DECL_TAG: -	case BTF_KIND_TYPE_TAG: -		return visit(&t->type, ctx); - -	case BTF_KIND_ARRAY: { -		struct btf_array *a = btf_array(t); - -		err = visit(&a->type, ctx); -		err = err ?: visit(&a->index_type, ctx); -		return err; -	} - -	case BTF_KIND_STRUCT: -	case BTF_KIND_UNION: { -		struct btf_member *m = btf_members(t); - -		for (i = 0, n = btf_vlen(t); i < n; i++, m++) { -			err = visit(&m->type, ctx); -			if (err) -				return err; -		} -		return 0; -	} - -	case BTF_KIND_FUNC_PROTO: { -		struct btf_param *m = btf_params(t); - -		err = visit(&t->type, ctx); -		if (err) -			return err; -		for (i = 0, n = btf_vlen(t); i < n; i++, m++) { -			err = visit(&m->type, ctx); -			if (err) -				return err; -		} -		return 0; -	} - -	case BTF_KIND_DATASEC: { -		struct btf_var_secinfo *m = btf_var_secinfos(t); - -		for (i = 0, n = btf_vlen(t); i < n; i++, m++) { -			err = visit(&m->type, ctx); -			if (err) -				return err; -		} -		return 0; -	} - -	default: -		return -EINVAL; -	} -} - -int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx) -{ -	int i, n, err; - -	err = visit(&t->name_off, ctx); -	if (err) -		return err; - -	switch (btf_kind(t)) { -	case BTF_KIND_STRUCT: -	case BTF_KIND_UNION: { -		struct btf_member *m = btf_members(t); - -		for (i = 0, n = btf_vlen(t); i < n; i++, m++) { -			err = visit(&m->name_off, ctx); -			if (err) -				return err; -		} -		break; -	} -	case BTF_KIND_ENUM: { -		struct btf_enum *m = btf_enum(t); - -		for (i = 0, n = btf_vlen(t); i < n; i++, m++) { -			err = visit(&m->name_off, ctx); -			if (err) -				return err; -		} -		break; -	} -	case BTF_KIND_ENUM64: { -		struct btf_enum64 *m = btf_enum64(t); - -		for (i = 0, n = btf_vlen(t); i < n; i++, m++) { -			err = visit(&m->name_off, ctx); -			if (err) -				return err; -		} -		break; -	} -	case BTF_KIND_FUNC_PROTO: { -		struct btf_param *m = btf_params(t); - -		for (i = 0, n = btf_vlen(t); i < n; i++, m++) { -			err = visit(&m->name_off, ctx); -			if (err) -				return err; -		} -		break; -	} -	default: -		break; -	} - -	return 0; -} -  int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)  {  	
const struct btf_ext_info *seg; @@ -5212,3 +5172,325 @@ int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void  	return 0;  } + +struct btf_distill { +	struct btf_pipe pipe; +	int *id_map; +	unsigned int split_start_id; +	unsigned int split_start_str; +	int diff_id; +}; + +static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i) +{ +	struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i); +	struct btf_field_iter it; +	__u32 *id; +	int err; + +	err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS); +	if (err) +		return err; +	while ((id = btf_field_iter_next(&it))) { +		struct btf_type *base_t; + +		if (!*id) +			continue; +		/* split BTF id, not needed */ +		if (*id >= dist->split_start_id) +			continue; +		/* already added ? */ +		if (dist->id_map[*id] > 0) +			continue; + +		/* only a subset of base BTF types should be referenced from +		 * split BTF; ensure nothing unexpected is referenced. +		 */ +		base_t = btf_type_by_id(dist->pipe.src, *id); +		switch (btf_kind(base_t)) { +		case BTF_KIND_INT: +		case BTF_KIND_FLOAT: +		case BTF_KIND_FWD: +		case BTF_KIND_ARRAY: +		case BTF_KIND_STRUCT: +		case BTF_KIND_UNION: +		case BTF_KIND_TYPEDEF: +		case BTF_KIND_ENUM: +		case BTF_KIND_ENUM64: +		case BTF_KIND_PTR: +		case BTF_KIND_CONST: +		case BTF_KIND_RESTRICT: +		case BTF_KIND_VOLATILE: +		case BTF_KIND_FUNC_PROTO: +		case BTF_KIND_TYPE_TAG: +			dist->id_map[*id] = *id; +			break; +		default: +			pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n", +				*id, btf_kind(base_t)); +			return -EINVAL; +		} +		/* If a base type is used, ensure types it refers to are +		 * marked as used also; so for example if we find a PTR to INT +		 * we need both the PTR and INT. +		 * +		 * The only exception is named struct/unions, since distilled +		 * base BTF composite types have no members. +		 */ +		if (btf_is_composite(base_t) && base_t->name_off) +			continue; +		err = btf_add_distilled_type_ids(dist, *id); +		if (err) +			return err; +	} +	return 0; +} + +static int btf_add_distilled_types(struct btf_distill *dist) +{ +	bool adding_to_base = dist->pipe.dst->start_id == 1; +	int id = btf__type_cnt(dist->pipe.dst); +	struct btf_type *t; +	int i, err = 0; + + +	/* Add types for each of the required references to either distilled +	 * base or split BTF, depending on type characteristics. +	 */ +	for (i = 1; i < dist->split_start_id; i++) { +		const char *name; +		int kind; + +		if (!dist->id_map[i]) +			continue; +		t = btf_type_by_id(dist->pipe.src, i); +		kind = btf_kind(t); +		name = btf__name_by_offset(dist->pipe.src, t->name_off); + +		switch (kind) { +		case BTF_KIND_INT: +		case BTF_KIND_FLOAT: +		case BTF_KIND_FWD: +			/* Named int, float, fwd are added to base. */ +			if (!adding_to_base) +				continue; +			err = btf_add_type(&dist->pipe, t); +			break; +		case BTF_KIND_STRUCT: +		case BTF_KIND_UNION: +			/* Named struct/union are added to base as 0-vlen +			 * struct/union of same size.  Anonymous struct/unions +			 * are added to split BTF as-is. +			 */ +			if (adding_to_base) { +				if (!t->name_off) +					continue; +				err = btf_add_composite(dist->pipe.dst, kind, name, t->size); +			} else { +				if (t->name_off) +					continue; +				err = btf_add_type(&dist->pipe, t); +			} +			break; +		case BTF_KIND_ENUM: +		case BTF_KIND_ENUM64: +			/* Named enum[64]s are added to base as a sized +			 * enum; relocation will match with appropriately-named +			 * and sized enum or enum64. 
+			 * +			 * Anonymous enums are added to split BTF as-is. +			 */ +			if (adding_to_base) { +				if (!t->name_off) +					continue; +				err = btf__add_enum(dist->pipe.dst, name, t->size); +			} else { +				if (t->name_off) +					continue; +				err = btf_add_type(&dist->pipe, t); +			} +			break; +		case BTF_KIND_ARRAY: +		case BTF_KIND_TYPEDEF: +		case BTF_KIND_PTR: +		case BTF_KIND_CONST: +		case BTF_KIND_RESTRICT: +		case BTF_KIND_VOLATILE: +		case BTF_KIND_FUNC_PROTO: +		case BTF_KIND_TYPE_TAG: +			/* All other types are added to split BTF. */ +			if (adding_to_base) +				continue; +			err = btf_add_type(&dist->pipe, t); +			break; +		default: +			pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n", +				name, i, kind); +			return -EINVAL; + +		} +		if (err < 0) +			break; +		dist->id_map[i] = id++; +	} +	return err; +} + +/* Split BTF ids without a mapping will be shifted downwards since distilled + * base BTF is smaller than the original base BTF.  For those that have a + * mapping (either to base or updated split BTF), update the id based on + * that mapping. + */ +static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i) +{ +	struct btf_type *t = btf_type_by_id(dist->pipe.dst, i); +	struct btf_field_iter it; +	__u32 *id; +	int err; + +	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); +	if (err) +		return err; +	while ((id = btf_field_iter_next(&it))) { +		if (dist->id_map[*id]) +			*id = dist->id_map[*id]; +		else if (*id >= dist->split_start_id) +			*id -= dist->diff_id; +	} +	return 0; +} + +/* Create updated split BTF with distilled base BTF; distilled base BTF + * consists of BTF information required to clarify the types that split + * BTF refers to, omitting unneeded details.  Specifically it will contain + * base types and memberless definitions of named structs, unions and enumerated + * types. Associated reference types like pointers, arrays and anonymous + * structs, unions and enumerated types will be added to split BTF. + * Size is recorded for named struct/unions to help guide matching to the + * target base BTF during later relocation. + * + * The only case where structs, unions or enumerated types are fully represented + * is when they are anonymous; in such cases, the anonymous type is added to + * split BTF in full. + * + * We return newly-created split BTF where the split BTF refers to a newly-created + * distilled base BTF. Both must be freed separately by the caller. + */ +int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, +		      struct btf **new_split_btf) +{ +	struct btf *new_base = NULL, *new_split = NULL; +	const struct btf *old_base; +	unsigned int n = btf__type_cnt(src_btf); +	struct btf_distill dist = {}; +	struct btf_type *t; +	int i, err = 0; + +	/* src BTF must be split BTF. 
*/ +	old_base = btf__base_btf(src_btf); +	if (!new_base_btf || !new_split_btf || !old_base) +		return libbpf_err(-EINVAL); + +	new_base = btf__new_empty(); +	if (!new_base) +		return libbpf_err(-ENOMEM); +	dist.id_map = calloc(n, sizeof(*dist.id_map)); +	if (!dist.id_map) { +		err = -ENOMEM; +		goto done; +	} +	dist.pipe.src = src_btf; +	dist.pipe.dst = new_base; +	dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL); +	if (IS_ERR(dist.pipe.str_off_map)) { +		err = -ENOMEM; +		goto done; +	} +	dist.split_start_id = btf__type_cnt(old_base); +	dist.split_start_str = old_base->hdr->str_len; + +	/* Pass over src split BTF; generate the list of base BTF type ids it +	 * references; these will constitute our distilled BTF set to be +	 * distributed over base and split BTF as appropriate. +	 */ +	for (i = src_btf->start_id; i < n; i++) { +		err = btf_add_distilled_type_ids(&dist, i); +		if (err < 0) +			goto done; +	} +	/* Next add types for each of the required references to base BTF and split BTF +	 * in turn. +	 */ +	err = btf_add_distilled_types(&dist); +	if (err < 0) +		goto done; + +	/* Create new split BTF with distilled base BTF as its base; the final +	 * state is split BTF with distilled base BTF that represents enough +	 * about its base references to allow it to be relocated with the base +	 * BTF available. +	 */ +	new_split = btf__new_empty_split(new_base); +	if (!new_split) { +		err = -errno; +		goto done; +	} +	dist.pipe.dst = new_split; +	/* First add all split types */ +	for (i = src_btf->start_id; i < n; i++) { +		t = btf_type_by_id(src_btf, i); +		err = btf_add_type(&dist.pipe, t); +		if (err < 0) +			goto done; +	} +	/* Now add distilled types to split BTF that are not added to base. */ +	err = btf_add_distilled_types(&dist); +	if (err < 0) +		goto done; + +	/* All split BTF ids will be shifted downwards since there are less base +	 * BTF ids in distilled base BTF. +	 */ +	dist.diff_id = dist.split_start_id - btf__type_cnt(new_base); + +	n = btf__type_cnt(new_split); +	/* Now update base/split BTF ids. */ +	for (i = 1; i < n; i++) { +		err = btf_update_distilled_type_ids(&dist, i); +		if (err < 0) +			break; +	} +done: +	free(dist.id_map); +	hashmap__free(dist.pipe.str_off_map); +	if (err) { +		btf__free(new_split); +		btf__free(new_base); +		return libbpf_err(err); +	} +	*new_base_btf = new_base; +	*new_split_btf = new_split; + +	return 0; +} + +const struct btf_header *btf_header(const struct btf *btf) +{ +	return btf->hdr; +} + +void btf_set_base_btf(struct btf *btf, const struct btf *base_btf) +{ +	btf->base_btf = (struct btf *)base_btf; +	btf->start_id = btf__type_cnt(base_btf); +	btf->start_str_off = base_btf->hdr->str_len; +} + +int btf__relocate(struct btf *btf, const struct btf *base_btf) +{ +	int err = btf_relocate(btf, base_btf, NULL); + +	if (!err) +		btf->owns_base = false; +	return libbpf_err(err); +} diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 8e6880d91c84..b68d216837a9 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -18,6 +18,7 @@ extern "C" {  #define BTF_ELF_SEC ".BTF"  #define BTF_EXT_ELF_SEC ".BTF.ext" +#define BTF_BASE_ELF_SEC ".BTF.base"  #define MAPS_ELF_SEC ".maps"  struct btf; @@ -107,6 +108,27 @@ LIBBPF_API struct btf *btf__new_empty(void);   */  LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf); +/** + * @brief **btf__distill_base()** creates new versions of the split BTF + * *src_btf* and its base BTF. 
The new base BTF will only contain the types + * needed to improve robustness of the split BTF to small changes in base BTF. + * When that split BTF is loaded against a (possibly changed) base, this + * distilled base BTF will help update references to that (possibly changed) + * base BTF. + * + * Both the new split and its associated new base BTF must be freed by + * the caller. + * + * If successful, 0 is returned and **new_base_btf** and **new_split_btf** + * will point at new base/split BTF. Both the new split and its associated + * new base BTF must be freed by the caller. + * + * A negative value is returned on error and the thread-local `errno` variable + * is set to the error code as well. + */ +LIBBPF_API int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, +				 struct btf **new_split_btf); +  LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);  LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf);  LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext); @@ -231,6 +253,20 @@ struct btf_dedup_opts {  LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts); +/** + * @brief **btf__relocate()** will check the split BTF *btf* for references + * to base BTF kinds, and verify those references are compatible with + * *base_btf*; if they are, *btf* is adjusted such that is re-parented to + * *base_btf* and type ids and strings are adjusted to accommodate this. + * + * If successful, 0 is returned and **btf** now has **base_btf** as its + * base. + * + * A negative value is returned on error and the thread-local `errno` variable + * is set to the error code as well. + */ +LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf); +  struct btf_dump;  struct btf_dump_opts { diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 5dbca76b953f..894860111ddb 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -1559,10 +1559,12 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,  			 * Clang for BPF target generates func_proto with no  			 * args as a func_proto with a single void arg (e.g.,  			 * `int (*f)(void)` vs just `int (*f)()`). We are -			 * going to pretend there are no args for such case. +			 * going to emit valid empty args (void) syntax for +			 * such case. Similarly and conveniently, valid +			 * no args case can be special-cased here as well.  			 */ -			if (vlen == 1 && p->type == 0) { -				btf_dump_printf(d, ")"); +			if (vlen == 0 || (vlen == 1 && p->type == 0)) { +				btf_dump_printf(d, "void)");  				return;  			} diff --git a/tools/lib/bpf/btf_iter.c b/tools/lib/bpf/btf_iter.c new file mode 100644 index 000000000000..9a6c822c2294 --- /dev/null +++ b/tools/lib/bpf/btf_iter.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2021 Facebook */ +/* Copyright (c) 2024, Oracle and/or its affiliates. 
*/ + +#ifdef __KERNEL__ +#include <linux/bpf.h> +#include <linux/btf.h> + +#define btf_var_secinfos(t)	(struct btf_var_secinfo *)btf_type_var_secinfo(t) + +#else +#include "btf.h" +#include "libbpf_internal.h" +#endif + +int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, +			enum btf_field_iter_kind iter_kind) +{ +	it->p = NULL; +	it->m_idx = -1; +	it->off_idx = 0; +	it->vlen = 0; + +	switch (iter_kind) { +	case BTF_FIELD_ITER_IDS: +		switch (btf_kind(t)) { +		case BTF_KIND_UNKN: +		case BTF_KIND_INT: +		case BTF_KIND_FLOAT: +		case BTF_KIND_ENUM: +		case BTF_KIND_ENUM64: +			it->desc = (struct btf_field_desc) {}; +			break; +		case BTF_KIND_FWD: +		case BTF_KIND_CONST: +		case BTF_KIND_VOLATILE: +		case BTF_KIND_RESTRICT: +		case BTF_KIND_PTR: +		case BTF_KIND_TYPEDEF: +		case BTF_KIND_FUNC: +		case BTF_KIND_VAR: +		case BTF_KIND_DECL_TAG: +		case BTF_KIND_TYPE_TAG: +			it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} }; +			break; +		case BTF_KIND_ARRAY: +			it->desc = (struct btf_field_desc) { +				2, {sizeof(struct btf_type) + offsetof(struct btf_array, type), +				sizeof(struct btf_type) + offsetof(struct btf_array, index_type)} +			}; +			break; +		case BTF_KIND_STRUCT: +		case BTF_KIND_UNION: +			it->desc = (struct btf_field_desc) { +				0, {}, +				sizeof(struct btf_member), +				1, {offsetof(struct btf_member, type)} +			}; +			break; +		case BTF_KIND_FUNC_PROTO: +			it->desc = (struct btf_field_desc) { +				1, {offsetof(struct btf_type, type)}, +				sizeof(struct btf_param), +				1, {offsetof(struct btf_param, type)} +			}; +			break; +		case BTF_KIND_DATASEC: +			it->desc = (struct btf_field_desc) { +				0, {}, +				sizeof(struct btf_var_secinfo), +				1, {offsetof(struct btf_var_secinfo, type)} +			}; +			break; +		default: +			return -EINVAL; +		} +		break; +	case BTF_FIELD_ITER_STRS: +		switch (btf_kind(t)) { +		case BTF_KIND_UNKN: +			it->desc = (struct btf_field_desc) {}; +			break; +		case BTF_KIND_INT: +		case BTF_KIND_FLOAT: +		case BTF_KIND_FWD: +		case BTF_KIND_ARRAY: +		case BTF_KIND_CONST: +		case BTF_KIND_VOLATILE: +		case BTF_KIND_RESTRICT: +		case BTF_KIND_PTR: +		case BTF_KIND_TYPEDEF: +		case BTF_KIND_FUNC: +		case BTF_KIND_VAR: +		case BTF_KIND_DECL_TAG: +		case BTF_KIND_TYPE_TAG: +		case BTF_KIND_DATASEC: +			it->desc = (struct btf_field_desc) { +				1, {offsetof(struct btf_type, name_off)} +			}; +			break; +		case BTF_KIND_ENUM: +			it->desc = (struct btf_field_desc) { +				1, {offsetof(struct btf_type, name_off)}, +				sizeof(struct btf_enum), +				1, {offsetof(struct btf_enum, name_off)} +			}; +			break; +		case BTF_KIND_ENUM64: +			it->desc = (struct btf_field_desc) { +				1, {offsetof(struct btf_type, name_off)}, +				sizeof(struct btf_enum64), +				1, {offsetof(struct btf_enum64, name_off)} +			}; +			break; +		case BTF_KIND_STRUCT: +		case BTF_KIND_UNION: +			it->desc = (struct btf_field_desc) { +				1, {offsetof(struct btf_type, name_off)}, +				sizeof(struct btf_member), +				1, {offsetof(struct btf_member, name_off)} +			}; +			break; +		case BTF_KIND_FUNC_PROTO: +			it->desc = (struct btf_field_desc) { +				1, {offsetof(struct btf_type, name_off)}, +				sizeof(struct btf_param), +				1, {offsetof(struct btf_param, name_off)} +			}; +			break; +		default: +			return -EINVAL; +		} +		break; +	default: +		return -EINVAL; +	} + +	if (it->desc.m_sz) +		it->vlen = btf_vlen(t); + +	it->p = t; +	return 0; +} + +__u32 *btf_field_iter_next(struct btf_field_iter *it) +{ +	if (!it->p) +		return NULL; + +	if 
(it->m_idx < 0) { +		if (it->off_idx < it->desc.t_off_cnt) +			return it->p + it->desc.t_offs[it->off_idx++]; +		/* move to per-member iteration */ +		it->m_idx = 0; +		it->p += sizeof(struct btf_type); +		it->off_idx = 0; +	} + +	/* if type doesn't have members, stop */ +	if (it->desc.m_sz == 0) { +		it->p = NULL; +		return NULL; +	} + +	if (it->off_idx >= it->desc.m_off_cnt) { +		/* exhausted this member's fields, go to the next member */ +		it->m_idx++; +		it->p += it->desc.m_sz; +		it->off_idx = 0; +	} + +	if (it->m_idx < it->vlen) +		return it->p + it->desc.m_offs[it->off_idx++]; + +	it->p = NULL; +	return NULL; +} diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c new file mode 100644 index 000000000000..17f8b32f94a0 --- /dev/null +++ b/tools/lib/bpf/btf_relocate.c @@ -0,0 +1,519 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Oracle and/or its affiliates. */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#ifdef __KERNEL__ +#include <linux/bpf.h> +#include <linux/bsearch.h> +#include <linux/btf.h> +#include <linux/sort.h> +#include <linux/string.h> +#include <linux/bpf_verifier.h> + +#define btf_type_by_id				(struct btf_type *)btf_type_by_id +#define btf__type_cnt				btf_nr_types +#define btf__base_btf				btf_base_btf +#define btf__name_by_offset			btf_name_by_offset +#define btf__str_by_offset			btf_str_by_offset +#define btf_kflag				btf_type_kflag + +#define calloc(nmemb, sz)			kvcalloc(nmemb, sz, GFP_KERNEL | __GFP_NOWARN) +#define free(ptr)				kvfree(ptr) +#define qsort(base, num, sz, cmp)		sort(base, num, sz, cmp, NULL) + +#else + +#include "btf.h" +#include "bpf.h" +#include "libbpf.h" +#include "libbpf_internal.h" + +#endif /* __KERNEL__ */ + +struct btf; + +struct btf_relocate { +	struct btf *btf; +	const struct btf *base_btf; +	const struct btf *dist_base_btf; +	unsigned int nr_base_types; +	unsigned int nr_split_types; +	unsigned int nr_dist_base_types; +	int dist_str_len; +	int base_str_len; +	__u32 *id_map; +	__u32 *str_map; +}; + +/* Set temporarily in relocation id_map if distilled base struct/union is + * embedded in a split BTF struct/union; in such a case, size information must + * match between distilled base BTF and base BTF representation of type. + */ +#define BTF_IS_EMBEDDED ((__u32)-1) + +/* <name, size, id> triple used in sorting/searching distilled base BTF. */ +struct btf_name_info { +	const char *name; +	/* set when search requires a size match */ +	bool needs_size: 1; +	unsigned int size: 31; +	__u32 id; +}; + +static int btf_relocate_rewrite_type_id(struct btf_relocate *r, __u32 i) +{ +	struct btf_type *t = btf_type_by_id(r->btf, i); +	struct btf_field_iter it; +	__u32 *id; +	int err; + +	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); +	if (err) +		return err; + +	while ((id = btf_field_iter_next(&it))) +		*id = r->id_map[*id]; +	return 0; +} + +/* Simple string comparison used for sorting within BTF, since all distilled + * types are named.  If strings match, and size is non-zero for both elements + * fall back to using size for ordering. + */ +static int cmp_btf_name_size(const void *n1, const void *n2) +{ +	const struct btf_name_info *ni1 = n1; +	const struct btf_name_info *ni2 = n2; +	int name_diff = strcmp(ni1->name, ni2->name); + +	if (!name_diff && ni1->needs_size && ni2->needs_size) +		return ni2->size - ni1->size; +	return name_diff; +} + +/* Binary search with a small twist; find leftmost element that matches + * so that we can then iterate through all exact matches.  
So for example + * searching { "a", "bb", "bb", "c" }  we would always match on the + * leftmost "bb". + */ +static struct btf_name_info *search_btf_name_size(struct btf_name_info *key, +						  struct btf_name_info *vals, +						  int nelems) +{ +	struct btf_name_info *ret = NULL; +	int high = nelems - 1; +	int low = 0; + +	while (low <= high) { +		int mid = (low + high)/2; +		struct btf_name_info *val = &vals[mid]; +		int diff = cmp_btf_name_size(key, val); + +		if (diff == 0) +			ret = val; +		/* even if found, keep searching for leftmost match */ +		if (diff <= 0) +			high = mid - 1; +		else +			low = mid + 1; +	} +	return ret; +} + +/* If a member of a split BTF struct/union refers to a base BTF + * struct/union, mark that struct/union id temporarily in the id_map + * with BTF_IS_EMBEDDED.  Members can be const/restrict/volatile/typedef + * reference types, but if a pointer is encountered, the type is no longer + * considered embedded. + */ +static int btf_mark_embedded_composite_type_ids(struct btf_relocate *r, __u32 i) +{ +	struct btf_type *t = btf_type_by_id(r->btf, i); +	struct btf_field_iter it; +	__u32 *id; +	int err; + +	if (!btf_is_composite(t)) +		return 0; + +	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); +	if (err) +		return err; + +	while ((id = btf_field_iter_next(&it))) { +		__u32 next_id = *id; + +		while (next_id) { +			t = btf_type_by_id(r->btf, next_id); +			switch (btf_kind(t)) { +			case BTF_KIND_CONST: +			case BTF_KIND_RESTRICT: +			case BTF_KIND_VOLATILE: +			case BTF_KIND_TYPEDEF: +			case BTF_KIND_TYPE_TAG: +				next_id = t->type; +				break; +			case BTF_KIND_ARRAY: { +				struct btf_array *a = btf_array(t); + +				next_id = a->type; +				break; +			} +			case BTF_KIND_STRUCT: +			case BTF_KIND_UNION: +				if (next_id < r->nr_dist_base_types) +					r->id_map[next_id] = BTF_IS_EMBEDDED; +				next_id = 0; +				break; +			default: +				next_id = 0; +				break; +			} +		} +	} + +	return 0; +} + +/* Build a map from distilled base BTF ids to base BTF ids. To do so, iterate + * through base BTF looking up distilled type (using binary search) equivalents. + */ +static int btf_relocate_map_distilled_base(struct btf_relocate *r) +{ +	struct btf_name_info *info, *info_end; +	struct btf_type *base_t, *dist_t; +	__u8 *base_name_cnt = NULL; +	int err = 0; +	__u32 id; + +	/* generate a sort index array of name/type ids sorted by name for +	 * distilled base BTF to speed name-based lookups. +	 */ +	info = calloc(r->nr_dist_base_types, sizeof(*info)); +	if (!info) { +		err = -ENOMEM; +		goto done; +	} +	info_end = info + r->nr_dist_base_types; +	for (id = 0; id < r->nr_dist_base_types; id++) { +		dist_t = btf_type_by_id(r->dist_base_btf, id); +		info[id].name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off); +		info[id].id = id; +		info[id].size = dist_t->size; +		info[id].needs_size = true; +	} +	qsort(info, r->nr_dist_base_types, sizeof(*info), cmp_btf_name_size); + +	/* Mark distilled base struct/union members of split BTF structs/unions +	 * in id_map with BTF_IS_EMBEDDED; this signals that these types +	 * need to match both name and size, otherwise embedding the base +	 * struct/union in the split type is invalid. +	 */ +	for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) { +		err = btf_mark_embedded_composite_type_ids(r, id); +		if (err) +			goto done; +	} + +	/* Collect name counts for composite types in base BTF.  
If multiple +	 * instances of a struct/union of the same name exist, we need to use +	 * size to determine which to map to since name alone is ambiguous. +	 */ +	base_name_cnt = calloc(r->base_str_len, sizeof(*base_name_cnt)); +	if (!base_name_cnt) { +		err = -ENOMEM; +		goto done; +	} +	for (id = 1; id < r->nr_base_types; id++) { +		base_t = btf_type_by_id(r->base_btf, id); +		if (!btf_is_composite(base_t) || !base_t->name_off) +			continue; +		if (base_name_cnt[base_t->name_off] < 255) +			base_name_cnt[base_t->name_off]++; +	} + +	/* Now search base BTF for matching distilled base BTF types. */ +	for (id = 1; id < r->nr_base_types; id++) { +		struct btf_name_info *dist_info, base_info = {}; +		int dist_kind, base_kind; + +		base_t = btf_type_by_id(r->base_btf, id); +		/* distilled base consists of named types only. */ +		if (!base_t->name_off) +			continue; +		base_kind = btf_kind(base_t); +		base_info.id = id; +		base_info.name = btf__name_by_offset(r->base_btf, base_t->name_off); +		switch (base_kind) { +		case BTF_KIND_INT: +		case BTF_KIND_FLOAT: +		case BTF_KIND_ENUM: +		case BTF_KIND_ENUM64: +			/* These types should match both name and size */ +			base_info.needs_size = true; +			base_info.size = base_t->size; +			break; +		case BTF_KIND_FWD: +			/* No size considerations for fwds. */ +			break; +		case BTF_KIND_STRUCT: +		case BTF_KIND_UNION: +			/* Size only needs to be used for struct/union if there +			 * are multiple types in base BTF with the same name. +			 * If there are multiple _distilled_ types with the same +			 * name (a very unlikely scenario), that doesn't matter +			 * unless corresponding _base_ types to match them are +			 * missing. +			 */ +			base_info.needs_size = base_name_cnt[base_t->name_off] > 1; +			base_info.size = base_t->size; +			break; +		default: +			continue; +		} +		/* iterate over all matching distilled base types */ +		for (dist_info = search_btf_name_size(&base_info, info, r->nr_dist_base_types); +		     dist_info != NULL && dist_info < info_end && +		     cmp_btf_name_size(&base_info, dist_info) == 0; +		     dist_info++) { +			if (!dist_info->id || dist_info->id >= r->nr_dist_base_types) { +				pr_warn("base BTF id [%d] maps to invalid distilled base BTF id [%d]\n", +					id, dist_info->id); +				err = -EINVAL; +				goto done; +			} +			dist_t = btf_type_by_id(r->dist_base_btf, dist_info->id); +			dist_kind = btf_kind(dist_t); + +			/* Validate that the found distilled type is compatible. +			 * Do not error out on mismatch as another match may +			 * occur for an identically-named type. +			 */ +			switch (dist_kind) { +			case BTF_KIND_FWD: +				switch (base_kind) { +				case BTF_KIND_FWD: +					if (btf_kflag(dist_t) != btf_kflag(base_t)) +						continue; +					break; +				case BTF_KIND_STRUCT: +					if (btf_kflag(base_t)) +						continue; +					break; +				case BTF_KIND_UNION: +					if (!btf_kflag(base_t)) +						continue; +					break; +				default: +					continue; +				} +				break; +			case BTF_KIND_INT: +				if (dist_kind != base_kind || +				    btf_int_encoding(base_t) != btf_int_encoding(dist_t)) +					continue; +				break; +			case BTF_KIND_FLOAT: +				if (dist_kind != base_kind) +					continue; +				break; +			case BTF_KIND_ENUM: +				/* ENUM and ENUM64 are encoded as sized ENUM in +				 * distilled base BTF. 
+				 */ +				if (base_kind != dist_kind && base_kind != BTF_KIND_ENUM64) +					continue; +				break; +			case BTF_KIND_STRUCT: +			case BTF_KIND_UNION: +				/* size verification is required for embedded +				 * struct/unions. +				 */ +				if (r->id_map[dist_info->id] == BTF_IS_EMBEDDED && +				    base_t->size != dist_t->size) +					continue; +				break; +			default: +				continue; +			} +			if (r->id_map[dist_info->id] && +			    r->id_map[dist_info->id] != BTF_IS_EMBEDDED) { +				/* we already have a match; this tells us that +				 * multiple base types of the same name +				 * have the same size, since for cases where +				 * multiple types have the same name we match +				 * on name and size.  In this case, we have +				 * no way of determining which to relocate +				 * to in base BTF, so error out. +				 */ +				pr_warn("distilled base BTF type '%s' [%u], size %u has multiple candidates of the same size (ids [%u, %u]) in base BTF\n", +					base_info.name, dist_info->id, +					base_t->size, id, r->id_map[dist_info->id]); +				err = -EINVAL; +				goto done; +			} +			/* map id and name */ +			r->id_map[dist_info->id] = id; +			r->str_map[dist_t->name_off] = base_t->name_off; +		} +	} +	/* ensure all distilled BTF ids now have a mapping... */ +	for (id = 1; id < r->nr_dist_base_types; id++) { +		const char *name; + +		if (r->id_map[id] && r->id_map[id] != BTF_IS_EMBEDDED) +			continue; +		dist_t = btf_type_by_id(r->dist_base_btf, id); +		name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off); +		pr_warn("distilled base BTF type '%s' [%d] is not mapped to base BTF id\n", +			name, id); +		err = -EINVAL; +		break; +	} +done: +	free(base_name_cnt); +	free(info); +	return err; +} + +/* distilled base should only have named int/float/enum/fwd/struct/union types. */ +static int btf_relocate_validate_distilled_base(struct btf_relocate *r) +{ +	unsigned int i; + +	for (i = 1; i < r->nr_dist_base_types; i++) { +		struct btf_type *t = btf_type_by_id(r->dist_base_btf, i); +		int kind = btf_kind(t); + +		switch (kind) { +		case BTF_KIND_INT: +		case BTF_KIND_FLOAT: +		case BTF_KIND_ENUM: +		case BTF_KIND_STRUCT: +		case BTF_KIND_UNION: +		case BTF_KIND_FWD: +			if (t->name_off) +				break; +			pr_warn("type [%d], kind [%d] is invalid for distilled base BTF; it is anonymous\n", +				i, kind); +			return -EINVAL; +		default: +			pr_warn("type [%d] in distilled based BTF has unexpected kind [%d]\n", +				i, kind); +			return -EINVAL; +		} +	} +	return 0; +} + +static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i) +{ +	struct btf_type *t = btf_type_by_id(r->btf, i); +	struct btf_field_iter it; +	__u32 *str_off; +	int off, err; + +	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); +	if (err) +		return err; + +	while ((str_off = btf_field_iter_next(&it))) { +		if (!*str_off) +			continue; +		if (*str_off >= r->dist_str_len) { +			*str_off += r->base_str_len - r->dist_str_len; +		} else { +			off = r->str_map[*str_off]; +			if (!off) { +				pr_warn("string '%s' [offset %u] is not mapped to base BTF", +					btf__str_by_offset(r->btf, off), *str_off); +				return -ENOENT; +			} +			*str_off = off; +		} +	} +	return 0; +} + +/* If successful, output of relocation is updated BTF with base BTF pointing + * at base_btf, and type ids, strings adjusted accordingly. 
+ */ +int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map) +{ +	unsigned int nr_types = btf__type_cnt(btf); +	const struct btf_header *dist_base_hdr; +	const struct btf_header *base_hdr; +	struct btf_relocate r = {}; +	int err = 0; +	__u32 id, i; + +	r.dist_base_btf = btf__base_btf(btf); +	if (!base_btf || r.dist_base_btf == base_btf) +		return -EINVAL; + +	r.nr_dist_base_types = btf__type_cnt(r.dist_base_btf); +	r.nr_base_types = btf__type_cnt(base_btf); +	r.nr_split_types = nr_types - r.nr_dist_base_types; +	r.btf = btf; +	r.base_btf = base_btf; + +	r.id_map = calloc(nr_types, sizeof(*r.id_map)); +	r.str_map = calloc(btf_header(r.dist_base_btf)->str_len, sizeof(*r.str_map)); +	dist_base_hdr = btf_header(r.dist_base_btf); +	base_hdr = btf_header(r.base_btf); +	r.dist_str_len = dist_base_hdr->str_len; +	r.base_str_len = base_hdr->str_len; +	if (!r.id_map || !r.str_map) { +		err = -ENOMEM; +		goto err_out; +	} + +	err = btf_relocate_validate_distilled_base(&r); +	if (err) +		goto err_out; + +	/* Split BTF ids need to be adjusted as base and distilled base +	 * have different numbers of types, changing the start id of split +	 * BTF. +	 */ +	for (id = r.nr_dist_base_types; id < nr_types; id++) +		r.id_map[id] = id + r.nr_base_types - r.nr_dist_base_types; + +	/* Build a map from distilled base ids to actual base BTF ids; it is used +	 * to update split BTF id references.  Also build a str_map mapping from +	 * distilled base BTF names to base BTF names. +	 */ +	err = btf_relocate_map_distilled_base(&r); +	if (err) +		goto err_out; + +	/* Next, rewrite type ids in split BTF, replacing split ids with updated +	 * ids based on number of types in base BTF, and base ids with +	 * relocated ids from base_btf. +	 */ +	for (i = 0, id = r.nr_dist_base_types; i < r.nr_split_types; i++, id++) { +		err = btf_relocate_rewrite_type_id(&r, id); +		if (err) +			goto err_out; +	} +	/* String offsets now need to be updated using the str_map. 
*/ +	for (i = 0; i < r.nr_split_types; i++) { +		err = btf_relocate_rewrite_strs(&r, i + r.nr_dist_base_types); +		if (err) +			goto err_out; +	} +	/* Finally reset base BTF to be base_btf */ +	btf_set_base_btf(btf, base_btf); + +	if (id_map) { +		*id_map = r.id_map; +		r.id_map = NULL; +	} +err_out: +	free(r.id_map); +	free(r.str_map); +	return err; +} diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 5401f2df463d..a3be6f8fac09 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -229,7 +229,30 @@ static const char * const prog_type_name[] = {  static int __base_pr(enum libbpf_print_level level, const char *format,  		     va_list args)  { -	if (level == LIBBPF_DEBUG) +	const char *env_var = "LIBBPF_LOG_LEVEL"; +	static enum libbpf_print_level min_level = LIBBPF_INFO; +	static bool initialized; + +	if (!initialized) { +		char *verbosity; + +		initialized = true; +		verbosity = getenv(env_var); +		if (verbosity) { +			if (strcasecmp(verbosity, "warn") == 0) +				min_level = LIBBPF_WARN; +			else if (strcasecmp(verbosity, "debug") == 0) +				min_level = LIBBPF_DEBUG; +			else if (strcasecmp(verbosity, "info") == 0) +				min_level = LIBBPF_INFO; +			else +				fprintf(stderr, "libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n", +					env_var, verbosity); +		} +	} + +	/* if too verbose, skip logging  */ +	if (level > min_level)  		return 0;  	return vfprintf(stderr, format, args); @@ -549,6 +572,7 @@ struct bpf_map {  	bool pinned;  	bool reused;  	bool autocreate; +	bool autoattach;  	__u64 map_extra;  }; @@ -1377,6 +1401,7 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,  		map->def.value_size = type->size;  		map->def.max_entries = 1;  		map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? 
BPF_F_LINK : 0; +		map->autoattach = true;  		map->st_ops = calloc(1, sizeof(*map->st_ops));  		if (!map->st_ops) @@ -4796,6 +4821,20 @@ int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)  	return 0;  } +int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach) +{ +	if (!bpf_map__is_struct_ops(map)) +		return libbpf_err(-EINVAL); + +	map->autoattach = autoattach; +	return 0; +} + +bool bpf_map__autoattach(const struct bpf_map *map) +{ +	return map->autoattach; +} +  int bpf_map__reuse_fd(struct bpf_map *map, int fd)  {  	struct bpf_map_info info; @@ -10336,7 +10375,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)  struct bpf_map *  bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)  { -	if (prev == NULL) +	if (prev == NULL && obj != NULL)  		return obj->maps;  	return __bpf_map__iter(prev, obj, 1); @@ -10345,7 +10384,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)  struct bpf_map *  bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)  { -	if (next == NULL) { +	if (next == NULL && obj != NULL) {  		if (!obj->nr_maps)  			return NULL;  		return obj->maps + obj->nr_maps - 1; @@ -12877,8 +12916,10 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)  	__u32 zero = 0;  	int err, fd; -	if (!bpf_map__is_struct_ops(map)) +	if (!bpf_map__is_struct_ops(map)) { +		pr_warn("map '%s': can't attach non-struct_ops map\n", map->name);  		return libbpf_err_ptr(-EINVAL); +	}  	if (map->fd < 0) {  		pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name); @@ -13671,14 +13712,15 @@ int libbpf_num_possible_cpus(void)  static int populate_skeleton_maps(const struct bpf_object *obj,  				  struct bpf_map_skeleton *maps, -				  size_t map_cnt) +				  size_t map_cnt, size_t map_skel_sz)  {  	int i;  	for (i = 0; i < map_cnt; i++) { -		struct bpf_map **map = maps[i].map; -		const char *name = maps[i].name; -		void **mmaped = maps[i].mmaped; +		struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz; +		struct bpf_map **map = map_skel->map; +		const char *name = map_skel->name; +		void **mmaped = map_skel->mmaped;  		*map = bpf_object__find_map_by_name(obj, name);  		if (!*map) { @@ -13695,13 +13737,14 @@ static int populate_skeleton_maps(const struct bpf_object *obj,  static int populate_skeleton_progs(const struct bpf_object *obj,  				   struct bpf_prog_skeleton *progs, -				   size_t prog_cnt) +				   size_t prog_cnt, size_t prog_skel_sz)  {  	int i;  	for (i = 0; i < prog_cnt; i++) { -		struct bpf_program **prog = progs[i].prog; -		const char *name = progs[i].name; +		struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz; +		struct bpf_program **prog = prog_skel->prog; +		const char *name = prog_skel->name;  		*prog = bpf_object__find_program_by_name(obj, name);  		if (!*prog) { @@ -13742,13 +13785,13 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,  	}  	*s->obj = obj; -	err = populate_skeleton_maps(obj, s->maps, s->map_cnt); +	err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz);  	if (err) {  		pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);  		return libbpf_err(err);  	} -	err = populate_skeleton_progs(obj, s->progs, s->prog_cnt); +	err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz);  	if (err) {  		pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);  		return libbpf_err(err); @@ 
-13778,20 +13821,20 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)  		return libbpf_err(-errno);  	} -	err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt); +	err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz);  	if (err) {  		pr_warn("failed to populate subskeleton maps: %d\n", err);  		return libbpf_err(err);  	} -	err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt); +	err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz);  	if (err) {  		pr_warn("failed to populate subskeleton maps: %d\n", err);  		return libbpf_err(err);  	}  	for (var_idx = 0; var_idx < s->var_cnt; var_idx++) { -		var_skel = &s->vars[var_idx]; +		var_skel = (void *)s->vars + var_idx * s->var_skel_sz;  		map = *var_skel->map;  		map_type_id = bpf_map__btf_value_type_id(map);  		map_type = btf__type_by_id(btf, map_type_id); @@ -13838,10 +13881,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)  	}  	for (i = 0; i < s->map_cnt; i++) { -		struct bpf_map *map = *s->maps[i].map; +		struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; +		struct bpf_map *map = *map_skel->map;  		size_t mmap_sz = bpf_map_mmap_sz(map);  		int prot, map_fd = map->fd; -		void **mmaped = s->maps[i].mmaped; +		void **mmaped = map_skel->mmaped;  		if (!mmaped)  			continue; @@ -13889,8 +13933,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)  	int i, err;  	for (i = 0; i < s->prog_cnt; i++) { -		struct bpf_program *prog = *s->progs[i].prog; -		struct bpf_link **link = s->progs[i].link; +		struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; +		struct bpf_program *prog = *prog_skel->prog; +		struct bpf_link **link = prog_skel->link;  		if (!prog->autoload || !prog->autoattach)  			continue; @@ -13922,6 +13967,38 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)  		 */  	} + +	for (i = 0; i < s->map_cnt; i++) { +		struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; +		struct bpf_map *map = *map_skel->map; +		struct bpf_link **link; + +		if (!map->autocreate || !map->autoattach) +			continue; + +		/* only struct_ops maps can be attached */ +		if (!bpf_map__is_struct_ops(map)) +			continue; + +		/* skeleton is created with earlier version of bpftool, notify user */ +		if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) { +			pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n", +				bpf_map__name(map)); +			continue; +		} + +		link = map_skel->link; +		if (*link) +			continue; + +		*link = bpf_map__attach_struct_ops(map); +		if (!*link) { +			err = -errno; +			pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err); +			return libbpf_err(err); +		} +	} +  	return 0;  } @@ -13930,11 +14007,25 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)  	int i;  	for (i = 0; i < s->prog_cnt; i++) { -		struct bpf_link **link = s->progs[i].link; +		struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; +		struct bpf_link **link = prog_skel->link;  		bpf_link__destroy(*link);  		*link = NULL;  	} + +	if (s->map_skel_sz < sizeof(struct bpf_map_skeleton)) +		return; + +	for (i = 0; i < s->map_cnt; i++) { +		struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; +		struct bpf_link **link = map_skel->link; + +		if (link) { +			bpf_link__destroy(*link); +			*link = NULL; +		} +	}  }  void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) @@ 
-13942,8 +14033,7 @@ void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)  	if (!s)  		return; -	if (s->progs) -		bpf_object__detach_skeleton(s); +	bpf_object__detach_skeleton(s);  	if (s->obj)  		bpf_object__close(*s->obj);  	free(s->maps); diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index c3f77d9260fe..64a6a3d323e3 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -98,7 +98,10 @@ typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,  /**   * @brief **libbpf_set_print()** sets user-provided log callback function to - * be used for libbpf warnings and informational messages. + * be used for libbpf warnings and informational messages. If the user callback + * is not set, messages are logged to stderr by default. The verbosity of these + * messages can be controlled by setting the environment variable + * LIBBPF_LOG_LEVEL to either warn, info, or debug.   * @param fn The log print function. If NULL, libbpf won't print anything.   * @return Pointer to old print function.   * @@ -976,6 +979,23 @@ LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);  LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);  /** + * @brief **bpf_map__set_autoattach()** sets whether libbpf has to auto-attach + * map during BPF skeleton attach phase. + * @param map the BPF map instance + * @param autoattach whether to attach map during BPF skeleton attach phase + * @return 0 on success; negative error code, otherwise + */ +LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach); + +/** + * @brief **bpf_map__autoattach()** returns whether BPF map is configured to + * auto-attach during BPF skeleton attach phase. + * @param map the BPF map instance + * @return true if map is set to auto-attach during skeleton attach phase; false, otherwise + */ +LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map); + +/**   * @brief **bpf_map__fd()** gets the file descriptor of the passed   * BPF map   * @param map the BPF map instance @@ -1669,6 +1689,7 @@ struct bpf_map_skeleton {  	const char *name;  	struct bpf_map **map;  	void **mmaped; +	struct bpf_link **link;  };  struct bpf_prog_skeleton { diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index c1ce8aa3520b..8f0d9ea3b1b4 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -419,6 +419,10 @@ LIBBPF_1.4.0 {  LIBBPF_1.5.0 {  	global: +		btf__distill_base; +		btf__relocate; +		bpf_map__autoattach; +		bpf_map__set_autoattach;  		bpf_program__attach_sockmap;  		ring__consume_n;  		ring_buffer__consume_n; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index a0dcfb82e455..408df59e0771 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -234,6 +234,9 @@ struct btf_type;  struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);  const char *btf_kind_str(const struct btf_type *t);  const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id); +const struct btf_header *btf_header(const struct btf *btf); +void btf_set_base_btf(struct btf *btf, const struct btf *base_btf); +int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map);  static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)  { @@ -508,11 +511,33 @@ struct bpf_line_info_min {  	__u32	line_col;  }; +enum btf_field_iter_kind { +	BTF_FIELD_ITER_IDS, +	BTF_FIELD_ITER_STRS, +}; + +struct btf_field_desc { +	/* once-per-type 
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index c1ce8aa3520b..8f0d9ea3b1b4 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -419,6 +419,10 @@ LIBBPF_1.4.0 {
 
 LIBBPF_1.5.0 {
 	global:
+		btf__distill_base;
+		btf__relocate;
+		bpf_map__autoattach;
+		bpf_map__set_autoattach;
 		bpf_program__attach_sockmap;
 		ring__consume_n;
 		ring_buffer__consume_n;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index a0dcfb82e455..408df59e0771 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -234,6 +234,9 @@ struct btf_type;
 struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
 const char *btf_kind_str(const struct btf_type *t);
 const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
+const struct btf_header *btf_header(const struct btf *btf);
+void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
+int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map);
 
 static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
 {
@@ -508,11 +511,33 @@ struct bpf_line_info_min {
 	__u32	line_col;
 };
 
+enum btf_field_iter_kind {
+	BTF_FIELD_ITER_IDS,
+	BTF_FIELD_ITER_STRS,
+};
+
+struct btf_field_desc {
+	/* once-per-type offsets */
+	int t_off_cnt, t_offs[2];
+	/* member struct size, or zero, if no members */
+	int m_sz;
+	/* repeated per-member offsets */
+	int m_off_cnt, m_offs[1];
+};
+
+struct btf_field_iter {
+	struct btf_field_desc desc;
+	void *p;
+	int m_idx;
+	int off_idx;
+	int vlen;
+};
+
+int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind);
+__u32 *btf_field_iter_next(struct btf_field_iter *it);
 typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
 typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
-int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
-int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
 __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
@@ -597,13 +622,9 @@ static inline int ensure_good_fd(int fd)
 	return fd;
 }
 
-static inline int sys_dup2(int oldfd, int newfd)
+static inline int sys_dup3(int oldfd, int newfd, int flags)
 {
-#ifdef __NR_dup2
-	return syscall(__NR_dup2, oldfd, newfd);
-#else
-	return syscall(__NR_dup3, oldfd, newfd, 0);
-#endif
+	return syscall(__NR_dup3, oldfd, newfd, flags);
 }
 
 /* Point *fixed_fd* to the same file that *tmp_fd* points to.
@@ -614,7 +635,7 @@ static inline int reuse_fd(int fixed_fd, int tmp_fd)
 {
 	int err;
 
-	err = sys_dup2(tmp_fd, fixed_fd);
+	err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC);
 	err = err < 0 ? -errno : 0;
 	close(tmp_fd); /* clean up temporary FD */
 	return err;
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 0d4be829551b..9cd3d4109788 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -957,19 +957,33 @@ static int check_btf_str_off(__u32 *str_off, void *ctx)
 static int linker_sanity_check_btf(struct src_obj *obj)
 {
 	struct btf_type *t;
-	int i, n, err = 0;
+	int i, n, err;
 
 	if (!obj->btf)
 		return 0;
 
 	n = btf__type_cnt(obj->btf);
 	for (i = 1; i < n; i++) {
+		struct btf_field_iter it;
+		__u32 *type_id, *str_off;
+
 		t = btf_type_by_id(obj->btf, i);
-		err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
-		err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf);
+		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
+		if (err)
+			return err;
+		while ((type_id = btf_field_iter_next(&it))) {
+			if (*type_id >= n)
+				return -EINVAL;
+		}
+
+		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
 		if (err)
 			return err;
+		while ((str_off = btf_field_iter_next(&it))) {
+			if (!btf__str_by_offset(obj->btf, *str_off))
+				return -EINVAL;
+		}
 	}
 
 	return 0;
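The loop above shows the iterator idiom that replaces the btf_type_visit_type_ids()/btf_type_visit_str_offs() callbacks: initialize a btf_field_iter over one type, then pull a pointer to each __u32 field of the requested kind. As a sketch of another internal-style user of the same idiom (count_type_refs is a hypothetical helper, assuming the libbpf_internal.h declarations above):

/* count how many fields across all types reference type ID `target` */
static int count_type_refs(struct btf *btf, __u32 target, __u32 *cnt)
{
	__u32 i, n = btf__type_cnt(btf);
	int err;

	*cnt = 0;
	for (i = 1; i < n; i++) { /* ID 0 is VOID, skip it */
		struct btf_type *t = btf_type_by_id(btf, i);
		struct btf_field_iter it;
		__u32 *type_id;

		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
		if (err) /* e.g., an unexpected BTF kind */
			return err;

		/* next() hands back a pointer, so callers can also rewrite
		 * fields in place, as linker_append_btf() does below
		 */
		while ((type_id = btf_field_iter_next(&it))) {
			if (*type_id == target)
				(*cnt)++;
		}
	}
	return 0;
}

Compared with the callback visitors, the iterator keeps control flow in the caller, so early returns (as in the -EINVAL paths above) need no error plumbing through a context pointer.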
@@ -2213,10 +2227,17 @@ static int linker_fixup_btf(struct src_obj *obj)
 		vi = btf_var_secinfos(t);
 		for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
 			const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type);
-			const char *var_name = btf__str_by_offset(obj->btf, vt->name_off);
-			int var_linkage = btf_var(vt)->linkage;
+			const char *var_name;
+			int var_linkage;
 			Elf64_Sym *sym;
 
+			/* could be a variable or function */
+			if (!btf_is_var(vt))
+				continue;
+
+			var_name = btf__str_by_offset(obj->btf, vt->name_off);
+			var_linkage = btf_var(vt)->linkage;
+
 			/* no need to patch up static or extern vars */
 			if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED)
 				continue;
@@ -2234,26 +2255,10 @@ static int linker_fixup_btf(struct src_obj *obj)
 	return 0;
 }
 
-static int remap_type_id(__u32 *type_id, void *ctx)
-{
-	int *id_map = ctx;
-	int new_id = id_map[*type_id];
-
-	/* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
-	if (new_id == 0 && *type_id != 0) {
-		pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id);
-		return -EINVAL;
-	}
-
-	*type_id = id_map[*type_id];
-
-	return 0;
-}
-
 static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
 {
 	const struct btf_type *t;
-	int i, j, n, start_id, id;
+	int i, j, n, start_id, id, err;
 	const char *name;
 
 	if (!obj->btf)
@@ -2324,9 +2329,25 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
 	n = btf__type_cnt(linker->btf);
 	for (i = start_id; i < n; i++) {
 		struct btf_type *dst_t = btf_type_by_id(linker->btf, i);
+		struct btf_field_iter it;
+		__u32 *type_id;
 
-		if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
-			return -EINVAL;
+		err = btf_field_iter_init(&it, dst_t, BTF_FIELD_ITER_IDS);
+		if (err)
+			return err;
+
+		while ((type_id = btf_field_iter_next(&it))) {
+			int new_id = obj->btf_type_map[*type_id];
+
+			/* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
+			if (new_id == 0 && *type_id != 0) {
+				pr_warn("failed to find new ID mapping for original BTF type ID %u\n",
+					*type_id);
+				return -EINVAL;
+			}
+
+			*type_id = obj->btf_type_map[*type_id];
+		}
 	}
 
 	/* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's
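(The hunk above continues past this excerpt.) For reference, the invariant the inlined loop enforces can be stated as a tiny standalone helper; remap_one is a hypothetical name mirroring the removed remap_type_id() callback, and the sketch assumes kernel-style __u32 from linux/types.h:

#include <errno.h>
#include <linux/types.h>

/* id_map[orig_id] == 0 is only legitimate for VOID (ID 0); for any other
 * ID it means the source type was never appended to the linked BTF, so
 * the link must fail rather than silently rewrite the field to VOID.
 */
static int remap_one(__u32 *type_id, const int *id_map)
{
	int new_id = id_map[*type_id];

	if (new_id == 0 && *type_id != 0)
		return -EINVAL;

	*type_id = new_id;
	return 0;
}

Inlining this logic into linker_append_btf() via the field iterator removes the last user of btf_type_visit_type_ids() in the linker, which is what allows the two visitor declarations to be dropped from libbpf_internal.h earlier in this diff.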