Diffstat (limited to 'tools/lib/bpf/btf.c')
-rw-r--r--  tools/lib/bpf/btf.c  206
1 file changed, 154 insertions, 52 deletions
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 7e4c5586bd87..9aa19c89f758 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -299,6 +299,7 @@ static int btf_type_size(const struct btf_type *t)
 	case BTF_KIND_TYPEDEF:
 	case BTF_KIND_FUNC:
 	case BTF_KIND_FLOAT:
+	case BTF_KIND_TYPE_TAG:
 		return base_size;
 	case BTF_KIND_INT:
 		return base_size + sizeof(__u32);
@@ -349,6 +350,7 @@ static int btf_bswap_type_rest(struct btf_type *t)
 	case BTF_KIND_TYPEDEF:
 	case BTF_KIND_FUNC:
 	case BTF_KIND_FLOAT:
+	case BTF_KIND_TYPE_TAG:
 		return 0;
 	case BTF_KIND_INT:
 		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
@@ -452,7 +454,7 @@ const struct btf *btf__base_btf(const struct btf *btf)
 }
 
 /* internal helper returning non-const pointer to a type */
-struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
+struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
 {
 	if (type_id == 0)
 		return &btf_void;
@@ -608,6 +610,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
 		case BTF_KIND_RESTRICT:
 		case BTF_KIND_VAR:
 		case BTF_KIND_DECL_TAG:
+		case BTF_KIND_TYPE_TAG:
 			type_id = t->type;
 			break;
 		case BTF_KIND_ARRAY:
@@ -649,6 +652,7 @@ int btf__align_of(const struct btf *btf, __u32 id)
 	case BTF_KIND_VOLATILE:
 	case BTF_KIND_CONST:
 	case BTF_KIND_RESTRICT:
+	case BTF_KIND_TYPE_TAG:
 		return btf__align_of(btf, t->type);
 	case BTF_KIND_ARRAY:
 		return btf__align_of(btf, btf_array(t)->type);
@@ -1120,54 +1124,86 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf)
 
 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
 
-int btf__load_into_kernel(struct btf *btf)
+int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
 {
-	__u32 log_buf_size = 0, raw_size;
-	char *log_buf = NULL;
+	LIBBPF_OPTS(bpf_btf_load_opts, opts);
+	__u32 buf_sz = 0, raw_size;
+	char *buf = NULL, *tmp;
 	void *raw_data;
 	int err = 0;
 
 	if (btf->fd >= 0)
 		return libbpf_err(-EEXIST);
+	if (log_sz && !log_buf)
+		return libbpf_err(-EINVAL);
 
-retry_load:
-	if (log_buf_size) {
-		log_buf = malloc(log_buf_size);
-		if (!log_buf)
-			return libbpf_err(-ENOMEM);
-
-		*log_buf = 0;
-	}
-
+	/* cache native raw data representation */
 	raw_data = btf_get_raw_data(btf, &raw_size, false);
 	if (!raw_data) {
 		err = -ENOMEM;
 		goto done;
 	}
 
-	/* cache native raw data representation */
 	btf->raw_size = raw_size;
 	btf->raw_data = raw_data;
 
-	btf->fd = bpf_load_btf(raw_data, raw_size, log_buf, log_buf_size, false);
+retry_load:
+	/* if log_level is 0, we won't provide log_buf/log_size to the kernel,
+	 * initially. Only if BTF loading fails, we bump log_level to 1 and
+	 * retry, using either auto-allocated or custom log_buf. This way
+	 * non-NULL custom log_buf provides a buffer just in case, but hopes
+	 * for successful load and no need for log_buf.
+	 */
+	if (log_level) {
+		/* if caller didn't provide custom log_buf, we'll keep
+		 * allocating our own progressively bigger buffers for BTF
+		 * verification log
+		 */
+		if (!log_buf) {
+			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
+			tmp = realloc(buf, buf_sz);
+			if (!tmp) {
+				err = -ENOMEM;
+				goto done;
+			}
+			buf = tmp;
+			buf[0] = '\0';
+		}
+
+		opts.log_buf = log_buf ? log_buf : buf;
+		opts.log_size = log_buf ? log_sz : buf_sz;
+		opts.log_level = log_level;
+	}
+
+	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
 	if (btf->fd < 0) {
-		if (!log_buf || errno == ENOSPC) {
-			log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
-					   log_buf_size << 1);
-			free(log_buf);
+		/* time to turn on verbose mode and try again */
+		if (log_level == 0) {
+			log_level = 1;
 			goto retry_load;
 		}
+		/* only retry if caller didn't provide custom log_buf, but
+		 * make sure we can never overflow buf_sz
+		 */
+		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
+			goto retry_load;
 
 		err = -errno;
-		pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
-		if (*log_buf)
-			pr_warn("%s\n", log_buf);
-		goto done;
+		pr_warn("BTF loading error: %d\n", err);
+		/* don't print out contents of custom log_buf */
+		if (!log_buf && buf[0])
+			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
 	}
 
 done:
-	free(log_buf);
+	free(buf);
 	return libbpf_err(err);
 }
 
+int btf__load_into_kernel(struct btf *btf)
+{
+	return btf_load_into_kernel(btf, NULL, 0, 0);
+}
+
 int btf__load(struct btf *) __attribute__((alias("btf__load_into_kernel")));
 
 int btf__fd(const struct btf *btf)
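The retry logic above starts at log_level 0 (no log buffer handed to the kernel), bumps to level 1 only after a failed load, and on ENOSPC keeps doubling an auto-allocated buffer unless the caller supplied one. A minimal caller-side sketch of the same opts-based kernel API (illustrative only: function name, buffer size, and error handling are assumptions, not part of the patch):

#include <stdio.h>
#include <bpf/bpf.h>

/* hypothetical helper: load raw BTF with an explicit verifier log */
static int load_btf_verbose(const void *raw_data, size_t raw_size)
{
	static char log[64 * 1024];	/* assumed size; grow on ENOSPC */
	LIBBPF_OPTS(bpf_btf_load_opts, opts,
		.log_buf = log,
		.log_size = sizeof(log),
		.log_level = 1,		/* ask the kernel to fill the log */
	);
	int fd;

	fd = bpf_btf_load(raw_data, raw_size, &opts);
	if (fd < 0)
		fprintf(stderr, "BTF load failed:\n%s\n", log);
	return fd;
}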
@@ -2236,6 +2272,22 @@ int btf__add_restrict(struct btf *btf, int ref_type_id)
 }
 
 /*
+ * Append new BTF_KIND_TYPE_TAG type with:
+ *   - *value*, non-empty/non-NULL tag value;
+ *   - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ *   - >0, type ID of newly added BTF type;
+ *   - <0, on error.
+ */
+int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
+{
+	if (!value || !value[0])
+		return libbpf_err(-EINVAL);
+
+	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
+}
+
+/*
  * Append new BTF_KIND_FUNC type with:
  *   - *name*, non-empty/non-NULL name;
  *   - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
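The new btf__add_type_tag() inserts a BTF_KIND_TYPE_TAG node between a pointer and its pointee, which is how compilers encode annotations such as __user. A small usage sketch (the tag value and sequential IDs are assumptions for illustration; error checks elided):

#include <bpf/btf.h>

/* hypothetical: build "int __attribute__((btf_type_tag("user"))) *" */
static struct btf *make_tagged_ptr(void)
{
	struct btf *btf = btf__new_empty();
	int int_id, tag_id;

	if (!btf)
		return NULL;

	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);	/* id 1 */
	tag_id = btf__add_type_tag(btf, "user", int_id);	/* id 2 */
	btf__add_ptr(btf, tag_id);				/* id 3 */
	return btf;
}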
@@ -2711,15 +2763,11 @@ void btf_ext__free(struct btf_ext *btf_ext)
 	free(btf_ext);
 }
 
-struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
+struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
 {
 	struct btf_ext *btf_ext;
 	int err;
 
-	err = btf_ext_parse_hdr(data, size);
-	if (err)
-		return libbpf_err_ptr(err);
-
 	btf_ext = calloc(1, sizeof(struct btf_ext));
 	if (!btf_ext)
 		return libbpf_err_ptr(-ENOMEM);
@@ -2732,6 +2780,10 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
 	}
 	memcpy(btf_ext->data, data, size);
 
+	err = btf_ext_parse_hdr(btf_ext->data, size);
+	if (err)
+		goto done;
+
 	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
 		err = -EINVAL;
 		goto done;
@@ -2846,8 +2898,7 @@ __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
 
 struct btf_dedup;
 
-static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
-				       const struct btf_dedup_opts *opts);
+static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
 static void btf_dedup_free(struct btf_dedup *d);
 static int btf_dedup_prep(struct btf_dedup *d);
 static int btf_dedup_strings(struct btf_dedup *d);
@@ -2994,12 +3045,17 @@ static int btf_dedup_remap_types(struct btf_dedup *d);
  * deduplicating structs/unions is described in greater details in comments for
  * `btf_dedup_is_equiv` function.
  */
-int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
-	       const struct btf_dedup_opts *opts)
+
+DEFAULT_VERSION(btf__dedup_v0_6_0, btf__dedup, LIBBPF_0.6.0)
+int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts)
 {
-	struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
+	struct btf_dedup *d;
 	int err;
 
+	if (!OPTS_VALID(opts, btf_dedup_opts))
+		return libbpf_err(-EINVAL);
+
+	d = btf_dedup_new(btf, opts);
 	if (IS_ERR(d)) {
 		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
 		return libbpf_err(-EINVAL);
@@ -3051,6 +3107,19 @@ done:
 	return libbpf_err(err);
 }
 
+COMPAT_VERSION(btf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2)
+int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts)
+{
+	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext);
+
+	if (unused_opts) {
+		pr_warn("please use new version of btf__dedup() that supports options\n");
+		return libbpf_err(-ENOTSUP);
+	}
+
+	return btf__dedup(btf, &opts);
+}
+
 #define BTF_UNPROCESSED_ID ((__u32)-1)
 #define BTF_IN_PROGRESS_ID ((__u32)-2)
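With btf_dedup_opts turned into an OPTS-based struct, btf_ext now travels inside the options: the two-argument btf__dedup() becomes the LIBBPF_0.6.0 default symbol, while the old three-argument signature survives as the LIBBPF_0.0.2 compat alias. A sketch of the new call site (illustrative; names assumed):

#include <bpf/btf.h>

/* hypothetical wrapper: dedup BTF together with its .BTF.ext data */
static int dedup_with_ext(struct btf *btf, struct btf_ext *ext)
{
	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = ext);

	return btf__dedup(btf, &opts);	/* resolves to btf__dedup_v0_6_0 */
}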
@@ -3163,8 +3232,7 @@ static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
 	return k1 == k2;
 }
 
-static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
-				       const struct btf_dedup_opts *opts)
+static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
 {
 	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
 	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
@@ -3173,13 +3241,11 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
 	if (!d)
 		return ERR_PTR(-ENOMEM);
 
-	d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
-	/* dedup_table_size is now used only to force collisions in tests */
-	if (opts && opts->dedup_table_size == 1)
+	if (OPTS_GET(opts, force_collisions, false))
 		hash_fn = btf_dedup_collision_hash_fn;
 
 	d->btf = btf;
-	d->btf_ext = btf_ext;
+	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);
 
 	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
 	if (IS_ERR(d->dedup_table)) {
@@ -3443,8 +3509,8 @@ static long btf_hash_struct(struct btf_type *t)
 }
 
 /*
- * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
- * IDs. This check is performed during type graph equivalence check and
+ * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
+ * type IDs. This check is performed during type graph equivalence check and
  * referenced types equivalence is checked separately.
  */
 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
@@ -3625,6 +3691,7 @@ static int btf_dedup_prep(struct btf_dedup *d)
 		case BTF_KIND_TYPEDEF:
 		case BTF_KIND_FUNC:
 		case BTF_KIND_FLOAT:
+		case BTF_KIND_TYPE_TAG:
 			h = btf_hash_common(t);
 			break;
 		case BTF_KIND_INT:
@@ -3685,6 +3752,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
 	case BTF_KIND_VAR:
 	case BTF_KIND_DATASEC:
 	case BTF_KIND_DECL_TAG:
+	case BTF_KIND_TYPE_TAG:
 		return 0;
 
 	case BTF_KIND_INT:
@@ -3708,8 +3776,6 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
 				new_id = cand_id;
 				break;
 			}
-			if (d->opts.dont_resolve_fwds)
-				continue;
 			if (btf_compat_enum(t, cand)) {
 				if (btf_is_enum_fwd(t)) {
 					/* resolve fwd to full enum */
@@ -3817,6 +3883,31 @@ static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
 	return btf_equal_array(t1, t2);
 }
 
+/* Check if given two types are identical STRUCT/UNION definitions */
+static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
+{
+	const struct btf_member *m1, *m2;
+	struct btf_type *t1, *t2;
+	int n, i;
+
+	t1 = btf_type_by_id(d->btf, id1);
+	t2 = btf_type_by_id(d->btf, id2);
+
+	if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
+		return false;
+
+	if (!btf_shallow_equal_struct(t1, t2))
+		return false;
+
+	m1 = btf_members(t1);
+	m2 = btf_members(t2);
+	for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
+		if (m1->type != m2->type)
+			return false;
+	}
+	return true;
+}
+
 /*
  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
  * call it "candidate graph" in this description for brevity) to a type graph
@@ -3928,6 +4019,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
 
 	hypot_type_id = d->hypot_map[canon_id];
 	if (hypot_type_id <= BTF_MAX_NR_TYPES) {
+		if (hypot_type_id == cand_id)
+			return 1;
 		/* In some cases compiler will generate different DWARF types
 		 * for *identical* array type definitions and use them for
 		 * different fields within the *same* struct. This breaks type
@@ -3936,8 +4029,18 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
 		 * types within a single CU. So work around that by explicitly
 		 * allowing identical array types here.
 		 */
-		return hypot_type_id == cand_id ||
-		       btf_dedup_identical_arrays(d, hypot_type_id, cand_id);
+		if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
+			return 1;
+		/* It turns out that similar situation can happen with
+		 * struct/union sometimes, sigh... Handle the case where
+		 * structs/unions are exactly the same, down to the referenced
+		 * type IDs. Anything more complicated (e.g., if referenced
+		 * types are different, but equivalent) is *way more*
+		 * complicated and requires a many-to-many equivalence mapping.
+		 */
+		if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
+			return 1;
+		return 0;
 	}
 
 	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
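btf_dedup_identical_structs() extends the existing identical-arrays workaround to structs/unions that match member for member, including referenced type IDs. An illustrative sketch of duplicate definitions collapsing under dedup (hypothetical names and IDs; error checks elided):

#include <stdio.h>
#include <bpf/btf.h>

static void demo_struct_dedup(void)
{
	struct btf *btf = btf__new_empty();
	int int_id;

	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);	/* id 1 */

	/* two member-for-member identical definitions, as duplicated
	 * DWARF can produce within a single object file
	 */
	btf__add_struct(btf, "pair", 8);			/* id 2 */
	btf__add_field(btf, "a", int_id, 0, 0);
	btf__add_field(btf, "b", int_id, 32, 0);
	btf__add_struct(btf, "pair", 8);			/* id 3, duplicate */
	btf__add_field(btf, "a", int_id, 0, 0);
	btf__add_field(btf, "b", int_id, 32, 0);

	if (!btf__dedup(btf, NULL))		/* NULL opts are valid */
		printf("types after dedup: %u\n", btf__type_cnt(btf));
	btf__free(btf);
}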
@@ -3952,8 +4055,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
 		return 0;
 
 	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
-	if (!d->opts.dont_resolve_fwds
-	    && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
+	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
 	    && cand_kind != canon_kind) {
 		__u16 real_kind;
 		__u16 fwd_kind;
@@ -3979,10 +4081,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
 		return btf_equal_int_tag(cand_type, canon_type);
 
 	case BTF_KIND_ENUM:
-		if (d->opts.dont_resolve_fwds)
-			return btf_equal_enum(cand_type, canon_type);
-		else
-			return btf_compat_enum(cand_type, canon_type);
+		return btf_compat_enum(cand_type, canon_type);
 
 	case BTF_KIND_FWD:
 	case BTF_KIND_FLOAT:
@@ -3994,6 +4093,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
 	case BTF_KIND_PTR:
 	case BTF_KIND_TYPEDEF:
 	case BTF_KIND_FUNC:
+	case BTF_KIND_TYPE_TAG:
 		if (cand_type->info != canon_type->info)
 			return 0;
 		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
@@ -4289,6 +4389,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
 	case BTF_KIND_PTR:
 	case BTF_KIND_TYPEDEF:
 	case BTF_KIND_FUNC:
+	case BTF_KIND_TYPE_TAG:
 		ref_type_id = btf_dedup_ref_type(d, t->type);
 		if (ref_type_id < 0)
 			return ref_type_id;
@@ -4595,6 +4696,7 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
 	case BTF_KIND_FUNC:
 	case BTF_KIND_VAR:
 	case BTF_KIND_DECL_TAG:
+	case BTF_KIND_TYPE_TAG:
 		return visit(&t->type, ctx);
 
 	case BTF_KIND_ARRAY: {
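For context, the COMPAT_VERSION/DEFAULT_VERSION helpers used above boil down to GNU .symver assembler directives (see libbpf's libbpf_internal.h for the actual definitions); roughly:

/* sketch, not a verbatim copy: "@@" marks the default version that new
 * binaries link against, "@" keeps the old ABI alive for binaries built
 * against LIBBPF_0.0.2
 */
#define COMPAT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@" #version);
#define DEFAULT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@@" #version);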