diff options
Diffstat (limited to 'kernel/bpf/btf.c')
| -rw-r--r-- | kernel/bpf/btf.c | 27 | 
1 file changed, 25 insertions, 2 deletions
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 90c4a32d89ff..821063660d9f 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -218,6 +218,7 @@ enum btf_kfunc_hook {  	BTF_KFUNC_HOOK_SOCKET_FILTER,  	BTF_KFUNC_HOOK_LWT,  	BTF_KFUNC_HOOK_NETFILTER, +	BTF_KFUNC_HOOK_KPROBE,  	BTF_KFUNC_HOOK_MAX,  }; @@ -3464,6 +3465,15 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,  			goto end;  		}  	} +	if (field_mask & BPF_WORKQUEUE) { +		if (!strcmp(name, "bpf_wq")) { +			if (*seen_mask & BPF_WORKQUEUE) +				return -E2BIG; +			*seen_mask |= BPF_WORKQUEUE; +			type = BPF_WORKQUEUE; +			goto end; +		} +	}  	field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");  	field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");  	field_mask_test_name(BPF_RB_ROOT,   "bpf_rb_root"); @@ -3515,6 +3525,7 @@ static int btf_find_struct_field(const struct btf *btf,  		switch (field_type) {  		case BPF_SPIN_LOCK:  		case BPF_TIMER: +		case BPF_WORKQUEUE:  		case BPF_LIST_NODE:  		case BPF_RB_NODE:  		case BPF_REFCOUNT: @@ -3582,6 +3593,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,  		switch (field_type) {  		case BPF_SPIN_LOCK:  		case BPF_TIMER: +		case BPF_WORKQUEUE:  		case BPF_LIST_NODE:  		case BPF_RB_NODE:  		case BPF_REFCOUNT: @@ -3816,6 +3828,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type  	rec->spin_lock_off = -EINVAL;  	rec->timer_off = -EINVAL; +	rec->wq_off = -EINVAL;  	rec->refcount_off = -EINVAL;  	for (i = 0; i < cnt; i++) {  		field_type_size = btf_field_type_size(info_arr[i].type); @@ -3846,6 +3859,11 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type  			/* Cache offset for faster lookup at runtime */  			rec->timer_off = rec->fields[i].offset;  			break; +		case BPF_WORKQUEUE: +			WARN_ON_ONCE(rec->wq_off >= 0); +			/* Cache offset for faster lookup at runtime */ +			rec->wq_off = rec->fields[i].offset; +			break;  		case 
BPF_REFCOUNT:  			WARN_ON_ONCE(rec->refcount_off >= 0);  			/* Cache offset for faster lookup at runtime */ @@ -5642,8 +5660,8 @@ errout_free:  	return ERR_PTR(err);  } -extern char __weak __start_BTF[]; -extern char __weak __stop_BTF[]; +extern char __start_BTF[]; +extern char __stop_BTF[];  extern struct btf *btf_vmlinux;  #define BPF_MAP_TYPE(_id, _ops) @@ -5971,6 +5989,9 @@ struct btf *btf_parse_vmlinux(void)  	struct btf *btf = NULL;  	int err; +	if (!IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) +		return ERR_PTR(-ENOENT); +  	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);  	if (!env)  		return ERR_PTR(-ENOMEM); @@ -8137,6 +8158,8 @@ static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)  		return BTF_KFUNC_HOOK_LWT;  	case BPF_PROG_TYPE_NETFILTER:  		return BTF_KFUNC_HOOK_NETFILTER; +	case BPF_PROG_TYPE_KPROBE: +		return BTF_KFUNC_HOOK_KPROBE;  	default:  		return BTF_KFUNC_HOOK_MAX;  	}  |