diff options
Diffstat (limited to 'kernel/trace/bpf_trace.c')
| -rw-r--r-- | kernel/trace/bpf_trace.c | 211 | 
1 file changed, 208 insertions, 3 deletions
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 68e5cdd24cef..688552df95ca 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -20,6 +20,8 @@  #include <linux/fprobe.h>  #include <linux/bsearch.h>  #include <linux/sort.h> +#include <linux/key.h> +#include <linux/verification.h>  #include <net/bpf_sk_storage.h> @@ -1026,11 +1028,30 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {  	.arg1_type	= ARG_PTR_TO_CTX,  }; +#ifdef CONFIG_X86_KERNEL_IBT +static unsigned long get_entry_ip(unsigned long fentry_ip) +{ +	u32 instr; + +	/* Being extra safe in here in case entry ip is on the page-edge. */ +	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1)) +		return fentry_ip; +	if (is_endbr(instr)) +		fentry_ip -= ENDBR_INSN_SIZE; +	return fentry_ip; +} +#else +#define get_entry_ip(fentry_ip) fentry_ip +#endif +  BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)  {  	struct kprobe *kp = kprobe_running(); -	return kp ? (uintptr_t)kp->addr : 0; +	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY)) +		return 0; + +	return get_entry_ip((uintptr_t)kp->addr);  }  static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = { @@ -1181,6 +1202,184 @@ static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {  	.arg1_type	= ARG_PTR_TO_CTX,  }; +#ifdef CONFIG_KEYS +__diag_push(); +__diag_ignore_all("-Wmissing-prototypes", +		  "kfuncs which will be used in BPF programs"); + +/** + * bpf_lookup_user_key - lookup a key by its serial + * @serial: key handle serial number + * @flags: lookup-specific flags + * + * Search a key with a given *serial* and the provided *flags*. + * If found, increment the reference count of the key by one, and + * return it in the bpf_key structure. + * + * The bpf_key structure must be passed to bpf_key_put() when done + * with it, so that the key reference count is decremented and the + * bpf_key structure is freed. 
+ * + * Permission checks are deferred to the time the key is used by + * one of the available key-specific kfuncs. + * + * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested + * special keyring (e.g. session keyring), if it doesn't yet exist. + * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting + * for the key construction, and to retrieve uninstantiated keys (keys + * without data attached to them). + * + * Return: a bpf_key pointer with a valid key pointer if the key is found, a + *         NULL pointer otherwise. + */ +struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags) +{ +	key_ref_t key_ref; +	struct bpf_key *bkey; + +	if (flags & ~KEY_LOOKUP_ALL) +		return NULL; + +	/* +	 * Permission check is deferred until the key is used, as the +	 * intent of the caller is unknown here. +	 */ +	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK); +	if (IS_ERR(key_ref)) +		return NULL; + +	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL); +	if (!bkey) { +		key_put(key_ref_to_ptr(key_ref)); +		return NULL; +	} + +	bkey->key = key_ref_to_ptr(key_ref); +	bkey->has_ref = true; + +	return bkey; +} + +/** + * bpf_lookup_system_key - lookup a key by a system-defined ID + * @id: key ID + * + * Obtain a bpf_key structure with a key pointer set to the passed key ID. + * The key pointer is marked as invalid, to prevent bpf_key_put() from + * attempting to decrement the key reference count on that pointer. The key + * pointer set in such way is currently understood only by + * verify_pkcs7_signature(). 
+ * + * Set *id* to one of the values defined in include/linux/verification.h: + * 0 for the primary keyring (immutable keyring of system keys); + * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring + * (where keys can be added only if they are vouched for by existing keys + * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform + * keyring (primarily used by the integrity subsystem to verify a kexec'ed + * kernel image and, possibly, the initramfs signature). + * + * Return: a bpf_key pointer with an invalid key pointer set from the + *         pre-determined ID on success, a NULL pointer otherwise + */ +struct bpf_key *bpf_lookup_system_key(u64 id) +{ +	struct bpf_key *bkey; + +	if (system_keyring_id_check(id) < 0) +		return NULL; + +	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC); +	if (!bkey) +		return NULL; + +	bkey->key = (struct key *)(unsigned long)id; +	bkey->has_ref = false; + +	return bkey; +} + +/** + * bpf_key_put - decrement key reference count if key is valid and free bpf_key + * @bkey: bpf_key structure + * + * Decrement the reference count of the key inside *bkey*, if the pointer + * is valid, and free *bkey*. + */ +void bpf_key_put(struct bpf_key *bkey) +{ +	if (bkey->has_ref) +		key_put(bkey->key); + +	kfree(bkey); +} + +#ifdef CONFIG_SYSTEM_DATA_VERIFICATION +/** + * bpf_verify_pkcs7_signature - verify a PKCS#7 signature + * @data_ptr: data to verify + * @sig_ptr: signature of the data + * @trusted_keyring: keyring with keys trusted for signature verification + * + * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr* + * with keys in a keyring referenced by *trusted_keyring*. + * + * Return: 0 on success, a negative value on error.
+ */ +int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr, +			       struct bpf_dynptr_kern *sig_ptr, +			       struct bpf_key *trusted_keyring) +{ +	int ret; + +	if (trusted_keyring->has_ref) { +		/* +		 * Do the permission check deferred in bpf_lookup_user_key(). +		 * See bpf_lookup_user_key() for more details. +		 * +		 * A call to key_task_permission() here would be redundant, as +		 * it is already done by keyring_search() called by +		 * find_asymmetric_key(). +		 */ +		ret = key_validate(trusted_keyring->key); +		if (ret < 0) +			return ret; +	} + +	return verify_pkcs7_signature(data_ptr->data, +				      bpf_dynptr_get_size(data_ptr), +				      sig_ptr->data, +				      bpf_dynptr_get_size(sig_ptr), +				      trusted_keyring->key, +				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL, +				      NULL); +} +#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ + +__diag_pop(); + +BTF_SET8_START(key_sig_kfunc_set) +BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE) +#ifdef CONFIG_SYSTEM_DATA_VERIFICATION +BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE) +#endif +BTF_SET8_END(key_sig_kfunc_set) + +static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = { +	.owner = THIS_MODULE, +	.set = &key_sig_kfunc_set, +}; + +static int __init bpf_key_sig_kfuncs_init(void) +{ +	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, +					 &bpf_key_sig_kfunc_set); +} + +late_initcall(bpf_key_sig_kfuncs_init); +#endif /* CONFIG_KEYS */ +  static const struct bpf_func_proto *  bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)  { @@ -2042,9 +2241,15 @@ static __always_inline  void __bpf_trace_run(struct bpf_prog *prog, u64 *args)  {  	cant_sleep(); +	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { +		bpf_prog_inc_misses_counter(prog); +		goto out; +	}  	rcu_read_lock();  	
(void) bpf_prog_run(prog, args);  	rcu_read_unlock(); +out: +	this_cpu_dec(*(prog->active));  }  #define UNPACK(...)			__VA_ARGS__ @@ -2414,13 +2619,13 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,  }  static void -kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip, +kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,  			  struct pt_regs *regs)  {  	struct bpf_kprobe_multi_link *link;  	link = container_of(fp, struct bpf_kprobe_multi_link, fp); -	kprobe_multi_link_prog_run(link, entry_ip, regs); +	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);  }  static int symbols_cmp_r(const void *a, const void *b, const void *priv)  |