Diffstat (limited to 'net/bpf/test_run.c')
-rw-r--r--	net/bpf/test_run.c	276
1 file changed, 214 insertions(+), 62 deletions(-)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 58bcb8c849d5..a5d72c48fb66 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -2,6 +2,7 @@
 /* Copyright (c) 2017 Facebook
  */
 #include <linux/bpf.h>
+#include <linux/btf_ids.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/etherdevice.h>
@@ -10,20 +11,86 @@
 #include <net/bpf_sk_storage.h>
 #include <net/sock.h>
 #include <net/tcp.h>
+#include <net/net_namespace.h>
 #include <linux/error-injection.h>
 #include <linux/smp.h>
+#include <linux/sock_diag.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/bpf_test_run.h>
 
+struct bpf_test_timer {
+	enum { NO_PREEMPT, NO_MIGRATE } mode;
+	u32 i;
+	u64 time_start, time_spent;
+};
+
+static void bpf_test_timer_enter(struct bpf_test_timer *t)
+	__acquires(rcu)
+{
+	rcu_read_lock();
+	if (t->mode == NO_PREEMPT)
+		preempt_disable();
+	else
+		migrate_disable();
+
+	t->time_start = ktime_get_ns();
+}
+
+static void bpf_test_timer_leave(struct bpf_test_timer *t)
+	__releases(rcu)
+{
+	t->time_start = 0;
+
+	if (t->mode == NO_PREEMPT)
+		preempt_enable();
+	else
+		migrate_enable();
+	rcu_read_unlock();
+}
+
+static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
+	__must_hold(rcu)
+{
+	t->i++;
+	if (t->i >= repeat) {
+		/* We're done. */
+		t->time_spent += ktime_get_ns() - t->time_start;
+		do_div(t->time_spent, t->i);
+		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
+		*err = 0;
+		goto reset;
+	}
+
+	if (signal_pending(current)) {
+		/* During iteration: we've been cancelled, abort. */
+		*err = -EINTR;
+		goto reset;
+	}
+
+	if (need_resched()) {
+		/* During iteration: we need to reschedule between runs. */
+		t->time_spent += ktime_get_ns() - t->time_start;
+		bpf_test_timer_leave(t);
+		cond_resched();
+		bpf_test_timer_enter(t);
+	}
+
+	/* Do another round. */
+	return true;
+
+reset:
+	t->i = 0;
+	return false;
+}
+
 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 			u32 *retval, u32 *time, bool xdp)
 {
 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
+	struct bpf_test_timer t = { NO_MIGRATE };
 	enum bpf_cgroup_storage_type stype;
-	u64 time_start, time_spent = 0;
-	int ret = 0;
-	u32 i;
+	int ret;
 
 	for_each_cgroup_storage_type(stype) {
 		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
@@ -38,40 +105,20 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 	if (!repeat)
 		repeat = 1;
 
-	rcu_read_lock();
-	migrate_disable();
-	time_start = ktime_get_ns();
-	for (i = 0; i < repeat; i++) {
-		bpf_cgroup_storage_set(storage);
+	bpf_test_timer_enter(&t);
+	do {
+		ret = bpf_cgroup_storage_set(storage);
+		if (ret)
+			break;
 
 		if (xdp)
 			*retval = bpf_prog_run_xdp(prog, ctx);
 		else
 			*retval = BPF_PROG_RUN(prog, ctx);
 
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			break;
-		}
-
-		if (need_resched()) {
-			time_spent += ktime_get_ns() - time_start;
-			migrate_enable();
-			rcu_read_unlock();
-
-			cond_resched();
-
-			rcu_read_lock();
-			migrate_disable();
-			time_start = ktime_get_ns();
-		}
-	}
-	time_spent += ktime_get_ns() - time_start;
-	migrate_enable();
-	rcu_read_unlock();
-
-	do_div(time_spent, repeat);
-	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
+		bpf_cgroup_storage_unset();
+	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
+	bpf_test_timer_leave(&t);
 
 	for_each_cgroup_storage_type(stype)
 		bpf_cgroup_storage_free(storage[stype]);
@@ -167,10 +214,37 @@ int noinline bpf_modify_return_test(int a, int *b)
 	*b += 1;
 	return a + *b;
 }
+
+u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
+{
+	return a + b + c + d;
+}
+
+int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
+{
+	return a + b;
+}
+
+struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
+{
+	return sk;
+}
+
 __diag_pop();
 
 ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);
 
+BTF_SET_START(test_sk_kfunc_ids)
+BTF_ID(func, bpf_kfunc_call_test1)
+BTF_ID(func, bpf_kfunc_call_test2)
+BTF_ID(func, bpf_kfunc_call_test3)
+BTF_SET_END(test_sk_kfunc_ids)
+
+bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
+{
+	return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id);
+}
+
 static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
 			   u32 headroom, u32 tailroom)
 {
@@ -674,18 +748,17 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 				     const union bpf_attr *kattr,
 				     union bpf_attr __user *uattr)
 {
+	struct bpf_test_timer t = { NO_PREEMPT };
 	u32 size = kattr->test.data_size_in;
 	struct bpf_flow_dissector ctx = {};
 	u32 repeat = kattr->test.repeat;
 	struct bpf_flow_keys *user_ctx;
 	struct bpf_flow_keys flow_keys;
-	u64 time_start, time_spent = 0;
 	const struct ethhdr *eth;
 	unsigned int flags = 0;
 	u32 retval, duration;
 	void *data;
 	int ret;
-	u32 i;
 
 	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
 		return -EINVAL;
@@ -721,48 +794,127 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 	ctx.data = data;
 	ctx.data_end = (__u8 *)data + size;
 
-	rcu_read_lock();
-	preempt_disable();
-	time_start = ktime_get_ns();
-	for (i = 0; i < repeat; i++) {
+	bpf_test_timer_enter(&t);
+	do {
 		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
 					  size, flags);
+	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
+	bpf_test_timer_leave(&t);
 
-		if (signal_pending(current)) {
-			preempt_enable();
-			rcu_read_unlock();
+	if (ret < 0)
+		goto out;
 
-			ret = -EINTR;
-			goto out;
-		}
+	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
+			      retval, duration);
+	if (!ret)
+		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
+				     sizeof(struct bpf_flow_keys));
 
-		if (need_resched()) {
-			time_spent += ktime_get_ns() - time_start;
-			preempt_enable();
-			rcu_read_unlock();
+out:
+	kfree(user_ctx);
+	kfree(data);
+	return ret;
+}
 
-			cond_resched();
+int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
+				union bpf_attr __user *uattr)
+{
+	struct bpf_test_timer t = { NO_PREEMPT };
+	struct bpf_prog_array *progs = NULL;
+	struct bpf_sk_lookup_kern ctx = {};
+	u32 repeat = kattr->test.repeat;
+	struct bpf_sk_lookup *user_ctx;
+	u32 retval, duration;
+	int ret = -EINVAL;
 
-			rcu_read_lock();
-			preempt_disable();
-			time_start = ktime_get_ns();
-		}
+	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
+		return -EINVAL;
+
+	if (kattr->test.flags || kattr->test.cpu)
+		return -EINVAL;
+
+	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
+	    kattr->test.data_size_out)
+		return -EINVAL;
+
+	if (!repeat)
+		repeat = 1;
+
+	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
+	if (IS_ERR(user_ctx))
+		return PTR_ERR(user_ctx);
+
+	if (!user_ctx)
+		return -EINVAL;
+
+	if (user_ctx->sk)
+		goto out;
+
+	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
+		goto out;
+
+	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
+		ret = -ERANGE;
+		goto out;
 	}
-	time_spent += ktime_get_ns() - time_start;
-	preempt_enable();
-	rcu_read_unlock();
 
-	do_div(time_spent, repeat);
-	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
+	ctx.family = (u16)user_ctx->family;
+	ctx.protocol = (u16)user_ctx->protocol;
+	ctx.dport = (u16)user_ctx->local_port;
+	ctx.sport = (__force __be16)user_ctx->remote_port;
 
-	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
-			      retval, duration);
+	switch (ctx.family) {
+	case AF_INET:
+		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
+		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
+		break;
+
+#if IS_ENABLED(CONFIG_IPV6)
+	case AF_INET6:
+		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
+		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
+		break;
+#endif
+
+	default:
+		ret = -EAFNOSUPPORT;
+		goto out;
+	}
+
+	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
+	if (!progs) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	progs->items[0].prog = prog;
+
+	bpf_test_timer_enter(&t);
+	do {
+		ctx.selected_sk = NULL;
+		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN);
+	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
+	bpf_test_timer_leave(&t);
+
+	if (ret < 0)
+		goto out;
+
+	user_ctx->cookie = 0;
+	if (ctx.selected_sk) {
+		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
+			ret = -EOPNOTSUPP;
+			goto out;
+		}
+
+		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
+	}
+
+	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
 	if (!ret)
-		ret = bpf_ctx_finish(kattr, uattr, user_ctx, -				     sizeof(struct bpf_flow_keys));
+		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
 
 out:
+	bpf_prog_array_free(progs);
 	kfree(user_ctx);
-	kfree(data);
 	return ret;
 }
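
The bpf_kfunc_call_test* functions and the test_sk_kfunc_ids BTF set added above exist so selftests can exercise direct kernel-function (kfunc) calls from BPF programs; bpf_prog_test_check_kfunc_call() is what lets the verifier accept calls to members of that set. A minimal BPF-side sketch of such a caller, closely modelled on the kernel's own selftests (the section name, the forward declaration of struct sock, and the bpf_sk_fullsock() step are illustrative conventions, not mandated by the patch):

	/* Sketch: calling bpf_kfunc_call_test2() from a BPF program.
	 * Assumes clang with the BPF target and a libbpf that resolves
	 * __ksym externs against kernel BTF.
	 */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct sock;	/* opaque here; only a pointer is passed through */

	extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym;

	SEC("classifier")
	int kfunc_call_test(struct __sk_buff *skb)
	{
		struct bpf_sock *sk = skb->sk;

		if (!sk)
			return -1;

		sk = bpf_sk_fullsock(sk);
		if (!sk)
			return -1;

		/* Dispatches to the kernel's bpf_kfunc_call_test2() above,
		 * which returns a + b, so the test run's retval should be 3.
		 */
		return bpf_kfunc_call_test2((struct sock *)sk, 1, 2);
	}

	char _license[] SEC("license") = "GPL";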
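The new bpf_prog_test_run_sk_lookup() runs the program under test through a one-element prog array, so it sees the same struct bpf_sk_lookup context it would during a real socket lookup. A trivial program it could drive (a hypothetical example, not part of the patch; the "sk_lookup" section name follows libbpf convention):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("sk_lookup")
	int pass_all(struct bpf_sk_lookup *ctx)
	{
		/* No bpf_sk_assign(): selected_sk stays NULL, so the test
		 * run reports cookie == 0 alongside the SK_PASS verdict.
		 */
		return SK_PASS;
	}

	char _license[] SEC("license") = "GPL";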
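From user space, the path is exercised by passing the context through ctx_in/ctx_out of BPF_PROG_TEST_RUN. A hedged sketch, assuming prog_fd refers to a loaded BPF_PROG_TYPE_SK_LOOKUP program and a libbpf recent enough to provide LIBBPF_OPTS() and bpf_prog_test_run_opts(); addresses and ports are examples. Note the byte-order contract implied by the kernel conversions above: IPs and remote_port are network byte order, local_port is host byte order.

	#include <stdio.h>
	#include <arpa/inet.h>
	#include <linux/bpf.h>
	#include <bpf/bpf.h>

	static int run_sk_lookup_once(int prog_fd)
	{
		struct bpf_sk_lookup ctx = {
			.family      = AF_INET,
			.protocol    = IPPROTO_TCP,
			.remote_ip4  = inet_addr("192.0.2.1"),	/* network order */
			.remote_port = htons(60000),		/* network order */
			.local_ip4   = inet_addr("192.0.2.2"),
			.local_port  = 7007,			/* host order */
		};
		LIBBPF_OPTS(bpf_test_run_opts, opts,
			.ctx_in = &ctx,
			.ctx_size_in = sizeof(ctx),
			.ctx_out = &ctx,
			.ctx_size_out = sizeof(ctx),
			.repeat = 1,
		);
		int err;

		err = bpf_prog_test_run_opts(prog_fd, &opts);
		if (err)
			return err;

		/* retval is the program's verdict; if it bpf_sk_assign()ed a
		 * socket, ctx.cookie now identifies that socket (0 otherwise).
		 */
		printf("retval=%u cookie=%llu duration=%uns\n",
		       opts.retval, (unsigned long long)ctx.cookie,
		       opts.duration);
		return 0;
	}

The ctx_out round trip is what surfaces the cookie written by the kernel; data_in/data_out must stay unset, since the kernel rejects them for this program type, as the checks in bpf_prog_test_run_sk_lookup() show.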