Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--	kernel/bpf/syscall.c	198
1 file changed, 192 insertions(+), 6 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 43f95d190eea..4ca46df19c9a 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -203,11 +203,13 @@ static int bpf_map_alloc_id(struct bpf_map *map)
 {
 	int id;
 
+	idr_preload(GFP_KERNEL);
 	spin_lock_bh(&map_idr_lock);
 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
 	if (id > 0)
 		map->id = id;
 	spin_unlock_bh(&map_idr_lock);
+	idr_preload_end();
 
 	if (WARN_ON_ONCE(!id))
 		return -ENOSPC;
@@ -940,11 +942,13 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
 {
 	int id;
 
+	idr_preload(GFP_KERNEL);
 	spin_lock_bh(&prog_idr_lock);
 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
 	if (id > 0)
 		prog->aux->id = id;
 	spin_unlock_bh(&prog_idr_lock);
+	idr_preload_end();
 
 	/* id is in [1, INT_MAX) */
 	if (WARN_ON_ONCE(!id))
@@ -1167,8 +1171,63 @@ struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
 }
 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
 
+/* Initially all BPF programs could be loaded w/o specifying
+ * expected_attach_type. Later for some of them specifying expected_attach_type
+ * at load time became required so that program could be validated properly.
+ * Programs of types that are allowed to be loaded both w/ and w/o (for
+ * backward compatibility) expected_attach_type, should have the default attach
+ * type assigned to expected_attach_type for the latter case, so that it can be
+ * validated later at attach time.
+ *
+ * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
+ * prog type requires it but has some attach types that have to be backward
+ * compatible.
+ */
+static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
+{
+	switch (attr->prog_type) {
+	case BPF_PROG_TYPE_CGROUP_SOCK:
+		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
+		 * exist so checking for non-zero is the way to go here.
+		 */
+		if (!attr->expected_attach_type)
+			attr->expected_attach_type =
+				BPF_CGROUP_INET_SOCK_CREATE;
+		break;
+	}
+}
+
+static int
+bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
+				enum bpf_attach_type expected_attach_type)
+{
+	switch (prog_type) {
+	case BPF_PROG_TYPE_CGROUP_SOCK:
+		switch (expected_attach_type) {
+		case BPF_CGROUP_INET_SOCK_CREATE:
+		case BPF_CGROUP_INET4_POST_BIND:
+		case BPF_CGROUP_INET6_POST_BIND:
+			return 0;
+		default:
+			return -EINVAL;
+		}
+	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+		switch (expected_attach_type) {
+		case BPF_CGROUP_INET4_BIND:
+		case BPF_CGROUP_INET6_BIND:
+		case BPF_CGROUP_INET4_CONNECT:
+		case BPF_CGROUP_INET6_CONNECT:
+			return 0;
+		default:
+			return -EINVAL;
+		}
+	default:
+		return 0;
+	}
+}
+
 /* last field in 'union bpf_attr' used by this command */
-#define	BPF_PROG_LOAD_LAST_FIELD prog_ifindex
+#define	BPF_PROG_LOAD_LAST_FIELD expected_attach_type
 
 static int bpf_prog_load(union bpf_attr *attr)
 {
@@ -1205,11 +1264,17 @@ static int bpf_prog_load(union bpf_attr *attr)
 	    !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
+	bpf_prog_load_fixup_attach_type(attr);
+	if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type))
+		return -EINVAL;
+
 	/* plain bpf_prog allocation */
 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
 	if (!prog)
 		return -ENOMEM;
 
+	prog->expected_attach_type = attr->expected_attach_type;
+
 	prog->aux->offload_requested = !!attr->prog_ifindex;
 
 	err = security_bpf_prog_alloc(prog->aux);
@@ -1311,11 +1376,99 @@ static int bpf_obj_get(const union bpf_attr *attr)
 				attr->file_flags);
 }
 
+struct bpf_raw_tracepoint {
+	struct bpf_raw_event_map *btp;
+	struct bpf_prog *prog;
+};
+
+static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
+{
+	struct bpf_raw_tracepoint *raw_tp = filp->private_data;
+
+	if (raw_tp->prog) {
+		bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
+		bpf_prog_put(raw_tp->prog);
+	}
+	kfree(raw_tp);
+	return 0;
+}
+
+static const struct file_operations bpf_raw_tp_fops = {
+	.release	= bpf_raw_tracepoint_release,
+	.read		= bpf_dummy_read,
+	.write		= bpf_dummy_write,
+};
+
+#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
+
+static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
+{
+	struct bpf_raw_tracepoint *raw_tp;
+	struct bpf_raw_event_map *btp;
+	struct bpf_prog *prog;
+	char tp_name[128];
+	int tp_fd, err;
+
+	if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name),
+			      sizeof(tp_name) - 1) < 0)
+		return -EFAULT;
+	tp_name[sizeof(tp_name) - 1] = 0;
+
+	btp = bpf_find_raw_tracepoint(tp_name);
+	if (!btp)
+		return -ENOENT;
+
+	raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
+	if (!raw_tp)
+		return -ENOMEM;
+	raw_tp->btp = btp;
+
+	prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd,
+				 BPF_PROG_TYPE_RAW_TRACEPOINT);
+	if (IS_ERR(prog)) {
+		err = PTR_ERR(prog);
+		goto out_free_tp;
+	}
+
+	err = bpf_probe_register(raw_tp->btp, prog);
+	if (err)
+		goto out_put_prog;
+
+	raw_tp->prog = prog;
+	tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
+				 O_CLOEXEC);
+	if (tp_fd < 0) {
+		bpf_probe_unregister(raw_tp->btp, prog);
+		err = tp_fd;
+		goto out_put_prog;
+	}
+	return tp_fd;
+
+out_put_prog:
+	bpf_prog_put(prog);
+out_free_tp:
+	kfree(raw_tp);
+	return err;
+}
+
 #ifdef CONFIG_CGROUP_BPF
 
+static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
+					     enum bpf_attach_type attach_type)
+{
+	switch (prog->type) {
+	case BPF_PROG_TYPE_CGROUP_SOCK:
+	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
+	default:
+		return 0;
+	}
+}
+
 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
-static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
+static int sockmap_get_from_fd(const union bpf_attr *attr,
+			       int type, bool attach)
 {
 	struct bpf_prog *prog = NULL;
 	int ufd = attr->target_fd;
@@ -1329,8 +1482,7 @@ static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
 		return PTR_ERR(map);
 
 	if (attach) {
-		prog = bpf_prog_get_type(attr->attach_bpf_fd,
-					 BPF_PROG_TYPE_SK_SKB);
+		prog = bpf_prog_get_type(attr->attach_bpf_fd, type);
 		if (IS_ERR(prog)) {
 			fdput(f);
 			return PTR_ERR(prog);
@@ -1374,17 +1526,27 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 		ptype = BPF_PROG_TYPE_CGROUP_SKB;
 		break;
 	case BPF_CGROUP_INET_SOCK_CREATE:
+	case BPF_CGROUP_INET4_POST_BIND:
+	case BPF_CGROUP_INET6_POST_BIND:
 		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
 		break;
+	case BPF_CGROUP_INET4_BIND:
+	case BPF_CGROUP_INET6_BIND:
+	case BPF_CGROUP_INET4_CONNECT:
+	case BPF_CGROUP_INET6_CONNECT:
+		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
+		break;
 	case BPF_CGROUP_SOCK_OPS:
 		ptype = BPF_PROG_TYPE_SOCK_OPS;
 		break;
 	case BPF_CGROUP_DEVICE:
 		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
 		break;
+	case BPF_SK_MSG_VERDICT:
+		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true);
 	case BPF_SK_SKB_STREAM_PARSER:
 	case BPF_SK_SKB_STREAM_VERDICT:
-		return sockmap_get_from_fd(attr, true);
+		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true);
 	default:
 		return -EINVAL;
 	}
@@ -1393,6 +1555,11 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 	if (IS_ERR(prog))
 		return PTR_ERR(prog);
 
+	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
+		bpf_prog_put(prog);
+		return -EINVAL;
+	}
+
 	cgrp = cgroup_get_from_fd(attr->target_fd);
 	if (IS_ERR(cgrp)) {
 		bpf_prog_put(prog);
@@ -1429,17 +1596,27 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 		ptype = BPF_PROG_TYPE_CGROUP_SKB;
 		break;
 	case BPF_CGROUP_INET_SOCK_CREATE:
+	case BPF_CGROUP_INET4_POST_BIND:
+	case BPF_CGROUP_INET6_POST_BIND:
 		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
 		break;
+	case BPF_CGROUP_INET4_BIND:
+	case BPF_CGROUP_INET6_BIND:
+	case BPF_CGROUP_INET4_CONNECT:
+	case BPF_CGROUP_INET6_CONNECT:
+		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
+		break;
 	case BPF_CGROUP_SOCK_OPS:
 		ptype = BPF_PROG_TYPE_SOCK_OPS;
 		break;
 	case BPF_CGROUP_DEVICE:
 		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
 		break;
+	case BPF_SK_MSG_VERDICT:
+		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false);
 	case BPF_SK_SKB_STREAM_PARSER:
 	case BPF_SK_SKB_STREAM_VERDICT:
-		return sockmap_get_from_fd(attr, false);
+		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false);
 	default:
 		return -EINVAL;
 	}
@@ -1478,6 +1655,12 @@ static int bpf_prog_query(const union bpf_attr *attr,
 	case BPF_CGROUP_INET_INGRESS:
 	case BPF_CGROUP_INET_EGRESS:
 	case BPF_CGROUP_INET_SOCK_CREATE:
+	case BPF_CGROUP_INET4_BIND:
+	case BPF_CGROUP_INET6_BIND:
+	case BPF_CGROUP_INET4_POST_BIND:
+	case BPF_CGROUP_INET6_POST_BIND:
+	case BPF_CGROUP_INET4_CONNECT:
+	case BPF_CGROUP_INET6_CONNECT:
 	case BPF_CGROUP_SOCK_OPS:
 	case BPF_CGROUP_DEVICE:
 		break;
@@ -1917,6 +2100,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 	case BPF_OBJ_GET_INFO_BY_FD:
 		err = bpf_obj_get_info_by_fd(&attr, uattr);
 		break;
+	case BPF_RAW_TRACEPOINT_OPEN:
+		err = bpf_raw_tracepoint_open(&attr);
+		break;
 	default:
 		err = -EINVAL;
 		break;
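
For context, the two new uapi surfaces touched by this diff -- the expected_attach_type field of union bpf_attr honoured at BPF_PROG_LOAD time, and the BPF_RAW_TRACEPOINT_OPEN command -- can be exercised from userspace roughly as sketched below. This is a minimal illustration, not part of the commit: the sys_bpf(), ptr_to_u64() and load_prog() helpers are local to the example, "sys_enter" is just one example of a raw tracepoint name, and the sketch assumes uapi headers that already contain these changes plus CAP_SYS_ADMIN at run time.

/* Hypothetical userspace sketch; helper names are local to this example. */
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

static __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

/* Load a trivial "return 1" program of the given type, declaring the attach
 * type up front via the new expected_attach_type field (0 if unused).
 */
static int load_prog(enum bpf_prog_type type, enum bpf_attach_type attach)
{
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 1 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = sizeof(insns) / sizeof(insns[0]);
	attr.license = ptr_to_u64("GPL");
	attr.expected_attach_type = attach;

	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}

int main(void)
{
	union bpf_attr attr;
	int sock_fd, tp_prog_fd, tp_fd;

	/* A cgroup/sock program bound to BPF_CGROUP_INET_SOCK_CREATE at load
	 * time; attaching it to any other attach type now fails with -EINVAL
	 * in bpf_prog_attach_check_attach_type().
	 */
	sock_fd = load_prog(BPF_PROG_TYPE_CGROUP_SOCK,
			    BPF_CGROUP_INET_SOCK_CREATE);
	if (sock_fd < 0)
		perror("BPF_PROG_LOAD (cgroup/sock)");

	/* Attach a raw tracepoint program with the new command; the returned
	 * fd pins the (tracepoint, prog) pair until it is closed.
	 */
	tp_prog_fd = load_prog(BPF_PROG_TYPE_RAW_TRACEPOINT, 0);
	if (tp_prog_fd < 0) {
		perror("BPF_PROG_LOAD (raw_tracepoint)");
		return 1;
	}
	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = ptr_to_u64("sys_enter");
	attr.raw_tracepoint.prog_fd = tp_prog_fd;
	tp_fd = sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
	if (tp_fd < 0)
		perror("BPF_RAW_TRACEPOINT_OPEN");

	return 0;
}

Note that closing the fd returned by BPF_RAW_TRACEPOINT_OPEN detaches and releases the program via bpf_raw_tracepoint_release() above; there is no separate detach command for raw tracepoints.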