diff options
Diffstat (limited to 'kernel/bpf/cgroup.c')
| -rw-r--r-- | kernel/bpf/cgroup.c | 82 | 
1 file changed, 60 insertions(+), 22 deletions(-)
| diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 6a7d931bbc55..9425c2fb872f 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -25,6 +25,7 @@ EXPORT_SYMBOL(cgroup_bpf_enabled_key);   */  void cgroup_bpf_put(struct cgroup *cgrp)  { +	enum bpf_cgroup_storage_type stype;  	unsigned int type;  	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) { @@ -34,8 +35,10 @@ void cgroup_bpf_put(struct cgroup *cgrp)  		list_for_each_entry_safe(pl, tmp, progs, node) {  			list_del(&pl->node);  			bpf_prog_put(pl->prog); -			bpf_cgroup_storage_unlink(pl->storage); -			bpf_cgroup_storage_free(pl->storage); +			for_each_cgroup_storage_type(stype) { +				bpf_cgroup_storage_unlink(pl->storage[stype]); +				bpf_cgroup_storage_free(pl->storage[stype]); +			}  			kfree(pl);  			static_branch_dec(&cgroup_bpf_enabled_key);  		} @@ -97,6 +100,7 @@ static int compute_effective_progs(struct cgroup *cgrp,  				   enum bpf_attach_type type,  				   struct bpf_prog_array __rcu **array)  { +	enum bpf_cgroup_storage_type stype;  	struct bpf_prog_array *progs;  	struct bpf_prog_list *pl;  	struct cgroup *p = cgrp; @@ -125,7 +129,9 @@ static int compute_effective_progs(struct cgroup *cgrp,  				continue;  			progs->items[cnt].prog = pl->prog; -			progs->items[cnt].cgroup_storage = pl->storage; +			for_each_cgroup_storage_type(stype) +				progs->items[cnt].cgroup_storage[stype] = +					pl->storage[stype];  			cnt++;  		}  	} while ((p = cgroup_parent(p))); @@ -232,7 +238,9 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,  {  	struct list_head *progs = &cgrp->bpf.progs[type];  	struct bpf_prog *old_prog = NULL; -	struct bpf_cgroup_storage *storage, *old_storage = NULL; +	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE], +		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL}; +	enum bpf_cgroup_storage_type stype;  	struct bpf_prog_list *pl;  	bool pl_was_allocated;  	int err; @@ -254,34 +262,44 @@ int __cgroup_bpf_attach(struct cgroup 
*cgrp, struct bpf_prog *prog,  	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)  		return -E2BIG; -	storage = bpf_cgroup_storage_alloc(prog); -	if (IS_ERR(storage)) -		return -ENOMEM; +	for_each_cgroup_storage_type(stype) { +		storage[stype] = bpf_cgroup_storage_alloc(prog, stype); +		if (IS_ERR(storage[stype])) { +			storage[stype] = NULL; +			for_each_cgroup_storage_type(stype) +				bpf_cgroup_storage_free(storage[stype]); +			return -ENOMEM; +		} +	}  	if (flags & BPF_F_ALLOW_MULTI) {  		list_for_each_entry(pl, progs, node) {  			if (pl->prog == prog) {  				/* disallow attaching the same prog twice */ -				bpf_cgroup_storage_free(storage); +				for_each_cgroup_storage_type(stype) +					bpf_cgroup_storage_free(storage[stype]);  				return -EINVAL;  			}  		}  		pl = kmalloc(sizeof(*pl), GFP_KERNEL);  		if (!pl) { -			bpf_cgroup_storage_free(storage); +			for_each_cgroup_storage_type(stype) +				bpf_cgroup_storage_free(storage[stype]);  			return -ENOMEM;  		}  		pl_was_allocated = true;  		pl->prog = prog; -		pl->storage = storage; +		for_each_cgroup_storage_type(stype) +			pl->storage[stype] = storage[stype];  		list_add_tail(&pl->node, progs);  	} else {  		if (list_empty(progs)) {  			pl = kmalloc(sizeof(*pl), GFP_KERNEL);  			if (!pl) { -				bpf_cgroup_storage_free(storage); +				for_each_cgroup_storage_type(stype) +					bpf_cgroup_storage_free(storage[stype]);  				return -ENOMEM;  			}  			pl_was_allocated = true; @@ -289,12 +307,15 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,  		} else {  			pl = list_first_entry(progs, typeof(*pl), node);  			old_prog = pl->prog; -			old_storage = pl->storage; -			bpf_cgroup_storage_unlink(old_storage); +			for_each_cgroup_storage_type(stype) { +				old_storage[stype] = pl->storage[stype]; +				bpf_cgroup_storage_unlink(old_storage[stype]); +			}  			pl_was_allocated = false;  		}  		pl->prog = prog; -		pl->storage = storage; +		for_each_cgroup_storage_type(stype) +			pl->storage[stype] = 
storage[stype];  	}  	cgrp->bpf.flags[type] = flags; @@ -304,21 +325,27 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,  		goto cleanup;  	static_branch_inc(&cgroup_bpf_enabled_key); -	if (old_storage) -		bpf_cgroup_storage_free(old_storage); +	for_each_cgroup_storage_type(stype) { +		if (!old_storage[stype]) +			continue; +		bpf_cgroup_storage_free(old_storage[stype]); +	}  	if (old_prog) {  		bpf_prog_put(old_prog);  		static_branch_dec(&cgroup_bpf_enabled_key);  	} -	bpf_cgroup_storage_link(storage, cgrp, type); +	for_each_cgroup_storage_type(stype) +		bpf_cgroup_storage_link(storage[stype], cgrp, type);  	return 0;  cleanup:  	/* and cleanup the prog list */  	pl->prog = old_prog; -	bpf_cgroup_storage_free(pl->storage); -	pl->storage = old_storage; -	bpf_cgroup_storage_link(old_storage, cgrp, type); +	for_each_cgroup_storage_type(stype) { +		bpf_cgroup_storage_free(pl->storage[stype]); +		pl->storage[stype] = old_storage[stype]; +		bpf_cgroup_storage_link(old_storage[stype], cgrp, type); +	}  	if (pl_was_allocated) {  		list_del(&pl->node);  		kfree(pl); @@ -339,6 +366,7 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,  			enum bpf_attach_type type, u32 unused_flags)  {  	struct list_head *progs = &cgrp->bpf.progs[type]; +	enum bpf_cgroup_storage_type stype;  	u32 flags = cgrp->bpf.flags[type];  	struct bpf_prog *old_prog = NULL;  	struct bpf_prog_list *pl; @@ -385,8 +413,10 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,  	/* now can actually delete it from this cgroup list */  	list_del(&pl->node); -	bpf_cgroup_storage_unlink(pl->storage); -	bpf_cgroup_storage_free(pl->storage); +	for_each_cgroup_storage_type(stype) { +		bpf_cgroup_storage_unlink(pl->storage[stype]); +		bpf_cgroup_storage_free(pl->storage[stype]); +	}  	kfree(pl);  	if (list_empty(progs))  		/* last program was detached, reset flags to zero */ @@ -523,6 +553,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,  {  	unsigned 
int offset = skb->data - skb_network_header(skb);  	struct sock *save_sk; +	void *saved_data_end;  	struct cgroup *cgrp;  	int ret; @@ -536,8 +567,13 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,  	save_sk = skb->sk;  	skb->sk = sk;  	__skb_push(skb, offset); + +	/* compute pointers for the bpf prog */ +	bpf_compute_and_save_data_end(skb, &saved_data_end); +  	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,  				 bpf_prog_run_save_cb); +	bpf_restore_data_end(skb, saved_data_end);  	__skb_pull(skb, offset);  	skb->sk = save_sk;  	return ret == 1 ? 0 : -EPERM; @@ -677,6 +713,8 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)  		return &bpf_get_current_uid_gid_proto;  	case BPF_FUNC_get_local_storage:  		return &bpf_get_local_storage_proto; +	case BPF_FUNC_get_current_cgroup_id: +		return &bpf_get_current_cgroup_id_proto;  	case BPF_FUNC_trace_printk:  		if (capable(CAP_SYS_ADMIN))  			return bpf_get_trace_printk_proto(); |