Diffstat (limited to 'kernel/bpf/core.c')
-rw-r--r--   kernel/bpf/core.c   30
1 file changed, 19 insertions, 11 deletions
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ba3fff17e2f9..e2d256c82072 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -34,7 +34,9 @@
 #include <linux/log2.h>
 #include <linux/bpf_verifier.h>
 #include <linux/nodemask.h>
+#include <linux/nospec.h>
 #include <linux/bpf_mem_alloc.h>
+#include <linux/memcontrol.h>
 
 #include <asm/barrier.h>
 #include <asm/unaligned.h>
@@ -87,7 +89,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
 
 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
 {
-	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
+	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
 	struct bpf_prog_aux *aux;
 	struct bpf_prog *fp;
 
@@ -96,12 +98,12 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 	if (fp == NULL)
 		return NULL;
 
-	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
+	aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
 	if (aux == NULL) {
 		vfree(fp);
 		return NULL;
 	}
-	fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
+	fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
 	if (!fp->active) {
 		vfree(fp);
 		kfree(aux);
@@ -126,7 +128,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 {
-	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
+	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
 	struct bpf_prog *prog;
 	int cpu;
 
@@ -159,7 +161,7 @@ int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
 
 	prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
 					  sizeof(*prog->aux->jited_linfo),
-					  GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
+					  bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
 	if (!prog->aux->jited_linfo)
 		return -ENOMEM;
 
@@ -234,7 +236,7 @@ void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 				  gfp_t gfp_extra_flags)
 {
-	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
+	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
 	struct bpf_prog *fp;
 	u32 pages;
 
@@ -970,7 +972,7 @@ static int __init bpf_jit_charge_init(void)
 {
 	/* Only used as heuristic here to derive limit. */
 	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
-	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
+	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
 					    PAGE_SIZE), LONG_MAX);
 	return 0;
 }
@@ -1910,9 +1912,7 @@ out:
 		 * reuse preexisting logic from Spectre v1 mitigation that
 		 * happens to produce the required code on x86 for v4 as well.
 		 */
-#ifdef CONFIG_X86
 		barrier_nospec();
-#endif
 		CONT;
 #define LDST(SIZEOP, SIZE)						\
 	STX_MEM_##SIZEOP:						\
@@ -2096,6 +2096,14 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
 	if (fp->kprobe_override)
 		return false;
 
+	/* XDP programs inserted into maps are not guaranteed to run on
+	 * a particular netdev (and can run outside driver context entirely
+	 * in the case of devmap and cpumap). Until device checks
+	 * are implemented, prohibit adding dev-bound programs to program maps.
+	 */
+	if (bpf_prog_is_dev_bound(fp->aux))
+		return false;
+
 	spin_lock(&map->owner.lock);
 	if (!map->owner.type) {
 		/* There's no owner yet where we could check for
@@ -2182,7 +2190,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 	 * valid program, which in this case would simply not
 	 * be JITed, but falls back to the interpreter.
 	 */
-	if (!bpf_prog_is_dev_bound(fp->aux)) {
+	if (!bpf_prog_is_offloaded(fp->aux)) {
 		*err = bpf_prog_alloc_jited_linfo(fp);
 		if (*err)
 			return fp;
@@ -2554,7 +2562,7 @@ static void bpf_prog_free_deferred(struct work_struct *work)
 	bpf_free_used_maps(aux);
 	bpf_free_used_btfs(aux);
 	if (bpf_prog_is_dev_bound(aux))
-		bpf_prog_offload_destroy(aux->prog);
+		bpf_prog_dev_bound_destroy(aux->prog);
 #ifdef CONFIG_PERF_EVENTS
 	if (aux->prog->has_callchain_buf)
 		put_callchain_buffers();
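For context on the allocation-flag hunks above: the call sites stop hard-coding GFP_KERNEL_ACCOUNT and instead pass their flags through bpf_memcg_flags(), so __GFP_ACCOUNT is only added when memcg-based accounting of BPF allocations is actually in use. A minimal sketch of such a helper follows; the CONFIG_MEMCG_KMEM guard and the memcg_bpf_enabled() check are assumptions for illustration and are not part of this diff.

/* Sketch only: wrap GFP flags and add __GFP_ACCOUNT when memcg
 * accounting of BPF allocations is enabled, instead of passing
 * GFP_KERNEL_ACCOUNT unconditionally at every call site.
 */
#ifdef CONFIG_MEMCG_KMEM
static inline gfp_t bpf_memcg_flags(gfp_t flags)
{
	if (memcg_bpf_enabled())
		return flags | __GFP_ACCOUNT;
	return flags;
}
#else
static inline gfp_t bpf_memcg_flags(gfp_t flags)
{
	return flags;
}
#endif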