diff options
Diffstat (limited to 'kernel/bpf/syscall.c')
| -rw-r--r-- | kernel/bpf/syscall.c | 22 | 
1 file changed, 15 insertions(+), 7 deletions(-)
| diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 62f6bced3a3c..afca36f53c49 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -136,21 +136,29 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)  void *bpf_map_area_alloc(size_t size, int numa_node)  { -	/* We definitely need __GFP_NORETRY, so OOM killer doesn't -	 * trigger under memory pressure as we really just want to -	 * fail instead. +	/* We really just want to fail instead of triggering OOM killer +	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, +	 * which is used for lower order allocation requests. +	 * +	 * It has been observed that higher order allocation requests done by +	 * vmalloc with __GFP_NORETRY being set might fail due to not trying +	 * to reclaim memory from the page cache, thus we set +	 * __GFP_RETRY_MAYFAIL to avoid such situations.  	 */ -	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO; + +	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;  	void *area;  	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { -		area = kmalloc_node(size, GFP_USER | flags, numa_node); +		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags, +				    numa_node);  		if (area != NULL)  			return area;  	} -	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags, -					   __builtin_return_address(0)); +	return __vmalloc_node_flags_caller(size, numa_node, +					   GFP_KERNEL | __GFP_RETRY_MAYFAIL | +					   flags, __builtin_return_address(0));  }  void bpf_map_area_free(void *area) |