Diffstat (limited to 'kernel/bpf/arraymap.c')
-rw-r--r--	kernel/bpf/arraymap.c | 46 ++++++++++++++++++++++++++++------------------
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index fe40d3b9458f..624527401d4d 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -70,10 +70,8 @@ int array_map_alloc_check(union bpf_attr *attr)
 	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
 		return -EINVAL;
 
-	if (attr->value_size > KMALLOC_MAX_SIZE)
-		/* if value_size is bigger, the user space won't be able to
-		 * access the elements.
-		 */
+	/* avoid overflow on round_up(map->value_size) */
+	if (attr->value_size > INT_MAX)
 		return -E2BIG;
 
 	return 0;
@@ -156,6 +154,11 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	return &array->map;
 }
 
+static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
+{
+	return array->value + (u64)array->elem_size * index;
+}
+
 /* Called from syscall or from eBPF program */
 static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 {
@@ -165,7 +168,7 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 	if (unlikely(index >= array->map.max_entries))
 		return NULL;
 
-	return array->value + array->elem_size * (index & array->index_mask);
+	return array->value + (u64)array->elem_size * (index & array->index_mask);
 }
 
 static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
@@ -203,7 +206,7 @@ static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_insn *insn = insn_buf;
-	u32 elem_size = round_up(map->value_size, 8);
+	u32 elem_size = array->elem_size;
 	const int ret = BPF_REG_0;
 	const int map_ptr = BPF_REG_1;
 	const int index = BPF_REG_2;
@@ -272,7 +275,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 	 * access 'value_size' of them, so copying rounded areas
 	 * will not leak any kernel data
 	 */
-	size = round_up(map->value_size, 8);
+	size = array->elem_size;
 	rcu_read_lock();
 	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
@@ -339,7 +342,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 		       value, map->value_size);
 	} else {
 		val = array->value +
-			array->elem_size * (index & array->index_mask);
+			(u64)array->elem_size * (index & array->index_mask);
 		if (map_flags & BPF_F_LOCK)
 			copy_map_value_locked(map, val, value, false);
 		else
@@ -376,7 +379,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 	 * returned or zeros which were zero-filled by percpu_alloc,
 	 * so no kernel data leaks possible
 	 */
-	size = round_up(map->value_size, 8);
+	size = array->elem_size;
 	rcu_read_lock();
 	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
@@ -408,8 +411,7 @@ static void array_map_free_timers(struct bpf_map *map)
 		return;
 
 	for (i = 0; i < array->map.max_entries; i++)
-		bpf_timer_cancel_and_free(array->value + array->elem_size * i +
-					  map->timer_off);
+		bpf_timer_cancel_and_free(array_map_elem_ptr(array, i) + map->timer_off);
 }
 
 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
@@ -420,7 +422,7 @@ static void array_map_free(struct bpf_map *map)
 
 	if (map_value_has_kptrs(map)) {
 		for (i = 0; i < array->map.max_entries; i++)
-			bpf_map_free_kptrs(map, array->value + array->elem_size * i);
+			bpf_map_free_kptrs(map, array_map_elem_ptr(array, i));
 		bpf_map_free_kptr_off_tab(map);
 	}
 
@@ -556,7 +558,7 @@ static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
 	index = info->index & array->index_mask;
 	if (info->percpu_value_buf)
 	       return array->pptrs[index];
-	return array->value + array->elem_size * index;
+	return array_map_elem_ptr(array, index);
 }
 
 static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -575,7 +577,7 @@ static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	index = info->index & array->index_mask;
 	if (info->percpu_value_buf)
 	       return array->pptrs[index];
-	return array->value + array->elem_size * index;
+	return array_map_elem_ptr(array, index);
 }
 
 static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
@@ -583,6 +585,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 	struct bpf_iter_seq_array_map_info *info = seq->private;
 	struct bpf_iter__bpf_map_elem ctx = {};
 	struct bpf_map *map = info->map;
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_iter_meta meta;
 	struct bpf_prog *prog;
 	int off = 0, cpu = 0;
@@ -603,7 +606,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 			ctx.value = v;
 		} else {
 			pptr = v;
-			size = round_up(map->value_size, 8);
+			size = array->elem_size;
 			for_each_possible_cpu(cpu) {
 				bpf_long_memcpy(info->percpu_value_buf + off,
 						per_cpu_ptr(pptr, cpu),
@@ -633,11 +636,12 @@ static int bpf_iter_init_array_map(void *priv_data,
 {
 	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 	struct bpf_map *map = aux->map;
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	void *value_buf;
 	u32 buf_size;
 
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
-		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
+		buf_size = array->elem_size * num_possible_cpus();
 		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
 		if (!value_buf)
 			return -ENOMEM;
@@ -645,6 +649,11 @@ static int bpf_iter_init_array_map(void *priv_data,
 		seq_info->percpu_value_buf = value_buf;
 	}
 
+	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
+	 * released before or in the middle of iterating map elements, so
+	 * acquire an extra map uref for iterator.
+	 */
+	bpf_map_inc_with_uref(map);
 	seq_info->map = map;
 	return 0;
 }
@@ -653,6 +662,7 @@ static void bpf_iter_fini_array_map(void *priv_data)
 {
 	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 
+	bpf_map_put_with_uref(seq_info->map);
 	kfree(seq_info->percpu_value_buf);
 }
 
@@ -690,7 +700,7 @@ static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_
 		if (is_percpu)
 			val = this_cpu_ptr(array->pptrs[i]);
 		else
-			val = array->value + array->elem_size * i;
+			val = array_map_elem_ptr(array, i);
 		num_elems++;
 		key = i;
 		ret = callback_fn((u64)(long)map, (u64)(long)&key,
@@ -1322,7 +1332,7 @@ static int array_of_map_gen_lookup(struct bpf_map *map,
 				   struct bpf_insn *insn_buf)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	u32 elem_size = round_up(map->value_size, 8);
+	u32 elem_size = array->elem_size;
 	struct bpf_insn *insn = insn_buf;
 	const int ret = BPF_REG_0;
 	const int map_ptr = BPF_REG_1;
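Why the (u64) casts and the INT_MAX bound in this diff matter: elem_size and the element index are both 32-bit, so elem_size * index is evaluated in 32-bit arithmetic and silently wraps once a map's value area grows past 4 GiB; similarly, round_up(value_size, 8) wraps when value_size is near UINT_MAX. A minimal userspace sketch of both failure modes (the helper names below are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Same rounding the kernel's round_up() performs for a power-of-two align. */
static uint32_t round_up_u32(uint32_t n, uint32_t align)
{
	return (n + align - 1) & ~(align - 1);
}

int main(void)
{
	/* A value_size near UINT32_MAX: rounding up to 8 wraps to 0,
	 * which is why array_map_alloc_check() now rejects > INT_MAX. */
	uint32_t value_size = UINT32_MAX - 3;
	printf("round_up(%u, 8) = %u (wrapped)\n",
	       (unsigned)value_size, (unsigned)round_up_u32(value_size, 8));

	/* A modest elem_size times a large index overflows u32 before the
	 * offset is added to the base pointer; widening first is correct. */
	uint32_t elem_size = 4096, index = 2 * 1024 * 1024; /* 8 GiB offset */
	uint32_t narrow = elem_size * index;           /* wraps to 0 */
	uint64_t wide   = (uint64_t)elem_size * index; /* 8589934592 */
	printf("u32 offset: %u, u64 offset: %llu\n",
	       (unsigned)narrow, (unsigned long long)wide);
	return 0;
}

This is the same arithmetic the new array_map_elem_ptr() helper centralizes: widen elem_size to u64 before multiplying by the index.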
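The second half of the diff is a lifetime fix: the uref taken by bpf_iter_attach_map() may be dropped while an iterator is still walking the map, so .init_seq_private now takes its own uref via bpf_map_inc_with_uref() and .fini_seq_private releases it via bpf_map_put_with_uref(). A toy userspace model of that acquire/release pairing (struct and helper names here are hypothetical stand-ins, not kernel API):

#include <assert.h>

/* Toy user-reference counter standing in for the map's usercnt. */
struct toy_map { int usercnt; };

static void toy_inc_uref(struct toy_map *m) { m->usercnt++; }
static void toy_put_uref(struct toy_map *m) { m->usercnt--; }

int main(void)
{
	struct toy_map m = { .usercnt = 1 }; /* uref from attach */

	toy_inc_uref(&m);      /* init_seq_private: iterator's own uref */
	toy_put_uref(&m);      /* attach-time uref dropped mid-iteration... */
	assert(m.usercnt > 0); /* ...yet the map outlives the iteration */
	toy_put_uref(&m);      /* fini_seq_private releases the extra uref */
	return 0;
}

Without the extra reference, the drop in the middle step would bring the count to zero and free the map while the seq_file iteration still holds pointers into it.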