Diffstat (limited to 'kernel/bpf/arraymap.c')
-rw-r--r--	kernel/bpf/arraymap.c | 29 ++++++++++-------------------
1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 832b2659e96e..484706959556 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -306,14 +306,6 @@ static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key
 	return 0;
 }
 
-static void check_and_free_fields(struct bpf_array *arr, void *val)
-{
-	if (map_value_has_timer(&arr->map))
-		bpf_timer_cancel_and_free(val + arr->map.timer_off);
-	if (map_value_has_kptrs(&arr->map))
-		bpf_map_free_kptrs(&arr->map, val);
-}
-
 /* Called from syscall or from eBPF program */
 static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 				 u64 map_flags)
@@ -335,13 +327,13 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 		return -EEXIST;
 
 	if (unlikely((map_flags & BPF_F_LOCK) &&
-		     !map_value_has_spin_lock(map)))
+		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
 		return -EINVAL;
 
 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
 		copy_map_value(map, val, value);
-		check_and_free_fields(array, val);
+		bpf_obj_free_fields(array->map.record, val);
 	} else {
 		val = array->value +
 			(u64)array->elem_size * (index & array->index_mask);
@@ -349,7 +341,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 			copy_map_value_locked(map, val, value, false);
 		else
 			copy_map_value(map, val, value);
-		check_and_free_fields(array, val);
+		bpf_obj_free_fields(array->map.record, val);
 	}
 	return 0;
 }
@@ -386,7 +378,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
 		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
-		check_and_free_fields(array, per_cpu_ptr(pptr, cpu));
+		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
 		off += size;
 	}
 	rcu_read_unlock();
@@ -409,12 +401,12 @@ static void array_map_free_timers(struct bpf_map *map)
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	int i;
 
-	/* We don't reset or free kptr on uref dropping to zero. */
-	if (!map_value_has_timer(map))
+	/* We don't reset or free fields other than timer on uref dropping to zero. */
+	if (!btf_record_has_field(map->record, BPF_TIMER))
 		return;
 
 	for (i = 0; i < array->map.max_entries; i++)
-		bpf_timer_cancel_and_free(array_map_elem_ptr(array, i) + map->timer_off);
+		bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
 }
 
 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
@@ -423,22 +415,21 @@ static void array_map_free(struct bpf_map *map)
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	int i;
 
-	if (map_value_has_kptrs(map)) {
+	if (!IS_ERR_OR_NULL(map->record)) {
 		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 			for (i = 0; i < array->map.max_entries; i++) {
 				void __percpu *pptr = array->pptrs[i & array->index_mask];
 				int cpu;
 
 				for_each_possible_cpu(cpu) {
-					bpf_map_free_kptrs(map, per_cpu_ptr(pptr, cpu));
+					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
 					cond_resched();
 				}
 			}
 		} else {
 			for (i = 0; i < array->map.max_entries; i++)
-				bpf_map_free_kptrs(map, array_map_elem_ptr(array, i));
+				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
 		}
-		bpf_map_free_kptr_off_tab(map);
 	}
 
 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
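Context for the change: the per-map-type helpers deleted above (check_and_free_fields(), map_value_has_timer(), map_value_has_spin_lock(), bpf_map_free_kptrs(), bpf_map_free_kptr_off_tab()) are replaced by generic walkers over map->record, a per-map BTF field record. The standalone C mockup below sketches that pattern only; its types, helper names (record_has_field, obj_free_fields), offsets, and printf stubs are illustrative stand-ins, not the kernel's definitions.

/*
 * Illustrative, self-contained mockup of the btf_record-driven cleanup
 * this commit migrates arraymap.c to. The kernel's real struct btf_record
 * and helpers (btf_record_has_field, bpf_obj_free_fields) are more
 * involved; everything here is a simplified stand-in.
 */
#include <stdint.h>
#include <stdio.h>

enum btf_field_type {          /* subset of the kernel's field kinds */
	BPF_SPIN_LOCK = (1 << 0),
	BPF_TIMER     = (1 << 1),
	BPF_KPTR_REF  = (1 << 2),
};

struct btf_field {
	uint32_t offset;       /* byte offset of the field in a map value */
	enum btf_field_type type;
};

struct btf_record {
	uint32_t cnt;          /* number of special fields in the value */
	uint32_t field_mask;   /* OR of all field types present */
	struct btf_field fields[4];
};

/* One bitmask test replaces per-type predicates such as the old
 * map_value_has_timer()/map_value_has_spin_lock(). */
static int record_has_field(const struct btf_record *rec, enum btf_field_type t)
{
	return rec && (rec->field_mask & t);
}

/* Generic walk over every special field of one map value; stands in for
 * the per-type free helpers the diff deletes. */
static void obj_free_fields(const struct btf_record *rec, void *obj)
{
	uint32_t i;

	if (!rec)
		return;
	for (i = 0; i < rec->cnt; i++) {
		void *fp = (char *)obj + rec->fields[i].offset;

		switch (rec->fields[i].type) {
		case BPF_TIMER:
			printf("cancel and free timer at %p\n", fp);
			break;
		case BPF_KPTR_REF:
			printf("drop kptr reference at %p\n", fp);
			break;
		default:       /* spin locks need no teardown */
			break;
		}
	}
}

int main(void)
{
	struct btf_record rec = {
		.cnt = 2,
		.field_mask = BPF_TIMER | BPF_KPTR_REF,
		.fields = { { 0, BPF_TIMER }, { 16, BPF_KPTR_REF } },
	};
	char value[32] = { 0 };    /* one fake map element */

	if (record_has_field(&rec, BPF_TIMER))
		obj_free_fields(&rec, value);
	return 0;
}

The payoff visible in the diff is that map implementations no longer need to know which special field types exist: a single record walk covers timers, kptrs, and any later additions, which is why check_and_free_fields() can be deleted outright.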