author     Jakub Kicinski <kuba@kernel.org>  2020-10-12 16:16:50 -0700
committer  Jakub Kicinski <kuba@kernel.org>  2020-10-12 16:16:50 -0700
commit     ccdf7fae3afaeaf0e5dd03311b86ffa56adf85ae (patch)
tree       3028901b29bf4ab04cd50d61da4fecdb20b182b6 /kernel/bpf/syscall.c
parent     a308283fdbf712b30061d2b4567530eb9e8dc1b4 (diff)
parent     376dcfe3a4e5a5475a84e6b5f926066a8614f887 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-10-12

The main changes are:

1) BPF verifier improvements to track the register allocation pattern, from
   Alexei and Yonghong.

2) libbpf relocation support for different size load/store, from Andrii.

3) bpf_redirect_peer() helper and support for inner map arrays with different
   max_entries, from Daniel.

4) BPF support for per-cpu variables, from Hao.

5) sockmap improvements, from John.
====================

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
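As a hedged aside on item 3 above, a minimal sketch of how the new
bpf_redirect_peer() helper is typically used from a tc ingress program.
The PEER_IFINDEX value and the section name are assumptions made for the
example, not something taken from this pull request.

// SPDX-License-Identifier: GPL-2.0
/* Sketch only, not from this series: redirect ingress traffic to the peer
 * of a veth device with the bpf_redirect_peer() helper (item 3 above).
 * PEER_IFINDEX is an assumed host-side veth ifindex for illustration. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define PEER_IFINDEX 4	/* assumption: ifindex of the veth whose peer we target */

SEC("tc")
int tc_redirect_peer(struct __sk_buff *skb)
{
	/* Hand the packet straight to the device's peer (e.g. the
	 * container-side end of a veth), bypassing the host stack. */
	return bpf_redirect_peer(PEER_IFINDEX, 0);
}

char LICENSE[] SEC("license") = "GPL";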
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--  kernel/bpf/syscall.c  4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index f1528c2a6927..1110ecd7d1f3 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -4323,8 +4323,10 @@ static int bpf_prog_bind_map(union bpf_attr *attr)
 	used_maps_old = prog->aux->used_maps;
 	for (i = 0; i < prog->aux->used_map_cnt; i++)
-		if (used_maps_old[i] == map)
+		if (used_maps_old[i] == map) {
+			bpf_map_put(map);
 			goto out_unlock;
+		}
 
 	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
 				      sizeof(used_maps_new[0]),
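The hunk above covers the case where BPF_PROG_BIND_MAP is asked to bind a map
the program already holds: the reference taken on the map earlier in
bpf_prog_bind_map() is now dropped on that path as well, instead of being left
behind. A hedged userspace sketch of how this path is reached via libbpf's
bpf_prog_bind_map() wrapper; prog_fd and map_fd are assumed to be valid
descriptors obtained elsewhere (e.g. from loading a BPF object with libbpf).

/* Sketch only: prog_fd and map_fd are assumed to come from elsewhere. */
#include <bpf/bpf.h>

int bind_map_twice(int prog_fd, int map_fd)
{
	int err;

	/* First bind adds the map to the program's used_maps array. */
	err = bpf_prog_bind_map(prog_fd, map_fd, NULL);
	if (err)
		return err;

	/* Second bind finds the map already present; with the change above
	 * the kernel drops the extra map reference before returning 0, so
	 * repeated binds no longer leak a reference on the map. */
	return bpf_prog_bind_map(prog_fd, map_fd, NULL);
}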