aboutsummaryrefslogtreecommitdiff
path: root/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
diff options
context:
space:
mode:
authorAndrii Nakryiko <andrii@kernel.org>2023-10-11 15:37:24 -0700
committerDaniel Borkmann <daniel@iogearbox.net>2023-10-16 13:49:18 +0200
commit2d78928c9cf7bee08c3e2344e6e1755412855448 (patch)
treed94e87cd0f8b942aabce0c87eff573704774dd01 /tools/testing/selftests/bpf/progs/percpu_alloc_array.c
parent0e10fd4b7a6dd03cf6d1da293d5d50082917f0e0 (diff)
selftests/bpf: Improve percpu_alloc test robustness
Make these non-serial tests filter BPF programs by intended PID of a test runner process. This makes it isolated from other parallel tests that might interfere accidentally. Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: John Fastabend <john.fastabend@gmail.com> Acked-by: Eduard Zingerman <eddyz87@gmail.com> Link: https://lore.kernel.org/bpf/20231011223728.3188086-2-andrii@kernel.org
Diffstat (limited to 'tools/testing/selftests/bpf/progs/percpu_alloc_array.c')
-rw-r--r--tools/testing/selftests/bpf/progs/percpu_alloc_array.c7
1 file changed, 7 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_array.c b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
index bbc45346e006..37c2d2608ec0 100644
--- a/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
+++ b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c
@@ -71,6 +71,7 @@ int BPF_PROG(test_array_map_2)
}
int cpu0_field_d, sum_field_c;
+int my_pid;
/* Summarize percpu data */
SEC("?fentry/bpf_fentry_test3")
@@ -81,6 +82,9 @@ int BPF_PROG(test_array_map_3)
struct val_t *v;
struct elem *e;
+ if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
+ return 0;
+
e = bpf_map_lookup_elem(&array, &index);
if (!e)
return 0;
@@ -130,6 +134,9 @@ int BPF_PROG(test_array_map_10)
struct val_t *v;
struct elem *e;
+ if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
+ return 0;
+
e = bpf_map_lookup_elem(&array, &index);
if (!e)
return 0;