-rw-r--r--  tools/testing/selftests/bpf/prog_tests/task_local_storage.c | 98
-rw-r--r--  tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c | 47
2 files changed, 145 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
index 99a42a2b6e14..ae535f5de6a2 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
@@ -3,12 +3,15 @@
 #define _GNU_SOURCE /* See feature_test_macros(7) */
 #include <unistd.h>
+#include <sched.h>
+#include <pthread.h>
 #include <sys/syscall.h> /* For SYS_xxx definitions */
 #include <sys/types.h>
 #include <test_progs.h>
 #include "task_local_storage.skel.h"
 #include "task_local_storage_exit_creds.skel.h"
 #include "task_ls_recursion.skel.h"
+#include "task_storage_nodeadlock.skel.h"
 
 static void test_sys_enter_exit(void)
 {
@@ -93,6 +96,99 @@ out:
 	task_ls_recursion__destroy(skel);
 }
 
+static bool stop;
+
+static void waitall(const pthread_t *tids, int nr)
+{
+	int i;
+
+	stop = true;
+	for (i = 0; i < nr; i++)
+		pthread_join(tids[i], NULL);
+}
+
+static void *sock_create_loop(void *arg)
+{
+	struct task_storage_nodeadlock *skel = arg;
+	int fd;
+
+	while (!stop) {
+		fd = socket(AF_INET, SOCK_STREAM, 0);
+		close(fd);
+		if (skel->bss->nr_get_errs || skel->bss->nr_del_errs)
+			stop = true;
+	}
+
+	return NULL;
+}
+
+static void test_nodeadlock(void)
+{
+	struct task_storage_nodeadlock *skel;
+	struct bpf_prog_info info = {};
+	__u32 info_len = sizeof(info);
+	const int nr_threads = 32;
+	pthread_t tids[nr_threads];
+	int i, prog_fd, err;
+	cpu_set_t old, new;
+
+	/* Pin all threads to one cpu to increase the chance of preemption
+	 * in a sleepable bpf prog.
+	 */
+	CPU_ZERO(&new);
+	CPU_SET(0, &new);
+	err = sched_getaffinity(getpid(), sizeof(old), &old);
+	if (!ASSERT_OK(err, "getaffinity"))
+		return;
+	err = sched_setaffinity(getpid(), sizeof(new), &new);
+	if (!ASSERT_OK(err, "setaffinity"))
+		return;
+
+	skel = task_storage_nodeadlock__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open_and_load"))
+		goto done;
+
+	/* Unnecessary recursion and deadlock detection are reproducible
+	 * in the preemptible kernel.
+	 */
+	if (!skel->kconfig->CONFIG_PREEMPT) {
+		test__skip();
+		goto done;
+	}
+
+	err = task_storage_nodeadlock__attach(skel);
+	ASSERT_OK(err, "attach prog");
+
+	for (i = 0; i < nr_threads; i++) {
+		err = pthread_create(&tids[i], NULL, sock_create_loop, skel);
+		if (err) {
+			/* Only assert once here to avoid excessive
+			 * PASS printing during test failure.
+			 */
+			ASSERT_OK(err, "pthread_create");
+			waitall(tids, i);
+			goto done;
+		}
+	}
+
+	/* With 32 threads, 1s is enough to reproduce the issue */
+	sleep(1);
+	waitall(tids, nr_threads);
+
+	info_len = sizeof(info);
+	prog_fd = bpf_program__fd(skel->progs.socket_post_create);
+	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+	ASSERT_OK(err, "get prog info");
+	ASSERT_EQ(info.recursion_misses, 0, "prog recursion");
+
+	ASSERT_EQ(skel->bss->nr_get_errs, 0, "bpf_task_storage_get busy");
+	ASSERT_EQ(skel->bss->nr_del_errs, 0, "bpf_task_storage_delete busy");
+
+done:
+	task_storage_nodeadlock__destroy(skel);
+	sched_setaffinity(getpid(), sizeof(old), &old);
+}
+
 void test_task_local_storage(void)
 {
 	if (test__start_subtest("sys_enter_exit"))
@@ -101,4 +197,6 @@ void test_task_local_storage(void)
 		test_exit_creds();
 	if (test__start_subtest("recursion"))
 		test_recursion();
+	if (test__start_subtest("nodeadlock"))
+		test_nodeadlock();
 }
diff --git a/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c b/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c
new file mode 100644
index 000000000000..ea2dbb80f7b3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#ifndef EBUSY
+#define EBUSY 16
+#endif
+
+extern bool CONFIG_PREEMPT __kconfig __weak;
+int nr_get_errs = 0;
+int nr_del_errs = 0;
+
+struct {
+	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, int);
+} task_storage SEC(".maps");
+
+SEC("lsm.s/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+	     int protocol, int kern)
+{
+	struct task_struct *task;
+	int ret, zero = 0;
+	int *value;
+
+	if (!CONFIG_PREEMPT)
+		return 0;
+
+	task = bpf_get_current_task_btf();
+	value = bpf_task_storage_get(&task_storage, task, &zero,
+				     BPF_LOCAL_STORAGE_GET_F_CREATE);
+	if (!value)
+		__sync_fetch_and_add(&nr_get_errs, 1);
+
+	ret = bpf_task_storage_delete(&task_storage,
+				      bpf_get_current_task_btf());
+	if (ret == -EBUSY)
+		__sync_fetch_and_add(&nr_del_errs, 1);
+
+	return 0;
+}
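
For context on what the final assertions verify: the userspace side of the test reads the program's recursion_misses counter via bpf_obj_get_info_by_fd() and expects it to stay 0, i.e. the task-storage helpers should neither deadlock nor be skipped by the per-program recursion guard. Below is a minimal standalone sketch of that query pattern, assuming only libbpf is available; print_recursion_misses() is a hypothetical helper name, and prog_fd is assumed to be the fd of an already-loaded BPF program.

/* Minimal sketch, assuming libbpf; print_recursion_misses() is a
 * hypothetical helper, and prog_fd must refer to a loaded BPF program.
 */
#include <string.h>
#include <stdio.h>
#include <bpf/bpf.h>

static int print_recursion_misses(int prog_fd)
{
	struct bpf_prog_info info;
	__u32 info_len = sizeof(info);

	memset(&info, 0, sizeof(info));
	/* The kernel fills struct bpf_prog_info; recursion_misses counts
	 * executions that were skipped by the per-prog recursion guard.
	 */
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
		return -1;
	printf("recursion_misses: %llu\n",
	       (unsigned long long)info.recursion_misses);
	return 0;
}

In the patch itself the fd comes from the skeleton (bpf_program__fd(skel->progs.socket_post_create)); once applied, the subtest would typically be exercised through the standard BPF selftest runner, e.g. ./test_progs -t task_local_storage from tools/testing/selftests/bpf.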