Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
40 files changed, 2789 insertions, 379 deletions
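
A recurring pattern in the hunks below is the conversion from the older CHECK() macro to the ASSERT_*() helpers from test_progs.h, and from the fd-based bpf_map_update_elem()/bpf_map_lookup_elem() calls to the size-checked bpf_map__update_elem()/bpf_map__lookup_elem() wrappers. The following minimal sketch is not part of the patch and only illustrates that converted style in isolation; the "example" skeleton and its "counters" map are hypothetical placeholders.

	#include <test_progs.h>
	#include "example.skel.h"	/* hypothetical skeleton for illustration */

	void test_example(void)
	{
		struct example *skel;
		__u32 key = 0;
		__u64 val = 1, out = 0;
		int err;

		skel = example__open_and_load();
		if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))	/* instead of CHECK(!skel, ...) */
			return;

		/* typed, size-checked map accessors instead of bpf_map_update_elem(fd, ...) */
		err = bpf_map__update_elem(skel->maps.counters, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto cleanup;

		err = bpf_map__lookup_elem(skel->maps.counters, &key, sizeof(key),
					   &out, sizeof(out), 0);
		if (!ASSERT_OK(err, "map_lookup"))
			goto cleanup;

		ASSERT_EQ(out, val, "value_roundtrip");	/* instead of CHECK(out != val, ...) */

	cleanup:
		example__destroy(skel);
	}
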
diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c new file mode 100644 index 000000000000..b17bfa0e0aac --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +#include "test_progs.h" +#include "testing_helpers.h" + +static void init_test_filter_set(struct test_filter_set *set) +{ + set->cnt = 0; + set->tests = NULL; +} + +static void free_test_filter_set(struct test_filter_set *set) +{ + int i, j; + + for (i = 0; i < set->cnt; i++) { + for (j = 0; j < set->tests[i].subtest_cnt; j++) + free((void *)set->tests[i].subtests[j]); + free(set->tests[i].subtests); + free(set->tests[i].name); + } + + free(set->tests); + init_test_filter_set(set); +} + +static void test_parse_test_list(void) +{ + struct test_filter_set set; + + init_test_filter_set(&set); + + ASSERT_OK(parse_test_list("arg_parsing", &set, true), "parsing"); + if (!ASSERT_EQ(set.cnt, 1, "test filters count")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "subtest name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("arg_parsing,bpf_cookie", &set, true), + "parsing"); + if (!ASSERT_EQ(set.cnt, 2, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count"); + ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("arg_parsing/arg_parsing,bpf_cookie", + &set, + true), + "parsing"); + if (!ASSERT_EQ(set.cnt, 2, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count")) + goto error; + ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]), + "subtest name"); + ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("arg_parsing/arg_parsing", &set, true), + "parsing"); + ASSERT_OK(parse_test_list("bpf_cookie", &set, true), "parsing"); + ASSERT_OK(parse_test_list("send_signal", &set, true), "parsing"); + if (!ASSERT_EQ(set.cnt, 3, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count")) + goto error; + ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count"); + ASSERT_EQ(set.tests[2].subtest_cnt, 0, "subtest filters count"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]), + "subtest name"); + ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name"); + ASSERT_OK(strcmp("send_signal", set.tests[2].name), "test name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("bpf_cookie/trace", &set, false), "parsing"); + if (!ASSERT_EQ(set.cnt, 1, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters 
initialized")) + goto error; + if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count")) + goto error; + ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name"); +error: + free_test_filter_set(&set); +} + +void test_arg_parsing(void) +{ + if (test__start_subtest("test_parse_test_list")) + test_parse_test_list(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index d48f6e533e1e..08c0601b3e84 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -11,15 +11,22 @@ static void trigger_func(void) asm volatile (""); } +/* attach point for byname uprobe */ +static void trigger_func2(void) +{ + asm volatile (""); +} + void test_attach_probe(void) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); - int duration = 0; struct bpf_link *kprobe_link, *kretprobe_link; struct bpf_link *uprobe_link, *uretprobe_link; struct test_attach_probe* skel; ssize_t uprobe_offset, ref_ctr_offset; + struct bpf_link *uprobe_err_link; bool legacy; + char *mem; /* Check if new-style kprobe/uprobe API is supported. * Kernels that support new FD-based kprobe and uprobe BPF attachment @@ -43,11 +50,12 @@ void test_attach_probe(void) return; skel = test_attach_probe__open_and_load(); - if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + if (!ASSERT_OK_PTR(skel, "skel_open")) return; - if (CHECK(!skel->bss, "check_bss", ".bss wasn't mmap()-ed\n")) + if (!ASSERT_OK_PTR(skel->bss, "check_bss")) goto cleanup; + /* manual-attach kprobe/kretprobe */ kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe, false /* retprobe */, SYS_NANOSLEEP_KPROBE_NAME); @@ -62,6 +70,13 @@ void test_attach_probe(void) goto cleanup; skel->links.handle_kretprobe = kretprobe_link; + /* auto-attachable kprobe and kretprobe */ + skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto); + ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto"); + + skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto); + ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto"); + if (!legacy) ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before"); @@ -90,25 +105,75 @@ void test_attach_probe(void) goto cleanup; skel->links.handle_uretprobe = uretprobe_link; - /* trigger & validate kprobe && kretprobe */ - usleep(1); + /* verify auto-attach fails for old-style uprobe definition */ + uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname); + if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP, + "auto-attach should fail for old-style name")) + goto cleanup; + + uprobe_opts.func_name = "trigger_func2"; + uprobe_opts.retprobe = false; + uprobe_opts.ref_ctr_offset = 0; + skel->links.handle_uprobe_byname = + bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname, + 0 /* this pid */, + "/proc/self/exe", + 0, &uprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname")) + goto cleanup; - if (CHECK(skel->bss->kprobe_res != 1, "check_kprobe_res", - "wrong kprobe res: %d\n", skel->bss->kprobe_res)) + /* verify auto-attach works */ + skel->links.handle_uretprobe_byname = + bpf_program__attach(skel->progs.handle_uretprobe_byname); + if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname")) goto cleanup; - if 
(CHECK(skel->bss->kretprobe_res != 2, "check_kretprobe_res", - "wrong kretprobe res: %d\n", skel->bss->kretprobe_res)) + + /* test attach by name for a library function, using the library + * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo(). + */ + uprobe_opts.func_name = "malloc"; + uprobe_opts.retprobe = false; + skel->links.handle_uprobe_byname2 = + bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2, + 0 /* this pid */, + "libc.so.6", + 0, &uprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2")) + goto cleanup; + + uprobe_opts.func_name = "free"; + uprobe_opts.retprobe = true; + skel->links.handle_uretprobe_byname2 = + bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2, + -1 /* any pid */, + "libc.so.6", + 0, &uprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2")) goto cleanup; + /* trigger & validate kprobe && kretprobe */ + usleep(1); + + /* trigger & validate shared library u[ret]probes attached by name */ + mem = malloc(1); + free(mem); + /* trigger & validate uprobe & uretprobe */ trigger_func(); - if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res", - "wrong uprobe res: %d\n", skel->bss->uprobe_res)) - goto cleanup; - if (CHECK(skel->bss->uretprobe_res != 4, "check_uretprobe_res", - "wrong uretprobe res: %d\n", skel->bss->uretprobe_res)) - goto cleanup; + /* trigger & validate uprobe attached by name */ + trigger_func2(); + + ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res"); + ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res"); + ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res"); + ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res"); + ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res"); + ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res"); + ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res"); + ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res"); + ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res"); + ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res"); cleanup: test_attach_probe__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 923a6139b2d8..83ef55e3caa4 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -4,8 +4,11 @@ #include <pthread.h> #include <sched.h> #include <sys/syscall.h> +#include <sys/mman.h> #include <unistd.h> #include <test_progs.h> +#include <network_helpers.h> +#include <bpf/btf.h> #include "test_bpf_cookie.skel.h" #include "kprobe_multi.skel.h" @@ -410,6 +413,88 @@ cleanup: bpf_link__destroy(link); } +static void tracing_subtest(struct test_bpf_cookie *skel) +{ + __u64 cookie; + int prog_fd; + int fentry_fd = -1, fexit_fd = -1, fmod_ret_fd = -1; + LIBBPF_OPTS(bpf_test_run_opts, opts); + LIBBPF_OPTS(bpf_link_create_opts, link_opts); + + skel->bss->fentry_res = 0; + skel->bss->fexit_res = 0; + + cookie = 0x10000000000000L; + prog_fd = bpf_program__fd(skel->progs.fentry_test1); + link_opts.tracing.cookie = cookie; + fentry_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, &link_opts); + if (!ASSERT_GE(fentry_fd, 0, "fentry.link_create")) + goto cleanup; + + cookie = 0x20000000000000L; + prog_fd = bpf_program__fd(skel->progs.fexit_test1); + link_opts.tracing.cookie = cookie; + fexit_fd = 
bpf_link_create(prog_fd, 0, BPF_TRACE_FEXIT, &link_opts); + if (!ASSERT_GE(fexit_fd, 0, "fexit.link_create")) + goto cleanup; + + cookie = 0x30000000000000L; + prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); + link_opts.tracing.cookie = cookie; + fmod_ret_fd = bpf_link_create(prog_fd, 0, BPF_MODIFY_RETURN, &link_opts); + if (!ASSERT_GE(fmod_ret_fd, 0, "fmod_ret.link_create")) + goto cleanup; + + prog_fd = bpf_program__fd(skel->progs.fentry_test1); + bpf_prog_test_run_opts(prog_fd, &opts); + + prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); + bpf_prog_test_run_opts(prog_fd, &opts); + + ASSERT_EQ(skel->bss->fentry_res, 0x10000000000000L, "fentry_res"); + ASSERT_EQ(skel->bss->fexit_res, 0x20000000000000L, "fexit_res"); + ASSERT_EQ(skel->bss->fmod_ret_res, 0x30000000000000L, "fmod_ret_res"); + +cleanup: + if (fentry_fd >= 0) + close(fentry_fd); + if (fexit_fd >= 0) + close(fexit_fd); + if (fmod_ret_fd >= 0) + close(fmod_ret_fd); +} + +int stack_mprotect(void); + +static void lsm_subtest(struct test_bpf_cookie *skel) +{ + __u64 cookie; + int prog_fd; + int lsm_fd = -1; + LIBBPF_OPTS(bpf_link_create_opts, link_opts); + + skel->bss->lsm_res = 0; + + cookie = 0x90000000000090L; + prog_fd = bpf_program__fd(skel->progs.test_int_hook); + link_opts.tracing.cookie = cookie; + lsm_fd = bpf_link_create(prog_fd, 0, BPF_LSM_MAC, &link_opts); + if (!ASSERT_GE(lsm_fd, 0, "lsm.link_create")) + goto cleanup; + + stack_mprotect(); + if (!ASSERT_EQ(errno, EPERM, "stack_mprotect")) + goto cleanup; + + usleep(1); + + ASSERT_EQ(skel->bss->lsm_res, 0x90000000000090L, "fentry_res"); + +cleanup: + if (lsm_fd >= 0) + close(lsm_fd); +} + void test_bpf_cookie(void) { struct test_bpf_cookie *skel; @@ -432,6 +517,10 @@ void test_bpf_cookie(void) tp_subtest(skel); if (test__start_subtest("perf_event")) pe_subtest(skel); + if (test__start_subtest("trampoline")) + tracing_subtest(skel); + if (test__start_subtest("lsm")) + lsm_subtest(skel); test_bpf_cookie__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 5142a7d130b2..7ff5fa93d056 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -26,6 +26,7 @@ #include "bpf_iter_bpf_sk_storage_map.skel.h" #include "bpf_iter_test_kern5.skel.h" #include "bpf_iter_test_kern6.skel.h" +#include "bpf_iter_bpf_link.skel.h" static int duration; @@ -34,8 +35,7 @@ static void test_btf_id_or_null(void) struct bpf_iter_test_kern3 *skel; skel = bpf_iter_test_kern3__open_and_load(); - if (CHECK(skel, "bpf_iter_test_kern3__open_and_load", - "skeleton open_and_load unexpectedly succeeded\n")) { + if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) { bpf_iter_test_kern3__destroy(skel); return; } @@ -52,7 +52,7 @@ static void do_dummy_read(struct bpf_program *prog) return; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* not check contents, but ensure read() ends without error */ @@ -87,8 +87,7 @@ static void test_ipv6_route(void) struct bpf_iter_ipv6_route *skel; skel = bpf_iter_ipv6_route__open_and_load(); - if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load")) return; do_dummy_read(skel->progs.dump_ipv6_route); @@ -101,8 +100,7 @@ static void test_netlink(void) struct bpf_iter_netlink *skel; 
skel = bpf_iter_netlink__open_and_load(); - if (CHECK(!skel, "bpf_iter_netlink__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load")) return; do_dummy_read(skel->progs.dump_netlink); @@ -115,8 +113,7 @@ static void test_bpf_map(void) struct bpf_iter_bpf_map *skel; skel = bpf_iter_bpf_map__open_and_load(); - if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load")) return; do_dummy_read(skel->progs.dump_bpf_map); @@ -129,8 +126,7 @@ static void test_task(void) struct bpf_iter_task *skel; skel = bpf_iter_task__open_and_load(); - if (CHECK(!skel, "bpf_iter_task__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load")) return; do_dummy_read(skel->progs.dump_task); @@ -161,8 +157,7 @@ static void test_task_stack(void) struct bpf_iter_task_stack *skel; skel = bpf_iter_task_stack__open_and_load(); - if (CHECK(!skel, "bpf_iter_task_stack__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load")) return; do_dummy_read(skel->progs.dump_task_stack); @@ -183,24 +178,22 @@ static void test_task_file(void) void *ret; skel = bpf_iter_task_file__open_and_load(); - if (CHECK(!skel, "bpf_iter_task_file__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load")) return; skel->bss->tgid = getpid(); - if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL), - "pthread_create", "pthread_create failed\n")) + if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing, NULL), + "pthread_create")) goto done; do_dummy_read(skel->progs.dump_task_file); - if (CHECK(pthread_join(thread_id, &ret) || ret != NULL, - "pthread_join", "pthread_join failed\n")) + if (!ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL, + "pthread_join")) goto done; - CHECK(skel->bss->count != 0, "check_count", - "invalid non pthread file visit count %d\n", skel->bss->count); + ASSERT_EQ(skel->bss->count, 0, "check_count"); done: bpf_iter_task_file__destroy(skel); @@ -224,7 +217,7 @@ static int do_btf_read(struct bpf_iter_task_btf *skel) return ret; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ); @@ -238,9 +231,8 @@ static int do_btf_read(struct bpf_iter_task_btf *skel) if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno))) goto free_link; - CHECK(strstr(taskbuf, "(struct task_struct)") == NULL, - "check for btf representation of task_struct in iter data", - "struct task_struct not found"); + ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)", + "check for btf representation of task_struct in iter data"); free_link: if (iter_fd > 0) close(iter_fd); @@ -255,8 +247,7 @@ static void test_task_btf(void) int ret; skel = bpf_iter_task_btf__open_and_load(); - if (CHECK(!skel, "bpf_iter_task_btf__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load")) return; bss = skel->bss; @@ -265,12 +256,10 @@ static void test_task_btf(void) if (ret) goto cleanup; - if (CHECK(bss->tasks == 0, "check if iterated over tasks", - "no task iteration, did BPF program run?\n")) + if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?")) goto cleanup; - 
CHECK(bss->seq_err != 0, "check for unexpected err", - "bpf_seq_printf_btf returned %ld", bss->seq_err); + ASSERT_EQ(bss->seq_err, 0, "check for unexpected err"); cleanup: bpf_iter_task_btf__destroy(skel); @@ -281,8 +270,7 @@ static void test_tcp4(void) struct bpf_iter_tcp4 *skel; skel = bpf_iter_tcp4__open_and_load(); - if (CHECK(!skel, "bpf_iter_tcp4__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load")) return; do_dummy_read(skel->progs.dump_tcp4); @@ -295,8 +283,7 @@ static void test_tcp6(void) struct bpf_iter_tcp6 *skel; skel = bpf_iter_tcp6__open_and_load(); - if (CHECK(!skel, "bpf_iter_tcp6__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load")) return; do_dummy_read(skel->progs.dump_tcp6); @@ -309,8 +296,7 @@ static void test_udp4(void) struct bpf_iter_udp4 *skel; skel = bpf_iter_udp4__open_and_load(); - if (CHECK(!skel, "bpf_iter_udp4__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load")) return; do_dummy_read(skel->progs.dump_udp4); @@ -323,8 +309,7 @@ static void test_udp6(void) struct bpf_iter_udp6 *skel; skel = bpf_iter_udp6__open_and_load(); - if (CHECK(!skel, "bpf_iter_udp6__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load")) return; do_dummy_read(skel->progs.dump_udp6); @@ -349,7 +334,7 @@ static void test_unix(void) static int do_read_with_fd(int iter_fd, const char *expected, bool read_one_char) { - int err = -1, len, read_buf_len, start; + int len, read_buf_len, start; char buf[16] = {}; read_buf_len = read_one_char ? 1 : 16; @@ -363,9 +348,7 @@ static int do_read_with_fd(int iter_fd, const char *expected, if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) return -1; - err = strcmp(buf, expected); - if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n", - buf, expected)) + if (!ASSERT_STREQ(buf, expected, "read")) return -1; return 0; @@ -378,19 +361,17 @@ static void test_anon_iter(bool read_one_char) int iter_fd, err; skel = bpf_iter_test_kern1__open_and_load(); - if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load")) return; err = bpf_iter_test_kern1__attach(skel); - if (CHECK(err, "bpf_iter_test_kern1__attach", - "skeleton attach failed\n")) { + if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach")) { goto out; } link = skel->links.dump_task; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto out; do_read_with_fd(iter_fd, "abcd", read_one_char); @@ -423,8 +404,7 @@ static void test_file_iter(void) int err; skel1 = bpf_iter_test_kern1__open_and_load(); - if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load")) return; link = bpf_program__attach_iter(skel1->progs.dump_task, NULL); @@ -447,12 +427,11 @@ static void test_file_iter(void) * should change. 
*/ skel2 = bpf_iter_test_kern2__open_and_load(); - if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load")) goto unlink_path; err = bpf_link__update_program(link, skel2->progs.dump_task); - if (CHECK(err, "update_prog", "update_prog failed\n")) + if (!ASSERT_OK(err, "update_prog")) goto destroy_skel2; do_read(path, "ABCD"); @@ -478,8 +457,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) char *buf; skel = bpf_iter_test_kern4__open(); - if (CHECK(!skel, "bpf_iter_test_kern4__open", - "skeleton open failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open")) return; /* create two maps: bpf program will only do bpf_seq_write @@ -515,8 +493,8 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) } skel->rodata->ret1 = ret1; - if (CHECK(bpf_iter_test_kern4__load(skel), - "bpf_iter_test_kern4__load", "skeleton load failed\n")) + if (!ASSERT_OK(bpf_iter_test_kern4__load(skel), + "bpf_iter_test_kern4__load")) goto free_map2; /* setup filtering map_id in bpf program */ @@ -538,7 +516,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) goto free_map2; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; buf = malloc(expected_read_len); @@ -574,22 +552,16 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) goto free_buf; } - if (CHECK(total_read_len != expected_read_len, "read", - "total len %u, expected len %u\n", total_read_len, - expected_read_len)) + if (!ASSERT_EQ(total_read_len, expected_read_len, "read")) goto free_buf; - if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed", - "expected 1 actual %d\n", skel->bss->map1_accessed)) + if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed")) goto free_buf; - if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed", - "expected 2 actual %d\n", skel->bss->map2_accessed)) + if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed")) goto free_buf; - CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2, - "map2_seqnum", "two different seqnum %lld %lld\n", - skel->bss->map2_seqnum1, skel->bss->map2_seqnum2); + ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum"); free_buf: free(buf); @@ -622,15 +594,13 @@ static void test_bpf_hash_map(void) char buf[64]; skel = bpf_iter_bpf_hash_map__open(); - if (CHECK(!skel, "bpf_iter_bpf_hash_map__open", - "skeleton open failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open")) return; skel->bss->in_test_mode = true; err = bpf_iter_bpf_hash_map__load(skel); - if (CHECK(!skel, "bpf_iter_bpf_hash_map__load", - "skeleton load failed\n")) + if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load")) goto out; /* iterator with hashmap2 and hashmap3 should fail */ @@ -659,7 +629,7 @@ static void test_bpf_hash_map(void) expected_val += val; err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY); - if (CHECK(err, "map_update", "map_update failed\n")) + if (!ASSERT_OK(err, "map_update")) goto out; } @@ -669,7 +639,7 @@ static void test_bpf_hash_map(void) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests */ @@ -679,17 +649,11 @@ static void test_bpf_hash_map(void) goto close_iter; /* test results */ - if (CHECK(skel->bss->key_sum_a != expected_key_a, 
- "key_sum_a", "got %u expected %u\n", - skel->bss->key_sum_a, expected_key_a)) + if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a")) goto close_iter; - if (CHECK(skel->bss->key_sum_b != expected_key_b, - "key_sum_b", "got %u expected %u\n", - skel->bss->key_sum_b, expected_key_b)) + if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b")) goto close_iter; - if (CHECK(skel->bss->val_sum != expected_val, - "val_sum", "got %llu expected %llu\n", - skel->bss->val_sum, expected_val)) + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) goto close_iter; close_iter: @@ -718,16 +682,14 @@ static void test_bpf_percpu_hash_map(void) void *val; skel = bpf_iter_bpf_percpu_hash_map__open(); - if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open", - "skeleton open failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open")) return; skel->rodata->num_cpus = bpf_num_possible_cpus(); val = malloc(8 * bpf_num_possible_cpus()); err = bpf_iter_bpf_percpu_hash_map__load(skel); - if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load", - "skeleton load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__load")) goto out; /* update map values here */ @@ -745,7 +707,7 @@ static void test_bpf_percpu_hash_map(void) } err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY); - if (CHECK(err, "map_update", "map_update failed\n")) + if (!ASSERT_OK(err, "map_update")) goto out; } @@ -758,7 +720,7 @@ static void test_bpf_percpu_hash_map(void) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests */ @@ -768,17 +730,11 @@ static void test_bpf_percpu_hash_map(void) goto close_iter; /* test results */ - if (CHECK(skel->bss->key_sum_a != expected_key_a, - "key_sum_a", "got %u expected %u\n", - skel->bss->key_sum_a, expected_key_a)) + if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a")) goto close_iter; - if (CHECK(skel->bss->key_sum_b != expected_key_b, - "key_sum_b", "got %u expected %u\n", - skel->bss->key_sum_b, expected_key_b)) + if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b")) goto close_iter; - if (CHECK(skel->bss->val_sum != expected_val, - "val_sum", "got %u expected %u\n", - skel->bss->val_sum, expected_val)) + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) goto close_iter; close_iter: @@ -803,8 +759,7 @@ static void test_bpf_array_map(void) int len, start; skel = bpf_iter_bpf_array_map__open_and_load(); - if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load")) return; map_fd = bpf_map__fd(skel->maps.arraymap1); @@ -817,7 +772,7 @@ static void test_bpf_array_map(void) first_val = val; err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY); - if (CHECK(err, "map_update", "map_update failed\n")) + if (!ASSERT_OK(err, "map_update")) goto out; } @@ -830,7 +785,7 @@ static void test_bpf_array_map(void) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests */ @@ -850,21 +805,16 @@ static void test_bpf_array_map(void) res_first_key, res_first_val, first_val)) goto close_iter; - if (CHECK(skel->bss->key_sum != expected_key, - "key_sum", "got %u expected %u\n", - skel->bss->key_sum, expected_key)) + if 
(!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum")) goto close_iter; - if (CHECK(skel->bss->val_sum != expected_val, - "val_sum", "got %llu expected %llu\n", - skel->bss->val_sum, expected_val)) + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) goto close_iter; for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { err = bpf_map_lookup_elem(map_fd, &i, &val); - if (CHECK(err, "map_lookup", "map_lookup failed\n")) + if (!ASSERT_OK(err, "map_lookup")) goto out; - if (CHECK(i != val, "invalid_val", - "got value %llu expected %u\n", val, i)) + if (!ASSERT_EQ(i, val, "invalid_val")) goto out; } @@ -889,16 +839,14 @@ static void test_bpf_percpu_array_map(void) int len; skel = bpf_iter_bpf_percpu_array_map__open(); - if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open", - "skeleton open failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open")) return; skel->rodata->num_cpus = bpf_num_possible_cpus(); val = malloc(8 * bpf_num_possible_cpus()); err = bpf_iter_bpf_percpu_array_map__load(skel); - if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load", - "skeleton load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__load")) goto out; /* update map values here */ @@ -912,7 +860,7 @@ static void test_bpf_percpu_array_map(void) } err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY); - if (CHECK(err, "map_update", "map_update failed\n")) + if (!ASSERT_OK(err, "map_update")) goto out; } @@ -925,7 +873,7 @@ static void test_bpf_percpu_array_map(void) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests */ @@ -935,13 +883,9 @@ static void test_bpf_percpu_array_map(void) goto close_iter; /* test results */ - if (CHECK(skel->bss->key_sum != expected_key, - "key_sum", "got %u expected %u\n", - skel->bss->key_sum, expected_key)) + if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum")) goto close_iter; - if (CHECK(skel->bss->val_sum != expected_val, - "val_sum", "got %u expected %u\n", - skel->bss->val_sum, expected_val)) + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) goto close_iter; close_iter: @@ -966,17 +910,16 @@ static void test_bpf_sk_storage_delete(void) char buf[64]; skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); - if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load")) return; map_fd = bpf_map__fd(skel->maps.sk_stg_map); sock_fd = socket(AF_INET6, SOCK_STREAM, 0); - if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno)) + if (!ASSERT_GE(sock_fd, 0, "socket")) goto out; err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST); - if (CHECK(err, "map_update", "map_update failed\n")) + if (!ASSERT_OK(err, "map_update")) goto out; memset(&linfo, 0, sizeof(linfo)); @@ -989,7 +932,7 @@ static void test_bpf_sk_storage_delete(void) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests */ @@ -1027,22 +970,21 @@ static void test_bpf_sk_storage_get(void) int sock_fd = -1; skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); - if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, 
"bpf_iter_bpf_sk_storage_helpers__open_and_load")) return; sock_fd = socket(AF_INET6, SOCK_STREAM, 0); - if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno)) + if (!ASSERT_GE(sock_fd, 0, "socket")) goto out; err = listen(sock_fd, 1); - if (CHECK(err != 0, "listen", "errno: %d\n", errno)) + if (!ASSERT_OK(err, "listen")) goto close_socket; map_fd = bpf_map__fd(skel->maps.sk_stg_map); err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST); - if (CHECK(err, "bpf_map_update_elem", "map_update_failed\n")) + if (!ASSERT_OK(err, "bpf_map_update_elem")) goto close_socket; do_dummy_read(skel->progs.fill_socket_owner); @@ -1078,15 +1020,14 @@ static void test_bpf_sk_storage_map(void) char buf[64]; skel = bpf_iter_bpf_sk_storage_map__open_and_load(); - if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load")) return; map_fd = bpf_map__fd(skel->maps.sk_stg_map); num_sockets = ARRAY_SIZE(sock_fd); for (i = 0; i < num_sockets; i++) { sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0); - if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno)) + if (!ASSERT_GE(sock_fd[i], 0, "socket")) goto out; val = i + 1; @@ -1094,7 +1035,7 @@ static void test_bpf_sk_storage_map(void) err = bpf_map_update_elem(map_fd, &sock_fd[i], &val, BPF_NOEXIST); - if (CHECK(err, "map_update", "map_update failed\n")) + if (!ASSERT_OK(err, "map_update")) goto out; } @@ -1107,7 +1048,7 @@ static void test_bpf_sk_storage_map(void) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests */ @@ -1117,14 +1058,10 @@ static void test_bpf_sk_storage_map(void) goto close_iter; /* test results */ - if (CHECK(skel->bss->ipv6_sk_count != num_sockets, - "ipv6_sk_count", "got %u expected %u\n", - skel->bss->ipv6_sk_count, num_sockets)) + if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count")) goto close_iter; - if (CHECK(skel->bss->val_sum != expected_val, - "val_sum", "got %u expected %u\n", - skel->bss->val_sum, expected_val)) + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) goto close_iter; close_iter: @@ -1147,8 +1084,7 @@ static void test_rdonly_buf_out_of_bound(void) struct bpf_link *link; skel = bpf_iter_test_kern5__open_and_load(); - if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load")) return; memset(&linfo, 0, sizeof(linfo)); @@ -1167,11 +1103,23 @@ static void test_buf_neg_offset(void) struct bpf_iter_test_kern6 *skel; skel = bpf_iter_test_kern6__open_and_load(); - if (CHECK(skel, "bpf_iter_test_kern6__open_and_load", - "skeleton open_and_load unexpected success\n")) + if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load")) bpf_iter_test_kern6__destroy(skel); } +static void test_link_iter(void) +{ + struct bpf_iter_bpf_link *skel; + + skel = bpf_iter_bpf_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_bpf_link); + + bpf_iter_bpf_link__destroy(skel); +} + #define CMP_BUFFER_SIZE 1024 static char task_vma_output[CMP_BUFFER_SIZE]; static char proc_maps_output[CMP_BUFFER_SIZE]; @@ -1192,8 +1140,6 @@ static void str_strip_first_line(char *str) *dst = '\0'; } -#define min(a, b) ((a) < (b) ? 
(a) : (b)) - static void test_task_vma(void) { int err, iter_fd = -1, proc_maps_fd = -1; @@ -1202,13 +1148,13 @@ static void test_task_vma(void) char maps_path[64]; skel = bpf_iter_task_vma__open(); - if (CHECK(!skel, "bpf_iter_task_vma__open", "skeleton open failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open")) return; skel->bss->pid = getpid(); err = bpf_iter_task_vma__load(skel); - if (CHECK(err, "bpf_iter_task_vma__load", "skeleton load failed\n")) + if (!ASSERT_OK(err, "bpf_iter_task_vma__load")) goto out; skel->links.proc_maps = bpf_program__attach_iter( @@ -1220,7 +1166,7 @@ static void test_task_vma(void) } iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto out; /* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks @@ -1229,10 +1175,10 @@ static void test_task_vma(void) len = 0; while (len < CMP_BUFFER_SIZE) { err = read_fd_into_buffer(iter_fd, task_vma_output + len, - min(read_size, CMP_BUFFER_SIZE - len)); + MIN(read_size, CMP_BUFFER_SIZE - len)); if (!err) break; - if (CHECK(err < 0, "read_iter_fd", "read_iter_fd failed\n")) + if (!ASSERT_GE(err, 0, "read_iter_fd")) goto out; len += err; } @@ -1240,18 +1186,17 @@ static void test_task_vma(void) /* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */ snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid); proc_maps_fd = open(maps_path, O_RDONLY); - if (CHECK(proc_maps_fd < 0, "open_proc_maps", "open_proc_maps failed\n")) + if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps")) goto out; err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE); - if (CHECK(err < 0, "read_prog_maps_fd", "read_prog_maps_fd failed\n")) + if (!ASSERT_GE(err, 0, "read_prog_maps_fd")) goto out; /* strip and compare the first line of the two files */ str_strip_first_line(task_vma_output); str_strip_first_line(proc_maps_output); - CHECK(strcmp(task_vma_output, proc_maps_output), "compare_output", - "found mismatch\n"); + ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output"); out: close(proc_maps_fd); close(iter_fd); @@ -1320,4 +1265,6 @@ void test_bpf_iter(void) test_rdonly_buf_out_of_bound(); if (test__start_subtest("buf-neg-offset")) test_buf_neg_offset(); + if (test__start_subtest("link-iter")) + test_link_iter(); } diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c index d43f548c572c..a4d0cc9d3367 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c @@ -36,13 +36,13 @@ struct test_config { void (*bpf_destroy)(void *); }; -enum test_state { +enum bpf_test_state { _TS_INVALID, TS_MODULE_LOAD, TS_MODULE_LOAD_FAIL, }; -static _Atomic enum test_state state = _TS_INVALID; +static _Atomic enum bpf_test_state state = _TS_INVALID; static int sys_finit_module(int fd, const char *param_values, int flags) { diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index 8f7a1cef7d87..e9a9a31b2ffe 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -10,8 +10,6 @@ #include "bpf_tcp_nogpl.skel.h" #include "bpf_dctcp_release.skel.h" -#define min(a, b) ((a) < (b) ? 
(a) : (b)) - #ifndef ENOTSUPP #define ENOTSUPP 524 #endif @@ -53,7 +51,7 @@ static void *server(void *arg) while (bytes < total_bytes && !READ_ONCE(stop)) { nr_sent = send(fd, &batch, - min(total_bytes - bytes, sizeof(batch)), 0); + MIN(total_bytes - bytes, sizeof(batch)), 0); if (nr_sent == -1 && errno == EINTR) continue; if (nr_sent == -1) { @@ -146,7 +144,7 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map) /* recv total_bytes */ while (bytes < total_bytes && !READ_ONCE(stop)) { nr_recv = recv(fd, &batch, - min(total_bytes - bytes, sizeof(batch)), 0); + MIN(total_bytes - bytes, sizeof(batch)), 0); if (nr_recv == -1 && errno == EINTR) continue; if (nr_recv == -1) diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index ec823561b912..ba5bde53d418 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -8,7 +8,6 @@ #include <linux/filter.h> #include <linux/unistd.h> #include <bpf/bpf.h> -#include <sys/resource.h> #include <libelf.h> #include <gelf.h> #include <string.h> @@ -3974,6 +3973,105 @@ static struct btf_raw_test raw_tests[] = { .value_type_id = 1, .max_entries = 1, }, +{ + .descr = "type_tag test #2, type tag order", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_CONST_ENC(3), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Type tags don't precede modifiers", +}, +{ + .descr = "type_tag test #3, type tag order", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */ + BTF_CONST_ENC(4), /* [3] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Type tags don't precede modifiers", +}, +{ + .descr = "type_tag test #4, type tag order", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [2] */ + BTF_CONST_ENC(4), /* [3] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Type tags don't precede modifiers", +}, +{ + .descr = "type_tag test #5, type tag order", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */ + BTF_CONST_ENC(1), /* [3] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 2), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, +{ + .descr = "type_tag test #6, type tag order", + .raw_types = { + BTF_PTR_ENC(2), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */ + BTF_CONST_ENC(4), /* [3] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [4] */ + BTF_PTR_ENC(6), /* [5] */ + 
BTF_CONST_ENC(2), /* [6] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Type tags don't precede modifiers", +}, }; /* struct btf_raw_test raw_tests[] */ diff --git a/tools/testing/selftests/bpf/prog_tests/core_autosize.c b/tools/testing/selftests/bpf/prog_tests/core_autosize.c index 1dfe14ff6aa4..f2ce4fd1cdae 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_autosize.c +++ b/tools/testing/selftests/bpf/prog_tests/core_autosize.c @@ -167,7 +167,7 @@ void test_core_autosize(void) if (!ASSERT_OK_PTR(bss_map, "bss_map_find")) goto cleanup; - err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, (void *)&out); + err = bpf_map__lookup_elem(bss_map, &zero, sizeof(zero), &out, sizeof(out), 0); if (!ASSERT_OK(err, "bss_lookup")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index f28f75aa9154..3712dfe1be59 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -277,13 +277,21 @@ static int duration = 0; #define SIZE_OUTPUT_DATA(type) \ STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \ .int_sz = sizeof(((type *)0)->int_field), \ + .int_off = offsetof(type, int_field), \ .struct_sz = sizeof(((type *)0)->struct_field), \ + .struct_off = offsetof(type, struct_field), \ .union_sz = sizeof(((type *)0)->union_field), \ + .union_off = offsetof(type, union_field), \ .arr_sz = sizeof(((type *)0)->arr_field), \ - .arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \ + .arr_off = offsetof(type, arr_field), \ + .arr_elem_sz = sizeof(((type *)0)->arr_field[1]), \ + .arr_elem_off = offsetof(type, arr_field[1]), \ .ptr_sz = 8, /* always 8-byte pointer for BPF */ \ + .ptr_off = offsetof(type, ptr_field), \ .enum_sz = sizeof(((type *)0)->enum_field), \ + .enum_off = offsetof(type, enum_field), \ .float_sz = sizeof(((type *)0)->float_field), \ + .float_off = offsetof(type, float_field), \ } #define SIZE_CASE(name) { \ @@ -714,9 +722,10 @@ static const struct core_reloc_test_case test_cases[] = { }), BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield), - /* size relocation checks */ + /* field size and offset relocation checks */ SIZE_CASE(size), SIZE_CASE(size___diff_sz), + SIZE_CASE(size___diff_offs), SIZE_ERR_CASE(size___err_ambiguous), /* validate type existence and size relocations */ diff --git a/tools/testing/selftests/bpf/prog_tests/core_retro.c b/tools/testing/selftests/bpf/prog_tests/core_retro.c index 6acb0e94d4d7..4a2c256c8db6 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_retro.c +++ b/tools/testing/selftests/bpf/prog_tests/core_retro.c @@ -6,31 +6,32 @@ void test_core_retro(void) { - int err, zero = 0, res, duration = 0, my_pid = getpid(); + int err, zero = 0, res, my_pid = getpid(); struct test_core_retro *skel; /* load program */ skel = test_core_retro__open_and_load(); - if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) + if (!ASSERT_OK_PTR(skel, "skel_load")) goto out_close; - err = bpf_map_update_elem(bpf_map__fd(skel->maps.exp_tgid_map), &zero, &my_pid, 0); - if (CHECK(err, "map_update", "failed to set expected PID: %d\n", errno)) + err = bpf_map__update_elem(skel->maps.exp_tgid_map, &zero, sizeof(zero), + &my_pid, sizeof(my_pid), 0); + if (!ASSERT_OK(err, "map_update")) goto out_close; /* attach probe */ 
err = test_core_retro__attach(skel); - if (CHECK(err, "attach_kprobe", "err %d\n", err)) + if (!ASSERT_OK(err, "attach_kprobe")) goto out_close; /* trigger */ usleep(1); - err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res); - if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno)) + err = bpf_map__lookup_elem(skel->maps.results, &zero, sizeof(zero), &res, sizeof(res), 0); + if (!ASSERT_OK(err, "map_lookup")) goto out_close; - CHECK(res != my_pid, "pid_check", "got %d != exp %d\n", res, my_pid); + ASSERT_EQ(res, my_pid, "pid_check"); out_close: test_core_retro__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c index 5aa52cc31dc2..c11832657d2b 100644 --- a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c +++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c @@ -2,6 +2,7 @@ /* Copyright (C) 2021. Huawei Technologies Co., Ltd */ #include <test_progs.h> #include "dummy_st_ops.skel.h" +#include "trace_dummy_st_ops.skel.h" /* Need to keep consistent with definition in include/linux/bpf.h */ struct bpf_dummy_ops_state { @@ -56,6 +57,7 @@ static void test_dummy_init_ptr_arg(void) .ctx_in = args, .ctx_size_in = sizeof(args), ); + struct trace_dummy_st_ops *trace_skel; struct dummy_st_ops *skel; int fd, err; @@ -64,12 +66,33 @@ static void test_dummy_init_ptr_arg(void) return; fd = bpf_program__fd(skel->progs.test_1); + + trace_skel = trace_dummy_st_ops__open(); + if (!ASSERT_OK_PTR(trace_skel, "trace_dummy_st_ops__open")) + goto done; + + err = bpf_program__set_attach_target(trace_skel->progs.fentry_test_1, + fd, "test_1"); + if (!ASSERT_OK(err, "set_attach_target(fentry_test_1)")) + goto done; + + err = trace_dummy_st_ops__load(trace_skel); + if (!ASSERT_OK(err, "load(trace_skel)")) + goto done; + + err = trace_dummy_st_ops__attach(trace_skel); + if (!ASSERT_OK(err, "attach(trace_skel)")) + goto done; + err = bpf_prog_test_run_opts(fd, &attr); ASSERT_OK(err, "test_run"); ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret"); ASSERT_EQ(attr.retval, exp_retval, "test_ret"); + ASSERT_EQ(trace_skel->bss->val, exp_retval, "fentry_val"); +done: dummy_st_ops__destroy(skel); + trace_dummy_st_ops__destroy(trace_skel); } static void test_dummy_multiple_args(void) diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c new file mode 100644 index 000000000000..3c7aa82b98e2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Facebook */ + +#include <test_progs.h> +#include "dynptr_fail.skel.h" +#include "dynptr_success.skel.h" + +static size_t log_buf_sz = 1048576; /* 1 MB */ +static char obj_log_buf[1048576]; + +static struct { + const char *prog_name; + const char *expected_err_msg; +} dynptr_tests[] = { + /* failure cases */ + {"ringbuf_missing_release1", "Unreleased reference id=1"}, + {"ringbuf_missing_release2", "Unreleased reference id=2"}, + {"ringbuf_missing_release_callback", "Unreleased reference id"}, + {"use_after_invalid", "Expected an initialized dynptr as arg #3"}, + {"ringbuf_invalid_api", "type=mem expected=alloc_mem"}, + {"add_dynptr_to_map1", "invalid indirect read from stack"}, + {"add_dynptr_to_map2", "invalid indirect read from stack"}, + {"data_slice_out_of_bounds_ringbuf", "value is outside of the allowed memory range"}, + {"data_slice_out_of_bounds_map_value", "value is outside of the allowed memory range"}, + 
{"data_slice_use_after_release", "invalid mem access 'scalar'"}, + {"data_slice_missing_null_check1", "invalid mem access 'mem_or_null'"}, + {"data_slice_missing_null_check2", "invalid mem access 'mem_or_null'"}, + {"invalid_helper1", "invalid indirect read from stack"}, + {"invalid_helper2", "Expected an initialized dynptr as arg #3"}, + {"invalid_write1", "Expected an initialized dynptr as arg #1"}, + {"invalid_write2", "Expected an initialized dynptr as arg #3"}, + {"invalid_write3", "Expected an initialized ringbuf dynptr as arg #1"}, + {"invalid_write4", "arg 1 is an unacquired reference"}, + {"invalid_read1", "invalid read from stack"}, + {"invalid_read2", "cannot pass in dynptr at an offset"}, + {"invalid_read3", "invalid read from stack"}, + {"invalid_read4", "invalid read from stack"}, + {"invalid_offset", "invalid write to stack"}, + {"global", "type=map_value expected=fp"}, + {"release_twice", "arg 1 is an unacquired reference"}, + {"release_twice_callback", "arg 1 is an unacquired reference"}, + {"dynptr_from_mem_invalid_api", + "Unsupported reg type fp for bpf_dynptr_from_mem data"}, + + /* success cases */ + {"test_read_write", NULL}, + {"test_data_slice", NULL}, + {"test_ringbuf", NULL}, +}; + +static void verify_fail(const char *prog_name, const char *expected_err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + struct bpf_program *prog; + struct dynptr_fail *skel; + int err; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 1; + + skel = dynptr_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "dynptr_fail__open_opts")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + bpf_program__set_autoload(prog, true); + + bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); + + err = dynptr_fail__load(skel); + if (!ASSERT_ERR(err, "unexpected load success")) + goto cleanup; + + if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { + fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); + fprintf(stderr, "Verifier output: %s\n", obj_log_buf); + } + +cleanup: + dynptr_fail__destroy(skel); +} + +static void verify_success(const char *prog_name) +{ + struct dynptr_success *skel; + struct bpf_program *prog; + struct bpf_link *link; + + skel = dynptr_success__open(); + if (!ASSERT_OK_PTR(skel, "dynptr_success__open")) + return; + + skel->bss->pid = getpid(); + + bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); + + dynptr_success__load(skel); + if (!ASSERT_OK_PTR(skel, "dynptr_success__load")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "bpf_program__attach")) + goto cleanup; + + usleep(1); + + ASSERT_EQ(skel->bss->err, 0, "err"); + + bpf_link__destroy(link); + +cleanup: + dynptr_success__destroy(skel); +} + +void test_dynptr(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dynptr_tests); i++) { + if (!test__start_subtest(dynptr_tests[i].prog_name)) + continue; + + if (dynptr_tests[i].expected_err_msg) + verify_fail(dynptr_tests[i].prog_name, + dynptr_tests[i].expected_err_msg); + else + verify_success(dynptr_tests[i].prog_name); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c index 
3ee2107bbf7a..a7e74297f15f 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c @@ -5,7 +5,7 @@ /* that's kernel internal BPF_MAX_TRAMP_PROGS define */ #define CNT 38 -void test_fexit_stress(void) +void serial_test_fexit_stress(void) { char test_skb[128] = {}; int fexit_fd[CNT] = {}; @@ -53,7 +53,7 @@ void test_fexit_stress(void) &trace_opts); if (!ASSERT_GE(fexit_fd[i], 0, "fexit load")) goto out; - link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]); + link_fd[i] = bpf_link_create(fexit_fd[i], 0, BPF_TRACE_FEXIT, NULL); if (!ASSERT_GE(link_fd[i], 0, "fexit attach")) goto out; } diff --git a/tools/testing/selftests/bpf/prog_tests/for_each.c b/tools/testing/selftests/bpf/prog_tests/for_each.c index 044df13ee069..8963f8a549f2 100644 --- a/tools/testing/selftests/bpf/prog_tests/for_each.c +++ b/tools/testing/selftests/bpf/prog_tests/for_each.c @@ -4,14 +4,16 @@ #include <network_helpers.h> #include "for_each_hash_map_elem.skel.h" #include "for_each_array_map_elem.skel.h" +#include "for_each_map_elem_write_key.skel.h" static unsigned int duration; static void test_hash_map(void) { - int i, err, hashmap_fd, max_entries, percpu_map_fd; + int i, err, max_entries; struct for_each_hash_map_elem *skel; __u64 *percpu_valbuf = NULL; + size_t percpu_val_sz; __u32 key, num_cpus; __u64 val; LIBBPF_OPTS(bpf_test_run_opts, topts, @@ -24,26 +26,27 @@ static void test_hash_map(void) if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load")) return; - hashmap_fd = bpf_map__fd(skel->maps.hashmap); max_entries = bpf_map__max_entries(skel->maps.hashmap); for (i = 0; i < max_entries; i++) { key = i; val = i + 1; - err = bpf_map_update_elem(hashmap_fd, &key, &val, BPF_ANY); + err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key), + &val, sizeof(val), BPF_ANY); if (!ASSERT_OK(err, "map_update")) goto out; } num_cpus = bpf_num_possible_cpus(); - percpu_map_fd = bpf_map__fd(skel->maps.percpu_map); - percpu_valbuf = malloc(sizeof(__u64) * num_cpus); + percpu_val_sz = sizeof(__u64) * num_cpus; + percpu_valbuf = malloc(percpu_val_sz); if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf")) goto out; key = 1; for (i = 0; i < num_cpus; i++) percpu_valbuf[i] = i + 1; - err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY); + err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key), + percpu_valbuf, percpu_val_sz, BPF_ANY); if (!ASSERT_OK(err, "percpu_map_update")) goto out; @@ -57,7 +60,7 @@ static void test_hash_map(void) ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems"); key = 1; - err = bpf_map_lookup_elem(hashmap_fd, &key, &val); + err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0); ASSERT_ERR(err, "hashmap_lookup"); ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called"); @@ -74,9 +77,10 @@ out: static void test_array_map(void) { __u32 key, num_cpus, max_entries; - int i, arraymap_fd, percpu_map_fd, err; + int i, err; struct for_each_array_map_elem *skel; __u64 *percpu_valbuf = NULL; + size_t percpu_val_sz; __u64 val, expected_total; LIBBPF_OPTS(bpf_test_run_opts, topts, .data_in = &pkt_v4, @@ -88,7 +92,6 @@ static void test_array_map(void) if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load")) return; - arraymap_fd = bpf_map__fd(skel->maps.arraymap); expected_total = 0; max_entries = bpf_map__max_entries(skel->maps.arraymap); for (i = 0; i < max_entries; i++) { @@ -97,21 +100,23 @@ static void test_array_map(void) /* skip the last 
iteration for expected total */ if (i != max_entries - 1) expected_total += val; - err = bpf_map_update_elem(arraymap_fd, &key, &val, BPF_ANY); + err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key), + &val, sizeof(val), BPF_ANY); if (!ASSERT_OK(err, "map_update")) goto out; } num_cpus = bpf_num_possible_cpus(); - percpu_map_fd = bpf_map__fd(skel->maps.percpu_map); - percpu_valbuf = malloc(sizeof(__u64) * num_cpus); + percpu_val_sz = sizeof(__u64) * num_cpus; + percpu_valbuf = malloc(percpu_val_sz); if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf")) goto out; key = 0; for (i = 0; i < num_cpus; i++) percpu_valbuf[i] = i + 1; - err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY); + err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key), + percpu_valbuf, percpu_val_sz, BPF_ANY); if (!ASSERT_OK(err, "percpu_map_update")) goto out; @@ -129,10 +134,21 @@ out: for_each_array_map_elem__destroy(skel); } +static void test_write_map_key(void) +{ + struct for_each_map_elem_write_key *skel; + + skel = for_each_map_elem_write_key__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load")) + for_each_map_elem_write_key__destroy(skel); +} + void test_for_each(void) { if (test__start_subtest("hash_map")) test_hash_map(); if (test__start_subtest("array_map")) test_array_map(); + if (test__start_subtest("write_map_key")) + test_write_map_key(); } diff --git a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c index e1de5f80c3b2..0354f9b82c65 100644 --- a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c +++ b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c @@ -6,11 +6,10 @@ void test_helper_restricted(void) { int prog_i = 0, prog_cnt; - int duration = 0; do { struct test_helper_restricted *test; - int maybeOK; + int err; test = test_helper_restricted__open(); if (!ASSERT_OK_PTR(test, "open")) @@ -21,12 +20,11 @@ void test_helper_restricted(void) for (int j = 0; j < prog_cnt; ++j) { struct bpf_program *prog = *test->skeleton->progs[j].prog; - maybeOK = bpf_program__set_autoload(prog, prog_i == j); - ASSERT_OK(maybeOK, "set autoload"); + bpf_program__set_autoload(prog, true); } - maybeOK = test_helper_restricted__load(test); - CHECK(!maybeOK, test->skeleton->progs[prog_i].name, "helper isn't restricted"); + err = test_helper_restricted__load(test); + ASSERT_ERR(err, "load_should_fail"); test_helper_restricted__destroy(test); } while (++prog_i < prog_cnt); diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index b9876b55fc0c..586dc52d6fb9 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -2,6 +2,9 @@ #include <test_progs.h> #include "kprobe_multi.skel.h" #include "trace_helpers.h" +#include "kprobe_multi_empty.skel.h" +#include "bpf/libbpf_internal.h" +#include "bpf/hashmap.h" static void kprobe_multi_test_run(struct kprobe_multi *skel, bool test_return) { @@ -140,14 +143,14 @@ test_attach_api(const char *pattern, struct bpf_kprobe_multi_opts *opts) goto cleanup; skel->bss->pid = getpid(); - link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, pattern, opts); if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts")) goto cleanup; if (opts) { opts->retprobe = true; - link2 
= bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe, + link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe_manual, pattern, opts); if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts")) goto cleanup; @@ -232,7 +235,7 @@ static void test_attach_api_fails(void) skel->bss->pid = getpid(); /* fail_1 - pattern and opts NULL */ - link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, NULL, NULL); if (!ASSERT_ERR_PTR(link, "fail_1")) goto cleanup; @@ -246,7 +249,7 @@ static void test_attach_api_fails(void) opts.cnt = ARRAY_SIZE(syms); opts.cookies = NULL; - link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, NULL, &opts); if (!ASSERT_ERR_PTR(link, "fail_2")) goto cleanup; @@ -260,7 +263,7 @@ static void test_attach_api_fails(void) opts.cnt = ARRAY_SIZE(syms); opts.cookies = NULL; - link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, "ksys_*", &opts); if (!ASSERT_ERR_PTR(link, "fail_3")) goto cleanup; @@ -274,7 +277,7 @@ static void test_attach_api_fails(void) opts.cnt = ARRAY_SIZE(syms); opts.cookies = NULL; - link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, "ksys_*", &opts); if (!ASSERT_ERR_PTR(link, "fail_4")) goto cleanup; @@ -288,7 +291,7 @@ static void test_attach_api_fails(void) opts.cnt = 0; opts.cookies = cookies; - link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, "ksys_*", &opts); if (!ASSERT_ERR_PTR(link, "fail_5")) goto cleanup; @@ -301,6 +304,146 @@ cleanup: kprobe_multi__destroy(skel); } +static inline __u64 get_time_ns(void) +{ + struct timespec t; + + clock_gettime(CLOCK_MONOTONIC, &t); + return (__u64) t.tv_sec * 1000000000 + t.tv_nsec; +} + +static size_t symbol_hash(const void *key, void *ctx __maybe_unused) +{ + return str_hash((const char *) key); +} + +static bool symbol_equal(const void *key1, const void *key2, void *ctx __maybe_unused) +{ + return strcmp((const char *) key1, (const char *) key2) == 0; +} + +static int get_syms(char ***symsp, size_t *cntp) +{ + size_t cap = 0, cnt = 0, i; + char *name, **syms = NULL; + struct hashmap *map; + char buf[256]; + FILE *f; + int err; + + /* + * The available_filter_functions contains many duplicates, + * but other than that all symbols are usable in kprobe multi + * interface. + * Filtering out duplicates by using hashmap__add, which won't + * add existing entry. + */ + f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r"); + if (!f) + return -EINVAL; + + map = hashmap__new(symbol_hash, symbol_equal, NULL); + if (IS_ERR(map)) { + err = libbpf_get_error(map); + goto error; + } + + while (fgets(buf, sizeof(buf), f)) { + /* skip modules */ + if (strchr(buf, '[')) + continue; + if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1) + continue; + /* + * We attach to almost all kernel functions and some of them + * will cause 'suspicious RCU usage' when fprobe is attached + * to them. Filter out the current culprits - arch_cpu_idle + * and rcu_* functions. 
+ */ + if (!strcmp(name, "arch_cpu_idle")) + continue; + if (!strncmp(name, "rcu_", 4)) + continue; + err = hashmap__add(map, name, NULL); + if (err) { + free(name); + if (err == -EEXIST) + continue; + goto error; + } + err = libbpf_ensure_mem((void **) &syms, &cap, + sizeof(*syms), cnt + 1); + if (err) { + free(name); + goto error; + } + syms[cnt] = name; + cnt++; + } + + *symsp = syms; + *cntp = cnt; + +error: + fclose(f); + hashmap__free(map); + if (err) { + for (i = 0; i < cnt; i++) + free(syms[i]); + free(syms); + } + return err; +} + +static void test_bench_attach(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + struct kprobe_multi_empty *skel = NULL; + long attach_start_ns, attach_end_ns; + long detach_start_ns, detach_end_ns; + double attach_delta, detach_delta; + struct bpf_link *link = NULL; + char **syms = NULL; + size_t cnt, i; + + if (!ASSERT_OK(get_syms(&syms, &cnt), "get_syms")) + return; + + skel = kprobe_multi_empty__open_and_load(); + if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load")) + goto cleanup; + + opts.syms = (const char **) syms; + opts.cnt = cnt; + + attach_start_ns = get_time_ns(); + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_empty, + NULL, &opts); + attach_end_ns = get_time_ns(); + + if (!ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + + detach_start_ns = get_time_ns(); + bpf_link__destroy(link); + detach_end_ns = get_time_ns(); + + attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0; + detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0; + + printf("%s: found %lu functions\n", __func__, cnt); + printf("%s: attached in %7.3lfs\n", __func__, attach_delta); + printf("%s: detached in %7.3lfs\n", __func__, detach_delta); + +cleanup: + kprobe_multi_empty__destroy(skel); + if (syms) { + for (i = 0; i < cnt; i++) + free(syms[i]); + free(syms); + } +} + void test_kprobe_multi_test(void) { if (!ASSERT_OK(load_kallsyms(), "load_kallsyms")) @@ -320,4 +463,6 @@ void test_kprobe_multi_test(void) test_attach_api_syms(); if (test__start_subtest("attach_api_fails")) test_attach_api_fails(); + if (test__start_subtest("bench_attach")) + test_bench_attach(); } diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c index f6933b06daf8..1d7a2f1e0731 100644 --- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c @@ -138,12 +138,16 @@ cleanup: test_ksyms_weak_lskel__destroy(skel); } -static void test_write_check(void) +static void test_write_check(bool test_handler1) { struct test_ksyms_btf_write_check *skel; - skel = test_ksyms_btf_write_check__open_and_load(); - ASSERT_ERR_PTR(skel, "unexpected load of a prog writing to ksym memory\n"); + skel = test_ksyms_btf_write_check__open(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_btf_write_check__open")) + return; + bpf_program__set_autoload(test_handler1 ?
skel->progs.handler2 : skel->progs.handler1, false); + ASSERT_ERR(test_ksyms_btf_write_check__load(skel), + "unexpected load of a prog writing to ksym memory\n"); test_ksyms_btf_write_check__destroy(skel); } @@ -179,6 +183,9 @@ void test_ksyms_btf(void) if (test__start_subtest("weak_ksyms_lskel")) test_weak_syms_lskel(); - if (test__start_subtest("write_check")) - test_write_check(); + if (test__start_subtest("write_check1")) + test_write_check(true); + + if (test__start_subtest("write_check2")) + test_write_check(false); } diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c index e9916f2817ec..cad664546912 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c @@ -14,6 +14,12 @@ void test_linked_funcs(void) if (!ASSERT_OK_PTR(skel, "skel_open")) return; + /* handler1 and handler2 are marked as SEC("?raw_tp/sys_enter") and + * are set to not autoload by default + */ + bpf_program__set_autoload(skel->progs.handler1, true); + bpf_program__set_autoload(skel->progs.handler2, true); + skel->rodata->my_tid = syscall(SYS_gettid); skel->bss->syscall_id = SYS_getpgid; diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c new file mode 100644 index 000000000000..f4ffdcabf4e4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> +#include <bpf/btf.h> + +#include "test_log_fixup.skel.h" + +enum trunc_type { + TRUNC_NONE, + TRUNC_PARTIAL, + TRUNC_FULL, +}; + +static void bad_core_relo(size_t log_buf_size, enum trunc_type trunc_type) +{ + char log_buf[8 * 1024]; + struct test_log_fixup* skel; + int err; + + skel = test_log_fixup__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bpf_program__set_autoload(skel->progs.bad_relo, true); + memset(log_buf, 0, sizeof(log_buf)); + bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, log_buf_size ?: sizeof(log_buf)); + + err = test_log_fixup__load(skel); + if (!ASSERT_ERR(err, "load_fail")) + goto cleanup; + + ASSERT_HAS_SUBSTR(log_buf, + "0: <invalid CO-RE relocation>\n" + "failed to resolve CO-RE relocation <byte_sz> ", + "log_buf_part1"); + + switch (trunc_type) { + case TRUNC_NONE: + ASSERT_HAS_SUBSTR(log_buf, + "struct task_struct___bad.fake_field (0:1 @ offset 4)\n", + "log_buf_part2"); + ASSERT_HAS_SUBSTR(log_buf, + "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n", + "log_buf_end"); + break; + case TRUNC_PARTIAL: + /* we should get full libbpf message patch */ + ASSERT_HAS_SUBSTR(log_buf, + "struct task_struct___bad.fake_field (0:1 @ offset 4)\n", + "log_buf_part2"); + /* we shouldn't get full end of BPF verifier log */ + ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"), + "log_buf_end"); + break; + case TRUNC_FULL: + /* we shouldn't get second part of libbpf message patch */ + ASSERT_NULL(strstr(log_buf, "struct task_struct___bad.fake_field (0:1 @ offset 4)\n"), + "log_buf_part2"); + /* we shouldn't get full end of BPF verifier log */ + ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"), + "log_buf_end"); + break; + } + + if (env.verbosity > VERBOSE_NONE) + printf("LOG: \n=================\n%s=================\n", log_buf); +cleanup: + test_log_fixup__destroy(skel); +} + 
+static void bad_core_relo_subprog(void) +{ + char log_buf[8 * 1024]; + struct test_log_fixup* skel; + int err; + + skel = test_log_fixup__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bpf_program__set_autoload(skel->progs.bad_relo_subprog, true); + bpf_program__set_log_buf(skel->progs.bad_relo_subprog, log_buf, sizeof(log_buf)); + + err = test_log_fixup__load(skel); + if (!ASSERT_ERR(err, "load_fail")) + goto cleanup; + + ASSERT_HAS_SUBSTR(log_buf, + ": <invalid CO-RE relocation>\n" + "failed to resolve CO-RE relocation <byte_off> ", + "log_buf"); + ASSERT_HAS_SUBSTR(log_buf, + "struct task_struct___bad.fake_field_subprog (0:2 @ offset 8)\n", + "log_buf"); + + if (env.verbosity > VERBOSE_NONE) + printf("LOG: \n=================\n%s=================\n", log_buf); + +cleanup: + test_log_fixup__destroy(skel); +} + +static void missing_map(void) +{ + char log_buf[8 * 1024]; + struct test_log_fixup* skel; + int err; + + skel = test_log_fixup__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bpf_map__set_autocreate(skel->maps.missing_map, false); + + bpf_program__set_autoload(skel->progs.use_missing_map, true); + bpf_program__set_log_buf(skel->progs.use_missing_map, log_buf, sizeof(log_buf)); + + err = test_log_fixup__load(skel); + if (!ASSERT_ERR(err, "load_fail")) + goto cleanup; + + ASSERT_TRUE(bpf_map__autocreate(skel->maps.existing_map), "existing_map_autocreate"); + ASSERT_FALSE(bpf_map__autocreate(skel->maps.missing_map), "missing_map_autocreate"); + + ASSERT_HAS_SUBSTR(log_buf, + "8: <invalid BPF map reference>\n" + "BPF map 'missing_map' is referenced but wasn't created\n", + "log_buf"); + + if (env.verbosity > VERBOSE_NONE) + printf("LOG: \n=================\n%s=================\n", log_buf); + +cleanup: + test_log_fixup__destroy(skel); +} + +void test_log_fixup(void) +{ + if (test__start_subtest("bad_core_relo_trunc_none")) + bad_core_relo(0, TRUNC_NONE /* full buf */); + if (test__start_subtest("bad_core_relo_trunc_partial")) + bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */); + if (test__start_subtest("bad_core_relo_trunc_full")) + bad_core_relo(250, TRUNC_FULL /* truncate also libbpf's message patch */); + if (test__start_subtest("bad_core_relo_subprog")) + bad_core_relo_subprog(); + if (test__start_subtest("missing_map")) + missing_map(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c b/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c index beebfa9730e1..a767bb4a271c 100644 --- a/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c +++ b/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c @@ -112,7 +112,8 @@ static void test_lookup_and_delete_hash(void) /* Lookup and delete element. */ key = 1; - err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value); + err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map, + &key, sizeof(key), &value, sizeof(value), 0); if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) goto cleanup; @@ -147,7 +148,8 @@ static void test_lookup_and_delete_percpu_hash(void) /* Lookup and delete element. */ key = 1; - err = bpf_map_lookup_and_delete_elem(map_fd, &key, value); + err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map, + &key, sizeof(key), value, sizeof(value), 0); if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) goto cleanup; @@ -191,7 +193,8 @@ static void test_lookup_and_delete_lru_hash(void) goto cleanup; /* Lookup and delete element 3. 
*/ - err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value); + err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map, + &key, sizeof(key), &value, sizeof(value), 0); if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) goto cleanup; @@ -240,10 +243,10 @@ static void test_lookup_and_delete_lru_percpu_hash(void) value[i] = 0; /* Lookup and delete element 3. */ - err = bpf_map_lookup_and_delete_elem(map_fd, &key, value); - if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) { + err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map, + &key, sizeof(key), value, sizeof(value), 0); + if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) goto cleanup; - } /* Check if only one CPU has set the value. */ for (i = 0; i < nr_cpus; i++) { diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c new file mode 100644 index 000000000000..fdcea7a61491 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +#include "map_kptr.skel.h" +#include "map_kptr_fail.skel.h" + +static char log_buf[1024 * 1024]; + +struct { + const char *prog_name; + const char *err_msg; +} map_kptr_fail_tests[] = { + { "size_not_bpf_dw", "kptr access size must be BPF_DW" }, + { "non_const_var_off", "kptr access cannot have variable offset" }, + { "non_const_var_off_kptr_xchg", "R1 doesn't have constant offset. kptr has to be" }, + { "misaligned_access_write", "kptr access misaligned expected=8 off=7" }, + { "misaligned_access_read", "kptr access misaligned expected=8 off=1" }, + { "reject_var_off_store", "variable untrusted_ptr_ access var_off=(0x0; 0x1e0)" }, + { "reject_bad_type_match", "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc" }, + { "marked_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" }, + { "correct_btf_id_check_size", "access beyond struct prog_test_ref_kfunc at off 32 size 4" }, + { "inherit_untrusted_on_walk", "R1 type=untrusted_ptr_ expected=percpu_ptr_" }, + { "reject_kptr_xchg_on_unref", "off=8 kptr isn't referenced kptr" }, + { "reject_kptr_get_no_map_val", "arg#0 expected pointer to map value" }, + { "reject_kptr_get_no_null_map_val", "arg#0 expected pointer to map value" }, + { "reject_kptr_get_no_kptr", "arg#0 no referenced kptr at map value offset=0" }, + { "reject_kptr_get_on_unref", "arg#0 no referenced kptr at map value offset=8" }, + { "reject_kptr_get_bad_type_match", "kernel function bpf_kfunc_call_test_kptr_get args#0" }, + { "mark_ref_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" }, + { "reject_untrusted_store_to_ref", "store to referenced kptr disallowed" }, + { "reject_bad_type_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member" }, + { "reject_untrusted_xchg", "R2 type=untrusted_ptr_ expected=ptr_" }, + { "reject_member_of_ref_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc" }, + { "reject_indirect_helper_access", "kptr cannot be accessed indirectly by helper" }, + { "reject_indirect_global_func_access", "kptr cannot be accessed indirectly by helper" }, + { "kptr_xchg_ref_state", "Unreleased reference id=5 alloc_insn=" }, + { "kptr_get_ref_state", "Unreleased reference id=3 alloc_insn=" }, +}; + +static void test_map_kptr_fail_prog(const char *prog_name, const char *err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf, + .kernel_log_size = 
sizeof(log_buf), + .kernel_log_level = 1); + struct map_kptr_fail *skel; + struct bpf_program *prog; + int ret; + + skel = map_kptr_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "map_kptr_fail__open_opts")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto end; + + bpf_program__set_autoload(prog, true); + + ret = map_kptr_fail__load(skel); + if (!ASSERT_ERR(ret, "map_kptr__load must fail")) + goto end; + + if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) { + fprintf(stderr, "Expected: %s\n", err_msg); + fprintf(stderr, "Verifier: %s\n", log_buf); + } + +end: + map_kptr_fail__destroy(skel); +} + +static void test_map_kptr_fail(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(map_kptr_fail_tests); i++) { + if (!test__start_subtest(map_kptr_fail_tests[i].prog_name)) + continue; + test_map_kptr_fail_prog(map_kptr_fail_tests[i].prog_name, + map_kptr_fail_tests[i].err_msg); + } +} + +static void test_map_kptr_success(bool test_run) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct map_kptr *skel; + int key = 0, ret; + char buf[16]; + + skel = map_kptr__open_and_load(); + if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref), &opts); + ASSERT_OK(ret, "test_map_kptr_ref refcount"); + ASSERT_OK(opts.retval, "test_map_kptr_ref retval"); + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts); + ASSERT_OK(ret, "test_map_kptr_ref2 refcount"); + ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval"); + + if (test_run) + return; + + ret = bpf_map__update_elem(skel->maps.array_map, + &key, sizeof(key), buf, sizeof(buf), 0); + ASSERT_OK(ret, "array_map update"); + ret = bpf_map__update_elem(skel->maps.array_map, + &key, sizeof(key), buf, sizeof(buf), 0); + ASSERT_OK(ret, "array_map update2"); + + ret = bpf_map__update_elem(skel->maps.hash_map, + &key, sizeof(key), buf, sizeof(buf), 0); + ASSERT_OK(ret, "hash_map update"); + ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0); + ASSERT_OK(ret, "hash_map delete"); + + ret = bpf_map__update_elem(skel->maps.hash_malloc_map, + &key, sizeof(key), buf, sizeof(buf), 0); + ASSERT_OK(ret, "hash_malloc_map update"); + ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0); + ASSERT_OK(ret, "hash_malloc_map delete"); + + ret = bpf_map__update_elem(skel->maps.lru_hash_map, + &key, sizeof(key), buf, sizeof(buf), 0); + ASSERT_OK(ret, "lru_hash_map update"); + ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0); + ASSERT_OK(ret, "lru_hash_map delete"); + + map_kptr__destroy(skel); +} + +void test_map_kptr(void) +{ + if (test__start_subtest("success")) { + test_map_kptr_success(false); + /* Do test_run twice, so that we see refcount going back to 1 + * after we leave it in map from first iteration. 
+ */ + test_map_kptr_success(true); + } + test_map_kptr_fail(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c new file mode 100644 index 000000000000..bfb1bf3fd427 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Bytedance */ + +#include <test_progs.h> +#include "test_map_lookup_percpu_elem.skel.h" + +void test_map_lookup_percpu_elem(void) +{ + struct test_map_lookup_percpu_elem *skel; + __u64 key = 0, sum; + int ret, i, nr_cpus = libbpf_num_possible_cpus(); + __u64 *buf; + + buf = malloc(nr_cpus*sizeof(__u64)); + if (!ASSERT_OK_PTR(buf, "malloc")) + return; + + for (i = 0; i < nr_cpus; i++) + buf[i] = i; + sum = (nr_cpus - 1) * nr_cpus / 2; + + skel = test_map_lookup_percpu_elem__open(); + if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open")) + goto exit; + + skel->rodata->my_pid = getpid(); + skel->rodata->nr_cpus = nr_cpus; + + ret = test_map_lookup_percpu_elem__load(skel); + if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__load")) + goto cleanup; + + ret = test_map_lookup_percpu_elem__attach(skel); + if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach")) + goto cleanup; + + ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0); + ASSERT_OK(ret, "percpu_array_map update"); + + ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_hash_map), &key, buf, 0); + ASSERT_OK(ret, "percpu_hash_map update"); + + ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_lru_hash_map), &key, buf, 0); + ASSERT_OK(ret, "percpu_lru_hash_map update"); + + syscall(__NR_getuid); + + test_map_lookup_percpu_elem__detach(skel); + + ASSERT_EQ(skel->bss->percpu_array_elem_sum, sum, "percpu_array lookup percpu elem"); + ASSERT_EQ(skel->bss->percpu_hash_elem_sum, sum, "percpu_hash lookup percpu elem"); + ASSERT_EQ(skel->bss->percpu_lru_hash_elem_sum, sum, "percpu_lru_hash lookup percpu elem"); + +cleanup: + test_map_lookup_percpu_elem__destroy(skel); +exit: + free(buf); +} diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c new file mode 100644 index 000000000000..59f08d6d1d53 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Tessares SA. */ +/* Copyright (c) 2022, SUSE. 
*/ + +#include <test_progs.h> +#include "cgroup_helpers.h" +#include "network_helpers.h" +#include "mptcp_sock.skel.h" + +#ifndef TCP_CA_NAME_MAX +#define TCP_CA_NAME_MAX 16 +#endif + +struct mptcp_storage { + __u32 invoked; + __u32 is_mptcp; + struct sock *sk; + __u32 token; + struct sock *first; + char ca_name[TCP_CA_NAME_MAX]; +}; + +static int verify_tsk(int map_fd, int client_fd) +{ + int err, cfd = client_fd; + struct mptcp_storage val; + + err = bpf_map_lookup_elem(map_fd, &cfd, &val); + if (!ASSERT_OK(err, "bpf_map_lookup_elem")) + return err; + + if (!ASSERT_EQ(val.invoked, 1, "unexpected invoked count")) + err++; + + if (!ASSERT_EQ(val.is_mptcp, 0, "unexpected is_mptcp")) + err++; + + return err; +} + +static void get_msk_ca_name(char ca_name[]) +{ + size_t len; + int fd; + + fd = open("/proc/sys/net/ipv4/tcp_congestion_control", O_RDONLY); + if (!ASSERT_GE(fd, 0, "failed to open tcp_congestion_control")) + return; + + len = read(fd, ca_name, TCP_CA_NAME_MAX); + if (!ASSERT_GT(len, 0, "failed to read ca_name")) + goto err; + + if (len > 0 && ca_name[len - 1] == '\n') + ca_name[len - 1] = '\0'; + +err: + close(fd); +} + +static int verify_msk(int map_fd, int client_fd, __u32 token) +{ + char ca_name[TCP_CA_NAME_MAX]; + int err, cfd = client_fd; + struct mptcp_storage val; + + if (!ASSERT_GT(token, 0, "invalid token")) + return -1; + + get_msk_ca_name(ca_name); + + err = bpf_map_lookup_elem(map_fd, &cfd, &val); + if (!ASSERT_OK(err, "bpf_map_lookup_elem")) + return err; + + if (!ASSERT_EQ(val.invoked, 1, "unexpected invoked count")) + err++; + + if (!ASSERT_EQ(val.is_mptcp, 1, "unexpected is_mptcp")) + err++; + + if (!ASSERT_EQ(val.token, token, "unexpected token")) + err++; + + if (!ASSERT_EQ(val.first, val.sk, "unexpected first")) + err++; + + if (!ASSERT_STRNEQ(val.ca_name, ca_name, TCP_CA_NAME_MAX, "unexpected ca_name")) + err++; + + return err; +} + +static int run_test(int cgroup_fd, int server_fd, bool is_mptcp) +{ + int client_fd, prog_fd, map_fd, err; + struct mptcp_sock *sock_skel; + + sock_skel = mptcp_sock__open_and_load(); + if (!ASSERT_OK_PTR(sock_skel, "skel_open_load")) + return -EIO; + + err = mptcp_sock__attach(sock_skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + prog_fd = bpf_program__fd(sock_skel->progs._sockops); + if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd")) { + err = -EIO; + goto out; + } + + map_fd = bpf_map__fd(sock_skel->maps.socket_storage_map); + if (!ASSERT_GE(map_fd, 0, "bpf_map__fd")) { + err = -EIO; + goto out; + } + + err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0); + if (!ASSERT_OK(err, "bpf_prog_attach")) + goto out; + + client_fd = connect_to_fd(server_fd, 0); + if (!ASSERT_GE(client_fd, 0, "connect to fd")) { + err = -EIO; + goto out; + } + + err += is_mptcp ? 
verify_msk(map_fd, client_fd, sock_skel->bss->token) : + verify_tsk(map_fd, client_fd); + + close(client_fd); + +out: + mptcp_sock__destroy(sock_skel); + return err; +} + +static void test_base(void) +{ + int server_fd, cgroup_fd; + + cgroup_fd = test__join_cgroup("/mptcp"); + if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup")) + return; + + /* without MPTCP */ + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_GE(server_fd, 0, "start_server")) + goto with_mptcp; + + ASSERT_OK(run_test(cgroup_fd, server_fd, false), "run_test tcp"); + + close(server_fd); + +with_mptcp: + /* with MPTCP */ + server_fd = start_mptcp_server(AF_INET, NULL, 0, 0); + if (!ASSERT_GE(server_fd, 0, "start_mptcp_server")) + goto close_cgroup_fd; + + ASSERT_OK(run_test(cgroup_fd, server_fd, true), "run_test mptcp"); + + close(server_fd); + +close_cgroup_fd: + close(cgroup_fd); +} + +void test_mptcp(void) +{ + if (test__start_subtest("base")) + test_base(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/netcnt.c b/tools/testing/selftests/bpf/prog_tests/netcnt.c index 954964f0ac3d..d3915c58d0e1 100644 --- a/tools/testing/selftests/bpf/prog_tests/netcnt.c +++ b/tools/testing/selftests/bpf/prog_tests/netcnt.c @@ -25,7 +25,7 @@ void serial_test_netcnt(void) if (!ASSERT_OK_PTR(skel, "netcnt_prog__open_and_load")) return; - nproc = get_nprocs_conf(); + nproc = bpf_num_possible_cpus(); percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc); if (!ASSERT_OK_PTR(percpu_netcnt, "malloc(percpu_netcnt)")) goto err; diff --git a/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c new file mode 100644 index 000000000000..14f2796076e0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +#include "test_progs.h" +#include "testing_helpers.h" + +static void clear_test_state(struct test_state *state) +{ + state->error_cnt = 0; + state->sub_succ_cnt = 0; + state->skip_cnt = 0; +} + +void test_prog_tests_framework(void) +{ + struct test_state *state = env.test_state; + + /* in all the ASSERT calls below we need to return on the first + * error due to the fact that we are cleaning the test state after + * each dummy subtest + */ + + /* test we properly count skipped tests with subtests */ + if (test__start_subtest("test_good_subtest")) + test__end_subtest(); + if (!ASSERT_EQ(state->skip_cnt, 0, "skip_cnt_check")) + return; + if (!ASSERT_EQ(state->error_cnt, 0, "error_cnt_check")) + return; + if (!ASSERT_EQ(state->subtest_num, 1, "subtest_num_check")) + return; + clear_test_state(state); + + if (test__start_subtest("test_skip_subtest")) { + test__skip(); + test__end_subtest(); + } + if (test__start_subtest("test_skip_subtest")) { + test__skip(); + test__end_subtest(); + } + if (!ASSERT_EQ(state->skip_cnt, 2, "skip_cnt_check")) + return; + if (!ASSERT_EQ(state->subtest_num, 3, "subtest_num_check")) + return; + clear_test_state(state); + + if (test__start_subtest("test_fail_subtest")) { + test__fail(); + test__end_subtest(); + } + if (!ASSERT_EQ(state->error_cnt, 1, "error_cnt_check")) + return; + if (!ASSERT_EQ(state->subtest_num, 4, "subtest_num_check")) + return; + clear_test_state(state); +} diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c index 873323fb18ba..739d2ea6ca55 100644 --- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c +++ 
b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c @@ -1,21 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> -static void toggle_object_autoload_progs(const struct bpf_object *obj, - const char *name_load) -{ - struct bpf_program *prog; - - bpf_object__for_each_program(prog, obj) { - const char *name = bpf_program__name(prog); - - if (!strcmp(name_load, name)) - bpf_program__set_autoload(prog, true); - else - bpf_program__set_autoload(prog, false); - } -} - void test_reference_tracking(void) { const char *file = "test_sk_lookup_kern.o"; @@ -39,6 +24,7 @@ void test_reference_tracking(void) goto cleanup; bpf_object__for_each_program(prog, obj_iter) { + struct bpf_program *p; const char *name; name = bpf_program__name(prog); @@ -49,7 +35,12 @@ void test_reference_tracking(void) if (!ASSERT_OK_PTR(obj, "obj_open_file")) goto cleanup; - toggle_object_autoload_progs(obj, name); + /* all programs are not loaded by default, so just set + * autoload to true for the single prog under test + */ + p = bpf_object__find_program_by_name(obj, name); + bpf_program__set_autoload(p, true); + /* Expect verifier failure if test name has 'err' */ if (strncmp(name, "err_", sizeof("err_") - 1) == 0) { libbpf_print_fn_t old_print_fn; diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c index e945195b24c9..eb5f7f5aa81a 100644 --- a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c @@ -50,18 +50,6 @@ void test_ringbuf_multi(void) if (CHECK(!skel, "skel_open", "skeleton open failed\n")) return; - err = bpf_map__set_max_entries(skel->maps.ringbuf1, page_size); - if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n")) - goto cleanup; - - err = bpf_map__set_max_entries(skel->maps.ringbuf2, page_size); - if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n")) - goto cleanup; - - err = bpf_map__set_max_entries(bpf_map__inner_map(skel->maps.ringbuf_arr), page_size); - if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n")) - goto cleanup; - proto_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, NULL, 0, 0, page_size, NULL); if (CHECK(proto_fd < 0, "bpf_map_create", "bpf_map_create failed\n")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c new file mode 100644 index 000000000000..d7f83c0a40a5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include "skb_load_bytes.skel.h" + +void test_skb_load_bytes(void) +{ + struct skb_load_bytes *skel; + int err, prog_fd, test_result; + struct __sk_buff skb = { 0 }; + + LIBBPF_OPTS(bpf_test_run_opts, tattr, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + ); + + skel = skb_load_bytes__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + prog_fd = bpf_program__fd(skel->progs.skb_process); + if (!ASSERT_GE(prog_fd, 0, "prog_fd")) + goto out; + + skel->bss->load_offset = (uint32_t)(-1); + err = bpf_prog_test_run_opts(prog_fd, &tattr); + if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) + goto out; + test_result = skel->bss->test_result; + if (!ASSERT_EQ(test_result, -EFAULT, "offset -1")) + goto out; + + skel->bss->load_offset = 
(uint32_t)10; + err = bpf_prog_test_run_opts(prog_fd, &tattr); + if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) + goto out; + test_result = skel->bss->test_result; + if (!ASSERT_EQ(test_result, 0, "offset 10")) + goto out; + +out: + skb_load_bytes__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c index 394ebfc3bbf3..4be6fdb78c6a 100644 --- a/tools/testing/selftests/bpf/prog_tests/snprintf.c +++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c @@ -83,8 +83,6 @@ cleanup: test_snprintf__destroy(skel); } -#define min(a, b) ((a) < (b) ? (a) : (b)) - /* Loads an eBPF object calling bpf_snprintf with up to 10 characters of fmt */ static int load_single_snprintf(char *fmt) { @@ -95,7 +93,7 @@ static int load_single_snprintf(char *fmt) if (!skel) return -EINVAL; - memcpy(skel->rodata->fmt, fmt, min(strlen(fmt) + 1, 10)); + memcpy(skel->rodata->fmt, fmt, MIN(strlen(fmt) + 1, 10)); ret = test_snprintf_single__load(skel); test_snprintf_single__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c index e8399ae50e77..9ad09a6c538a 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c @@ -8,7 +8,7 @@ void test_stacktrace_build_id(void) int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; struct test_stacktrace_build_id *skel; int err, stack_trace_len; - __u32 key, previous_key, val, duration = 0; + __u32 key, prev_key, val, duration = 0; char buf[256]; int i, j; struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; @@ -58,7 +58,7 @@ retry: "err %d errno %d\n", err, errno)) goto cleanup; - err = bpf_map_get_next_key(stackmap_fd, NULL, &key); + err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key)); if (CHECK(err, "get_next_key from stackmap", "err %d, errno %d\n", err, errno)) goto cleanup; @@ -79,8 +79,8 @@ retry: if (strstr(buf, build_id) != NULL) build_id_matches = 1; } - previous_key = key; - } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); + prev_key = key; + } while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0); /* stack_map_get_build_id_offset() is racy and sometimes can return * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c index f45a1d7b0a28..f4ea1a215ce4 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c @@ -27,7 +27,7 @@ void test_stacktrace_build_id_nmi(void) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, }; - __u32 key, previous_key, val, duration = 0; + __u32 key, prev_key, val, duration = 0; char buf[256]; int i, j; struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; @@ -100,7 +100,7 @@ retry: "err %d errno %d\n", err, errno)) goto cleanup; - err = bpf_map_get_next_key(stackmap_fd, NULL, &key); + err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key)); if (CHECK(err, "get_next_key from stackmap", "err %d, errno %d\n", err, errno)) goto cleanup; @@ -108,7 +108,8 @@ retry: do { char build_id[64]; - err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs); + err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key), + id_offs, sizeof(id_offs), 
0); if (CHECK(err, "lookup_elem from stackmap", "err %d, errno %d\n", err, errno)) goto cleanup; @@ -121,8 +122,8 @@ retry: if (strstr(buf, build_id) != NULL) build_id_matches = 1; } - previous_key = key; - } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); + prev_key = key; + } while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0); /* stack_map_get_build_id_offset() is racy and sometimes can return * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index 7ad66a247c02..958dae769c52 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -949,7 +949,6 @@ fail: return -1; } -#define MAX(a, b) ((a) > (b) ? (a) : (b)) enum { SRC_TO_TARGET = 0, TARGET_TO_SRC = 1, diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c index 509e21d5cb9d..b90ee47d3111 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c +++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c @@ -81,6 +81,7 @@ void test_test_global_funcs(void) { "test_global_func14.o", "reference type('FWD S') size cannot be determined" }, { "test_global_func15.o", "At program exit the register R0 has value" }, { "test_global_func16.o", "invalid indirect read from stack" }, + { "test_global_func17.o", "Caller passes invalid args into func#1" }, }; libbpf_print_fn_t old_print_fn = NULL; int err, i, duration = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c index b57a3009465f..7ddd6615b7e7 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c +++ b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c @@ -44,16 +44,12 @@ static void strncmp_full_str_cmp(struct strncmp_test *skel, const char *name, static void test_strncmp_ret(void) { struct strncmp_test *skel; - struct bpf_program *prog; int err, got; skel = strncmp_test__open(); if (!ASSERT_OK_PTR(skel, "strncmp_test open")) return; - bpf_object__for_each_program(prog, skel->obj) - bpf_program__set_autoload(prog, false); - bpf_program__set_autoload(skel->progs.do_strncmp, true); err = strncmp_test__load(skel); @@ -91,18 +87,13 @@ out: static void test_strncmp_bad_not_const_str_size(void) { struct strncmp_test *skel; - struct bpf_program *prog; int err; skel = strncmp_test__open(); if (!ASSERT_OK_PTR(skel, "strncmp_test open")) return; - bpf_object__for_each_program(prog, skel->obj) - bpf_program__set_autoload(prog, false); - - bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size, - true); + bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size, true); err = strncmp_test__load(skel); ASSERT_ERR(err, "strncmp_test load bad_not_const_str_size"); @@ -113,18 +104,13 @@ static void test_strncmp_bad_not_const_str_size(void) static void test_strncmp_bad_writable_target(void) { struct strncmp_test *skel; - struct bpf_program *prog; int err; skel = strncmp_test__open(); if (!ASSERT_OK_PTR(skel, "strncmp_test open")) return; - bpf_object__for_each_program(prog, skel->obj) - bpf_program__set_autoload(prog, false); - - bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target, - true); + bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target, true); err = strncmp_test__load(skel); ASSERT_ERR(err, "strncmp_test load 
bad_writable_target"); @@ -135,18 +121,13 @@ static void test_strncmp_bad_writable_target(void) static void test_strncmp_bad_not_null_term_target(void) { struct strncmp_test *skel; - struct bpf_program *prog; int err; skel = strncmp_test__open(); if (!ASSERT_OK_PTR(skel, "strncmp_test open")) return; - bpf_object__for_each_program(prog, skel->obj) - bpf_program__set_autoload(prog, false); - - bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target, - true); + bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target, true); err = strncmp_test__load(skel); ASSERT_ERR(err, "strncmp_test load bad_not_null_term_target"); diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c new file mode 100644 index 000000000000..3bba4a2a0530 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * End-to-end eBPF tunnel test suite + * The file tests BPF network tunnel implementation. + * + * Topology: + * --------- + * root namespace | at_ns0 namespace + * | + * ----------- | ----------- + * | tnl dev | | | tnl dev | (overlay network) + * ----------- | ----------- + * metadata-mode | metadata-mode + * with bpf | with bpf + * | + * ---------- | ---------- + * | veth1 | --------- | veth0 | (underlay network) + * ---------- peer ---------- + * + * + * Device Configuration + * -------------------- + * root namespace with metadata-mode tunnel + BPF + * Device names and addresses: + * veth1 IP 1: 172.16.1.200, IPv6: 00::22 (underlay) + * IP 2: 172.16.1.20, IPv6: 00::bb (underlay) + * tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200, IPv6: 1::22 (overlay) + * + * Namespace at_ns0 with native tunnel + * Device names and addresses: + * veth0 IPv4: 172.16.1.100, IPv6: 00::11 (underlay) + * tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100, IPv6: 1::11 (overlay) + * + * + * End-to-end ping packet flow + * --------------------------- + * Most of the tests start by namespace creation, device configuration, + * then ping the underlay and overlay network. When doing 'ping 10.1.1.100' + * from root namespace, the following operations happen: + * 1) Route lookup shows 10.1.1.100/24 belongs to tnl dev, fwd to tnl dev. + * 2) Tnl device's egress BPF program is triggered and set the tunnel metadata, + * with local_ip=172.16.1.200, remote_ip=172.16.1.100. BPF program choose + * the primary or secondary ip of veth1 as the local ip of tunnel. The + * choice is made based on the value of bpf map local_ip_map. + * 3) Outer tunnel header is prepended and route the packet to veth1's egress. + * 4) veth0's ingress queue receive the tunneled packet at namespace at_ns0. + * 5) Tunnel protocol handler, ex: vxlan_rcv, decap the packet. + * 6) Forward the packet to the overlay tnl dev. 
+ */ + +#include <arpa/inet.h> +#include <linux/if_tun.h> +#include <linux/limits.h> +#include <linux/sysctl.h> +#include <linux/time_types.h> +#include <linux/net_tstamp.h> +#include <net/if.h> +#include <stdbool.h> +#include <stdio.h> +#include <sys/stat.h> +#include <unistd.h> + +#include "test_progs.h" +#include "network_helpers.h" +#include "test_tunnel_kern.skel.h" + +#define IP4_ADDR_VETH0 "172.16.1.100" +#define IP4_ADDR1_VETH1 "172.16.1.200" +#define IP4_ADDR2_VETH1 "172.16.1.20" +#define IP4_ADDR_TUNL_DEV0 "10.1.1.100" +#define IP4_ADDR_TUNL_DEV1 "10.1.1.200" + +#define IP6_ADDR_VETH0 "::11" +#define IP6_ADDR1_VETH1 "::22" +#define IP6_ADDR2_VETH1 "::bb" + +#define IP4_ADDR1_HEX_VETH1 0xac1001c8 +#define IP4_ADDR2_HEX_VETH1 0xac100114 +#define IP6_ADDR1_HEX_VETH1 0x22 +#define IP6_ADDR2_HEX_VETH1 0xbb + +#define MAC_TUNL_DEV0 "52:54:00:d9:01:00" +#define MAC_TUNL_DEV1 "52:54:00:d9:02:00" + +#define VXLAN_TUNL_DEV0 "vxlan00" +#define VXLAN_TUNL_DEV1 "vxlan11" +#define IP6VXLAN_TUNL_DEV0 "ip6vxlan00" +#define IP6VXLAN_TUNL_DEV1 "ip6vxlan11" + +#define PING_ARGS "-i 0.01 -c 3 -w 10 -q" + +#define SYS(fmt, ...) \ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + if (!ASSERT_OK(system(cmd), cmd)) \ + goto fail; \ + }) + +#define SYS_NOFAIL(fmt, ...) \ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + system(cmd); \ + }) + +static int config_device(void) +{ + SYS("ip netns add at_ns0"); + SYS("ip link add veth0 type veth peer name veth1"); + SYS("ip link set veth0 netns at_ns0"); + SYS("ip addr add " IP4_ADDR1_VETH1 "/24 dev veth1"); + SYS("ip addr add " IP4_ADDR2_VETH1 "/24 dev veth1"); + SYS("ip link set dev veth1 up mtu 1500"); + SYS("ip netns exec at_ns0 ip addr add " IP4_ADDR_VETH0 "/24 dev veth0"); + SYS("ip netns exec at_ns0 ip link set dev veth0 up mtu 1500"); + + return 0; +fail: + return -1; +} + +static void cleanup(void) +{ + SYS_NOFAIL("test -f /var/run/netns/at_ns0 && ip netns delete at_ns0"); + SYS_NOFAIL("ip link del veth1 2> /dev/null"); + SYS_NOFAIL("ip link del %s 2> /dev/null", VXLAN_TUNL_DEV1); + SYS_NOFAIL("ip link del %s 2> /dev/null", IP6VXLAN_TUNL_DEV1); +} + +static int add_vxlan_tunnel(void) +{ + /* at_ns0 namespace */ + SYS("ip netns exec at_ns0 ip link add dev %s type vxlan external gbp dstport 4789", + VXLAN_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip link set dev %s address %s up", + VXLAN_TUNL_DEV0, MAC_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip addr add dev %s %s/24", + VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip neigh add %s lladdr %s dev %s", + IP4_ADDR_TUNL_DEV1, MAC_TUNL_DEV1, VXLAN_TUNL_DEV0); + + /* root namespace */ + SYS("ip link add dev %s type vxlan external gbp dstport 4789", + VXLAN_TUNL_DEV1); + SYS("ip link set dev %s address %s up", VXLAN_TUNL_DEV1, MAC_TUNL_DEV1); + SYS("ip addr add dev %s %s/24", VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1); + SYS("ip neigh add %s lladdr %s dev %s", + IP4_ADDR_TUNL_DEV0, MAC_TUNL_DEV0, VXLAN_TUNL_DEV1); + + return 0; +fail: + return -1; +} + +static void delete_vxlan_tunnel(void) +{ + SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s", + VXLAN_TUNL_DEV0); + SYS_NOFAIL("ip link delete dev %s", VXLAN_TUNL_DEV1); +} + +static int add_ip6vxlan_tunnel(void) +{ + SYS("ip netns exec at_ns0 ip -6 addr add %s/96 dev veth0", + IP6_ADDR_VETH0); + SYS("ip netns exec at_ns0 ip link set dev veth0 up"); + SYS("ip -6 addr add %s/96 dev veth1", IP6_ADDR1_VETH1); + SYS("ip -6 addr add %s/96 dev veth1", IP6_ADDR2_VETH1); + SYS("ip link set 
dev veth1 up"); + + /* at_ns0 namespace */ + SYS("ip netns exec at_ns0 ip link add dev %s type vxlan external dstport 4789", + IP6VXLAN_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip addr add dev %s %s/24", + IP6VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip link set dev %s address %s up", + IP6VXLAN_TUNL_DEV0, MAC_TUNL_DEV0); + + /* root namespace */ + SYS("ip link add dev %s type vxlan external dstport 4789", + IP6VXLAN_TUNL_DEV1); + SYS("ip addr add dev %s %s/24", IP6VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1); + SYS("ip link set dev %s address %s up", + IP6VXLAN_TUNL_DEV1, MAC_TUNL_DEV1); + + return 0; +fail: + return -1; +} + +static void delete_ip6vxlan_tunnel(void) +{ + SYS_NOFAIL("ip netns exec at_ns0 ip -6 addr delete %s/96 dev veth0", + IP6_ADDR_VETH0); + SYS_NOFAIL("ip -6 addr delete %s/96 dev veth1", IP6_ADDR1_VETH1); + SYS_NOFAIL("ip -6 addr delete %s/96 dev veth1", IP6_ADDR2_VETH1); + SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s", + IP6VXLAN_TUNL_DEV0); + SYS_NOFAIL("ip link delete dev %s", IP6VXLAN_TUNL_DEV1); +} + +static int test_ping(int family, const char *addr) +{ + SYS("%s %s %s > /dev/null", ping_command(family), PING_ARGS, addr); + return 0; +fail: + return -1; +} + +static int attach_tc_prog(struct bpf_tc_hook *hook, int igr_fd, int egr_fd) +{ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1, + .priority = 1, .prog_fd = igr_fd); + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1, + .priority = 1, .prog_fd = egr_fd); + int ret; + + ret = bpf_tc_hook_create(hook); + if (!ASSERT_OK(ret, "create tc hook")) + return ret; + + if (igr_fd >= 0) { + hook->attach_point = BPF_TC_INGRESS; + ret = bpf_tc_attach(hook, &opts1); + if (!ASSERT_OK(ret, "bpf_tc_attach")) { + bpf_tc_hook_destroy(hook); + return ret; + } + } + + if (egr_fd >= 0) { + hook->attach_point = BPF_TC_EGRESS; + ret = bpf_tc_attach(hook, &opts2); + if (!ASSERT_OK(ret, "bpf_tc_attach")) { + bpf_tc_hook_destroy(hook); + return ret; + } + } + + return 0; +} + +static void test_vxlan_tunnel(void) +{ + struct test_tunnel_kern *skel = NULL; + struct nstoken *nstoken; + int local_ip_map_fd = -1; + int set_src_prog_fd, get_src_prog_fd; + int set_dst_prog_fd; + int key = 0, ifindex = -1; + uint local_ip; + int err; + DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, + .attach_point = BPF_TC_INGRESS); + + /* add vxlan tunnel */ + err = add_vxlan_tunnel(); + if (!ASSERT_OK(err, "add vxlan tunnel")) + goto done; + + /* load and attach bpf prog to tunnel dev tc hook point */ + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + goto done; + ifindex = if_nametoindex(VXLAN_TUNL_DEV1); + if (!ASSERT_NEQ(ifindex, 0, "vxlan11 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + get_src_prog_fd = bpf_program__fd(skel->progs.vxlan_get_tunnel_src); + set_src_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_src); + if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd)) + goto done; + + /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */ + nstoken = open_netns("at_ns0"); + if (!ASSERT_OK_PTR(nstoken, "setns src")) + goto done; + ifindex = if_nametoindex(VXLAN_TUNL_DEV0); + if (!ASSERT_NEQ(ifindex, 0, "vxlan00 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + set_dst_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_dst); + if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd")) + 
goto done; + if (attach_tc_prog(&tc_hook, -1, set_dst_prog_fd)) + goto done; + close_netns(nstoken); + + /* use veth1 ip 2 as tunnel source ip */ + local_ip_map_fd = bpf_map__fd(skel->maps.local_ip_map); + if (!ASSERT_GE(local_ip_map_fd, 0, "bpf_map__fd")) + goto done; + local_ip = IP4_ADDR2_HEX_VETH1; + err = bpf_map_update_elem(local_ip_map_fd, &key, &local_ip, BPF_ANY); + if (!ASSERT_OK(err, "update bpf local_ip_map")) + goto done; + + /* ping test */ + err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0); + if (!ASSERT_OK(err, "test_ping")) + goto done; + +done: + /* delete vxlan tunnel */ + delete_vxlan_tunnel(); + if (local_ip_map_fd >= 0) + close(local_ip_map_fd); + if (skel) + test_tunnel_kern__destroy(skel); +} + +static void test_ip6vxlan_tunnel(void) +{ + struct test_tunnel_kern *skel = NULL; + struct nstoken *nstoken; + int local_ip_map_fd = -1; + int set_src_prog_fd, get_src_prog_fd; + int set_dst_prog_fd; + int key = 0, ifindex = -1; + uint local_ip; + int err; + DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, + .attach_point = BPF_TC_INGRESS); + + /* add vxlan tunnel */ + err = add_ip6vxlan_tunnel(); + if (!ASSERT_OK(err, "add_ip6vxlan_tunnel")) + goto done; + + /* load and attach bpf prog to tunnel dev tc hook point */ + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + goto done; + ifindex = if_nametoindex(IP6VXLAN_TUNL_DEV1); + if (!ASSERT_NEQ(ifindex, 0, "ip6vxlan11 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + get_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_get_tunnel_src); + set_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_src); + if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd)) + goto done; + + /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */ + nstoken = open_netns("at_ns0"); + if (!ASSERT_OK_PTR(nstoken, "setns src")) + goto done; + ifindex = if_nametoindex(IP6VXLAN_TUNL_DEV0); + if (!ASSERT_NEQ(ifindex, 0, "ip6vxlan00 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + set_dst_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_dst); + if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd")) + goto done; + if (attach_tc_prog(&tc_hook, -1, set_dst_prog_fd)) + goto done; + close_netns(nstoken); + + /* use veth1 ip 2 as tunnel source ip */ + local_ip_map_fd = bpf_map__fd(skel->maps.local_ip_map); + if (!ASSERT_GE(local_ip_map_fd, 0, "get local_ip_map fd")) + goto done; + local_ip = IP6_ADDR2_HEX_VETH1; + err = bpf_map_update_elem(local_ip_map_fd, &key, &local_ip, BPF_ANY); + if (!ASSERT_OK(err, "update bpf local_ip_map")) + goto done; + + /* ping test */ + err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0); + if (!ASSERT_OK(err, "test_ping")) + goto done; + +done: + /* delete ipv6 vxlan tunnel */ + delete_ip6vxlan_tunnel(); + if (local_ip_map_fd >= 0) + close(local_ip_map_fd); + if (skel) + test_tunnel_kern__destroy(skel); +} + +#define RUN_TEST(name) \ + ({ \ + if (test__start_subtest(#name)) { \ + test_ ## name(); \ + } \ + }) + +static void *test_tunnel_run_tests(void *arg) +{ + cleanup(); + config_device(); + + RUN_TEST(vxlan_tunnel); + RUN_TEST(ip6vxlan_tunnel); + + cleanup(); + + return NULL; +} + +void serial_test_tunnel(void) +{ + pthread_t test_thread; + int err; + + /* Run the tests in their own thread to isolate the namespace changes + * so they do not affect the environment of other tests. 
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns()) + */ + err = pthread_create(&test_thread, NULL, &test_tunnel_run_tests, NULL); + if (ASSERT_OK(err, "pthread_create")) + ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/timer_mim.c b/tools/testing/selftests/bpf/prog_tests/timer_mim.c index 2ee5f5ae11d4..9ff7843909e7 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer_mim.c +++ b/tools/testing/selftests/bpf/prog_tests/timer_mim.c @@ -35,7 +35,7 @@ static int timer_mim(struct timer_mim *timer_skel) ASSERT_EQ(timer_skel->bss->ok, 1 | 2, "ok"); close(bpf_map__fd(timer_skel->maps.inner_htab)); - err = bpf_map_delete_elem(bpf_map__fd(timer_skel->maps.outer_arr), &key1); + err = bpf_map__delete_elem(timer_skel->maps.outer_arr, &key1, sizeof(key1), 0); ASSERT_EQ(err, 0, "delete inner map"); /* check that timer_cb[12] are no longer running */ diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c index 9c795ee52b7b..b0acbda6dbf5 100644 --- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c +++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c @@ -1,126 +1,94 @@ // SPDX-License-Identifier: GPL-2.0-only #define _GNU_SOURCE -#include <sched.h> -#include <sys/prctl.h> #include <test_progs.h> #define MAX_TRAMP_PROGS 38 struct inst { struct bpf_object *obj; - struct bpf_link *link_fentry; - struct bpf_link *link_fexit; + struct bpf_link *link; }; -static int test_task_rename(void) -{ - int fd, duration = 0, err; - char buf[] = "test_overhead"; - - fd = open("/proc/self/comm", O_WRONLY|O_TRUNC); - if (CHECK(fd < 0, "open /proc", "err %d", errno)) - return -1; - err = write(fd, buf, sizeof(buf)); - if (err < 0) { - CHECK(err < 0, "task rename", "err %d", errno); - close(fd); - return -1; - } - close(fd); - return 0; -} - -static struct bpf_link *load(struct bpf_object *obj, const char *name) +static struct bpf_program *load_prog(char *file, char *name, struct inst *inst) { + struct bpf_object *obj; struct bpf_program *prog; - int duration = 0; + int err; + + obj = bpf_object__open_file(file, NULL); + if (!ASSERT_OK_PTR(obj, "obj_open_file")) + return NULL; + + inst->obj = obj; + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "obj_load")) + return NULL; prog = bpf_object__find_program_by_name(obj, name); - if (CHECK(!prog, "find_probe", "prog '%s' not found\n", name)) - return ERR_PTR(-EINVAL); - return bpf_program__attach_trace(prog); + if (!ASSERT_OK_PTR(prog, "obj_find_prog")) + return NULL; + + return prog; } /* TODO: use different target function to run in concurrent mode */ void serial_test_trampoline_count(void) { - const char *fentry_name = "prog1"; - const char *fexit_name = "prog2"; - const char *object = "test_trampoline_count.o"; - struct inst inst[MAX_TRAMP_PROGS] = {}; - int err, i = 0, duration = 0; - struct bpf_object *obj; + char *file = "test_trampoline_count.o"; + char *const progs[] = { "fentry_test", "fmod_ret_test", "fexit_test" }; + struct inst inst[MAX_TRAMP_PROGS + 1] = {}; + struct bpf_program *prog; struct bpf_link *link; - char comm[16] = {}; + int prog_fd, err, i; + LIBBPF_OPTS(bpf_test_run_opts, opts); /* attach 'allowed' trampoline programs */ for (i = 0; i < MAX_TRAMP_PROGS; i++) { - obj = bpf_object__open_file(object, NULL); - if (!ASSERT_OK_PTR(obj, "obj_open_file")) { - obj = NULL; + prog = load_prog(file, progs[i % ARRAY_SIZE(progs)], &inst[i]); + if (!prog) goto cleanup; - } - 
err = bpf_object__load(obj); - if (CHECK(err, "obj_load", "err %d\n", err)) + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "attach_prog")) goto cleanup; - inst[i].obj = obj; - obj = NULL; - - if (rand() % 2) { - link = load(inst[i].obj, fentry_name); - if (!ASSERT_OK_PTR(link, "attach_prog")) { - link = NULL; - goto cleanup; - } - inst[i].link_fentry = link; - } else { - link = load(inst[i].obj, fexit_name); - if (!ASSERT_OK_PTR(link, "attach_prog")) { - link = NULL; - goto cleanup; - } - inst[i].link_fexit = link; - } + + inst[i].link = link; } /* and try 1 extra.. */ - obj = bpf_object__open_file(object, NULL); - if (!ASSERT_OK_PTR(obj, "obj_open_file")) { - obj = NULL; + prog = load_prog(file, "fmod_ret_test", &inst[i]); + if (!prog) goto cleanup; - } - - err = bpf_object__load(obj); - if (CHECK(err, "obj_load", "err %d\n", err)) - goto cleanup_extra; /* ..that needs to fail */ - link = load(obj, fentry_name); - err = libbpf_get_error(link); - if (!ASSERT_ERR_PTR(link, "cannot attach over the limit")) { - bpf_link__destroy(link); - goto cleanup_extra; + link = bpf_program__attach(prog); + if (!ASSERT_ERR_PTR(link, "attach_prog")) { + inst[i].link = link; + goto cleanup; } /* with E2BIG error */ - ASSERT_EQ(err, -E2BIG, "proper error check"); - ASSERT_EQ(link, NULL, "ptr_is_null"); + if (!ASSERT_EQ(libbpf_get_error(link), -E2BIG, "E2BIG")) + goto cleanup; + if (!ASSERT_EQ(link, NULL, "ptr_is_null")) + goto cleanup; /* and finaly execute the probe */ - if (CHECK_FAIL(prctl(PR_GET_NAME, comm, 0L, 0L, 0L))) - goto cleanup_extra; - CHECK_FAIL(test_task_rename()); - CHECK_FAIL(prctl(PR_SET_NAME, comm, 0L, 0L, 0L)); + prog_fd = bpf_program__fd(prog); + if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd")) + goto cleanup; + + err = bpf_prog_test_run_opts(prog_fd, &opts); + if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) + goto cleanup; + + ASSERT_EQ(opts.retval & 0xffff, 4, "bpf_modify_return_test.result"); + ASSERT_EQ(opts.retval >> 16, 1, "bpf_modify_return_test.side_effect"); -cleanup_extra: - bpf_object__close(obj); cleanup: - if (i >= MAX_TRAMP_PROGS) - i = MAX_TRAMP_PROGS - 1; for (; i >= 0; i--) { - bpf_link__destroy(inst[i].link_fentry); - bpf_link__destroy(inst[i].link_fexit); + bpf_link__destroy(inst[i].link); bpf_object__close(inst[i].obj); } } diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c new file mode 100644 index 000000000000..1ed3cc2092db --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Oracle and/or its affiliates. */ + +#include <test_progs.h> +#include <bpf/btf.h> + +#include "test_unpriv_bpf_disabled.skel.h" + +#include "cap_helpers.h" + +/* Using CAP_LAST_CAP is risky here, since it can get pulled in from + * an old /usr/include/linux/capability.h and be < CAP_BPF; as a result + * CAP_BPF would not be included in ALL_CAPS. Instead use CAP_BPF as + * we know its value is correct since it is explicitly defined in + * cap_helpers.h. 
+ */ +#define ALL_CAPS ((2ULL << CAP_BPF) - 1) + +#define PINPATH "/sys/fs/bpf/unpriv_bpf_disabled_" +#define NUM_MAPS 7 + +static __u32 got_perfbuf_val; +static __u32 got_ringbuf_val; + +static int process_ringbuf(void *ctx, void *data, size_t len) +{ + if (ASSERT_EQ(len, sizeof(__u32), "ringbuf_size_valid")) + got_ringbuf_val = *(__u32 *)data; + return 0; +} + +static void process_perfbuf(void *ctx, int cpu, void *data, __u32 len) +{ + if (ASSERT_EQ(len, sizeof(__u32), "perfbuf_size_valid")) + got_perfbuf_val = *(__u32 *)data; +} + +static int sysctl_set(const char *sysctl_path, char *old_val, const char *new_val) +{ + int ret = 0; + FILE *fp; + + fp = fopen(sysctl_path, "r+"); + if (!fp) + return -errno; + if (old_val && fscanf(fp, "%s", old_val) <= 0) { + ret = -ENOENT; + } else if (!old_val || strcmp(old_val, new_val) != 0) { + fseek(fp, 0, SEEK_SET); + if (fprintf(fp, "%s", new_val) < 0) + ret = -errno; + } + fclose(fp); + + return ret; +} + +static void test_unpriv_bpf_disabled_positive(struct test_unpriv_bpf_disabled *skel, + __u32 prog_id, int prog_fd, int perf_fd, + char **map_paths, int *map_fds) +{ + struct perf_buffer *perfbuf = NULL; + struct ring_buffer *ringbuf = NULL; + int i, nr_cpus, link_fd = -1; + + nr_cpus = bpf_num_possible_cpus(); + + skel->bss->perfbuf_val = 1; + skel->bss->ringbuf_val = 2; + + /* Positive tests for unprivileged BPF disabled. Verify we can + * - retrieve and interact with pinned maps; + * - set up and interact with perf buffer; + * - set up and interact with ring buffer; + * - create a link + */ + perfbuf = perf_buffer__new(bpf_map__fd(skel->maps.perfbuf), 8, process_perfbuf, NULL, NULL, + NULL); + if (!ASSERT_OK_PTR(perfbuf, "perf_buffer__new")) + goto cleanup; + + ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), process_ringbuf, NULL, NULL); + if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new")) + goto cleanup; + + /* trigger & validate perf event, ringbuf output */ + usleep(1); + + ASSERT_GT(perf_buffer__poll(perfbuf, 100), -1, "perf_buffer__poll"); + ASSERT_EQ(got_perfbuf_val, skel->bss->perfbuf_val, "check_perfbuf_val"); + ASSERT_EQ(ring_buffer__consume(ringbuf), 1, "ring_buffer__consume"); + ASSERT_EQ(got_ringbuf_val, skel->bss->ringbuf_val, "check_ringbuf_val"); + + for (i = 0; i < NUM_MAPS; i++) { + map_fds[i] = bpf_obj_get(map_paths[i]); + if (!ASSERT_GT(map_fds[i], -1, "obj_get")) + goto cleanup; + } + + for (i = 0; i < NUM_MAPS; i++) { + bool prog_array = strstr(map_paths[i], "prog_array") != NULL; + bool array = strstr(map_paths[i], "array") != NULL; + bool buf = strstr(map_paths[i], "buf") != NULL; + __u32 key = 0, vals[nr_cpus], lookup_vals[nr_cpus]; + __u32 expected_val = 1; + int j; + + /* skip ringbuf, perfbuf */ + if (buf) + continue; + + for (j = 0; j < nr_cpus; j++) + vals[j] = expected_val; + + if (prog_array) { + /* need valid prog array value */ + vals[0] = prog_fd; + /* prog array lookup returns prog id, not fd */ + expected_val = prog_id; + } + ASSERT_OK(bpf_map_update_elem(map_fds[i], &key, vals, 0), "map_update_elem"); + ASSERT_OK(bpf_map_lookup_elem(map_fds[i], &key, &lookup_vals), "map_lookup_elem"); + ASSERT_EQ(lookup_vals[0], expected_val, "map_lookup_elem_values"); + if (!array) + ASSERT_OK(bpf_map_delete_elem(map_fds[i], &key), "map_delete_elem"); + } + + link_fd = bpf_link_create(bpf_program__fd(skel->progs.handle_perf_event), perf_fd, + BPF_PERF_EVENT, NULL); + ASSERT_GT(link_fd, 0, "link_create"); + +cleanup: + if (link_fd) + close(link_fd); + if (perfbuf) + perf_buffer__free(perfbuf); + if (ringbuf) + 
ring_buffer__free(ringbuf); +} + +static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *skel, + __u32 prog_id, int prog_fd, int perf_fd, + char **map_paths, int *map_fds) +{ + const struct bpf_insn prog_insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn); + LIBBPF_OPTS(bpf_prog_load_opts, load_opts); + struct bpf_map_info map_info = {}; + __u32 map_info_len = sizeof(map_info); + struct bpf_link_info link_info = {}; + __u32 link_info_len = sizeof(link_info); + struct btf *btf = NULL; + __u32 attach_flags = 0; + __u32 prog_ids[3] = {}; + __u32 prog_cnt = 3; + __u32 next; + int i; + + /* Negative tests for unprivileged BPF disabled. Verify we cannot + * - load BPF programs; + * - create BPF maps; + * - get a prog/map/link fd by id; + * - get next prog/map/link id + * - query prog + * - BTF load + */ + ASSERT_EQ(bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "simple_prog", "GPL", + prog_insns, prog_insn_cnt, &load_opts), + -EPERM, "prog_load_fails"); + + for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++) + ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL), + -EPERM, "map_create_fails"); + + ASSERT_EQ(bpf_prog_get_fd_by_id(prog_id), -EPERM, "prog_get_fd_by_id_fails"); + ASSERT_EQ(bpf_prog_get_next_id(prog_id, &next), -EPERM, "prog_get_next_id_fails"); + ASSERT_EQ(bpf_prog_get_next_id(0, &next), -EPERM, "prog_get_next_id_fails"); + + if (ASSERT_OK(bpf_obj_get_info_by_fd(map_fds[0], &map_info, &map_info_len), + "obj_get_info_by_fd")) { + ASSERT_EQ(bpf_map_get_fd_by_id(map_info.id), -EPERM, "map_get_fd_by_id_fails"); + ASSERT_EQ(bpf_map_get_next_id(map_info.id, &next), -EPERM, + "map_get_next_id_fails"); + } + ASSERT_EQ(bpf_map_get_next_id(0, &next), -EPERM, "map_get_next_id_fails"); + + if (ASSERT_OK(bpf_obj_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter), + &link_info, &link_info_len), + "obj_get_info_by_fd")) { + ASSERT_EQ(bpf_link_get_fd_by_id(link_info.id), -EPERM, "link_get_fd_by_id_fails"); + ASSERT_EQ(bpf_link_get_next_id(link_info.id, &next), -EPERM, + "link_get_next_id_fails"); + } + ASSERT_EQ(bpf_link_get_next_id(0, &next), -EPERM, "link_get_next_id_fails"); + + ASSERT_EQ(bpf_prog_query(prog_fd, BPF_TRACE_FENTRY, 0, &attach_flags, prog_ids, + &prog_cnt), -EPERM, "prog_query_fails"); + + btf = btf__new_empty(); + if (ASSERT_OK_PTR(btf, "empty_btf") && + ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "unpriv_int_type")) { + const void *raw_btf_data; + __u32 raw_btf_size; + + raw_btf_data = btf__raw_data(btf, &raw_btf_size); + if (ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good")) + ASSERT_EQ(bpf_btf_load(raw_btf_data, raw_btf_size, NULL), -EPERM, + "bpf_btf_load_fails"); + } + btf__free(btf); +} + +void test_unpriv_bpf_disabled(void) +{ + char *map_paths[NUM_MAPS] = { PINPATH "array", + PINPATH "percpu_array", + PINPATH "hash", + PINPATH "percpu_hash", + PINPATH "perfbuf", + PINPATH "ringbuf", + PINPATH "prog_array" }; + int map_fds[NUM_MAPS]; + struct test_unpriv_bpf_disabled *skel; + char unprivileged_bpf_disabled_orig[32] = {}; + char perf_event_paranoid_orig[32] = {}; + struct bpf_prog_info prog_info = {}; + __u32 prog_info_len = sizeof(prog_info); + struct perf_event_attr attr = {}; + int prog_fd, perf_fd = -1, i, ret; + __u64 save_caps = 0; + __u32 prog_id; + + skel = test_unpriv_bpf_disabled__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->bss->test_pid = getpid(); + + map_fds[0] = 
bpf_map__fd(skel->maps.array); + map_fds[1] = bpf_map__fd(skel->maps.percpu_array); + map_fds[2] = bpf_map__fd(skel->maps.hash); + map_fds[3] = bpf_map__fd(skel->maps.percpu_hash); + map_fds[4] = bpf_map__fd(skel->maps.perfbuf); + map_fds[5] = bpf_map__fd(skel->maps.ringbuf); + map_fds[6] = bpf_map__fd(skel->maps.prog_array); + + for (i = 0; i < NUM_MAPS; i++) + ASSERT_OK(bpf_obj_pin(map_fds[i], map_paths[i]), "pin map_fd"); + + /* allow user without caps to use perf events */ + if (!ASSERT_OK(sysctl_set("/proc/sys/kernel/perf_event_paranoid", perf_event_paranoid_orig, + "-1"), + "set_perf_event_paranoid")) + goto cleanup; + /* ensure unprivileged bpf disabled is set */ + ret = sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled", + unprivileged_bpf_disabled_orig, "2"); + if (ret == -EPERM) { + /* if unprivileged_bpf_disabled=1, we get -EPERM back; that's okay. */ + if (!ASSERT_OK(strcmp(unprivileged_bpf_disabled_orig, "1"), + "unprivileged_bpf_disabled_on")) + goto cleanup; + } else { + if (!ASSERT_OK(ret, "set unprivileged_bpf_disabled")) + goto cleanup; + } + + prog_fd = bpf_program__fd(skel->progs.sys_nanosleep_enter); + ASSERT_OK(bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len), + "obj_get_info_by_fd"); + prog_id = prog_info.id; + ASSERT_GT(prog_id, 0, "valid_prog_id"); + + attr.size = sizeof(attr); + attr.type = PERF_TYPE_SOFTWARE; + attr.config = PERF_COUNT_SW_CPU_CLOCK; + attr.freq = 1; + attr.sample_freq = 1000; + perf_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); + if (!ASSERT_GE(perf_fd, 0, "perf_fd")) + goto cleanup; + + if (!ASSERT_OK(test_unpriv_bpf_disabled__attach(skel), "skel_attach")) + goto cleanup; + + if (!ASSERT_OK(cap_disable_effective(ALL_CAPS, &save_caps), "disable caps")) + goto cleanup; + + if (test__start_subtest("unpriv_bpf_disabled_positive")) + test_unpriv_bpf_disabled_positive(skel, prog_id, prog_fd, perf_fd, map_paths, + map_fds); + + if (test__start_subtest("unpriv_bpf_disabled_negative")) + test_unpriv_bpf_disabled_negative(skel, prog_id, prog_fd, perf_fd, map_paths, + map_fds); + +cleanup: + close(perf_fd); + if (save_caps) + cap_enable_effective(save_caps, NULL); + if (strlen(perf_event_paranoid_orig) > 0) + sysctl_set("/proc/sys/kernel/perf_event_paranoid", NULL, perf_event_paranoid_orig); + if (strlen(unprivileged_bpf_disabled_orig) > 0) + sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled", NULL, + unprivileged_bpf_disabled_orig); + for (i = 0; i < NUM_MAPS; i++) + unlink(map_paths[i]); + test_unpriv_bpf_disabled__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c new file mode 100644 index 000000000000..35b87c7ba5be --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Oracle and/or its affiliates. 
*/ + +#include <test_progs.h> +#include "test_uprobe_autoattach.skel.h" + +/* uprobe attach point */ +static noinline int autoattach_trigger_func(int arg) +{ + asm volatile (""); + return arg + 1; +} + +void test_uprobe_autoattach(void) +{ + struct test_uprobe_autoattach *skel; + int trigger_val = 100, trigger_ret; + size_t malloc_sz = 1; + char *mem; + + skel = test_uprobe_autoattach__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + if (!ASSERT_OK(test_uprobe_autoattach__attach(skel), "skel_attach")) + goto cleanup; + + skel->bss->test_pid = getpid(); + + /* trigger & validate uprobe & uretprobe */ + trigger_ret = autoattach_trigger_func(trigger_val); + + skel->bss->test_pid = getpid(); + + /* trigger & validate shared library u[ret]probes attached by name */ + mem = malloc(malloc_sz); + + ASSERT_EQ(skel->bss->uprobe_byname_parm1, trigger_val, "check_uprobe_byname_parm1"); + ASSERT_EQ(skel->bss->uprobe_byname_ran, 1, "check_uprobe_byname_ran"); + ASSERT_EQ(skel->bss->uretprobe_byname_rc, trigger_ret, "check_uretprobe_byname_rc"); + ASSERT_EQ(skel->bss->uretprobe_byname_ran, 2, "check_uretprobe_byname_ran"); + ASSERT_EQ(skel->bss->uprobe_byname2_parm1, malloc_sz, "check_uprobe_byname2_parm1"); + ASSERT_EQ(skel->bss->uprobe_byname2_ran, 3, "check_uprobe_byname2_ran"); + ASSERT_EQ(skel->bss->uretprobe_byname2_rc, mem, "check_uretprobe_byname2_rc"); + ASSERT_EQ(skel->bss->uretprobe_byname2_ran, 4, "check_uretprobe_byname2_ran"); + + free(mem); +cleanup: + test_uprobe_autoattach__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c new file mode 100644 index 000000000000..5f733d50b0d7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/usdt.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> + +#define _SDT_HAS_SEMAPHORES 1 +#include "../sdt.h" + +#include "test_usdt.skel.h" +#include "test_urandom_usdt.skel.h" + +int lets_test_this(int); + +static volatile int idx = 2; +static volatile __u64 bla = 0xFEDCBA9876543210ULL; +static volatile short nums[] = {-1, -2, -3, }; + +static volatile struct { + int x; + signed char y; +} t1 = { 1, -127 }; + +#define SEC(name) __attribute__((section(name), used)) + +unsigned short test_usdt0_semaphore SEC(".probes"); +unsigned short test_usdt3_semaphore SEC(".probes"); +unsigned short test_usdt12_semaphore SEC(".probes"); + +static void __always_inline trigger_func(int x) { + long y = 42; + + if (test_usdt0_semaphore) + STAP_PROBE(test, usdt0); + if (test_usdt3_semaphore) + STAP_PROBE3(test, usdt3, x, y, &bla); + if (test_usdt12_semaphore) { + STAP_PROBE12(test, usdt12, + x, x + 1, y, x + y, 5, + y / 7, bla, &bla, -9, nums[x], + nums[idx], t1.y); + } +} + +static void subtest_basic_usdt(void) +{ + LIBBPF_OPTS(bpf_usdt_opts, opts); + struct test_usdt *skel; + struct test_usdt__bss *bss; + int err; + + skel = test_usdt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bss = skel->bss; + bss->my_pid = getpid(); + + err = test_usdt__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + /* usdt0 won't be auto-attached */ + opts.usdt_cookie = 0xcafedeadbeeffeed; + skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, + 0 /*self*/, "/proc/self/exe", + "test", "usdt0", &opts); + if (!ASSERT_OK_PTR(skel->links.usdt0, "usdt0_link")) + goto cleanup; + + trigger_func(1); + + ASSERT_EQ(bss->usdt0_called, 1, "usdt0_called"); + ASSERT_EQ(bss->usdt3_called, 1, "usdt3_called"); + ASSERT_EQ(bss->usdt12_called, 1, "usdt12_called"); + + ASSERT_EQ(bss->usdt0_cookie, 0xcafedeadbeeffeed, "usdt0_cookie"); + ASSERT_EQ(bss->usdt0_arg_cnt, 0, "usdt0_arg_cnt"); + ASSERT_EQ(bss->usdt0_arg_ret, -ENOENT, "usdt0_arg_ret"); + + /* auto-attached usdt3 gets default zero cookie value */ + ASSERT_EQ(bss->usdt3_cookie, 0, "usdt3_cookie"); + ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt"); + + ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret"); + ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret"); + ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret"); + ASSERT_EQ(bss->usdt3_args[0], 1, "usdt3_arg1"); + ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2"); + ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3"); + + /* auto-attached usdt12 gets default zero cookie value */ + ASSERT_EQ(bss->usdt12_cookie, 0, "usdt12_cookie"); + ASSERT_EQ(bss->usdt12_arg_cnt, 12, "usdt12_arg_cnt"); + + ASSERT_EQ(bss->usdt12_args[0], 1, "usdt12_arg1"); + ASSERT_EQ(bss->usdt12_args[1], 1 + 1, "usdt12_arg2"); + ASSERT_EQ(bss->usdt12_args[2], 42, "usdt12_arg3"); + ASSERT_EQ(bss->usdt12_args[3], 42 + 1, "usdt12_arg4"); + ASSERT_EQ(bss->usdt12_args[4], 5, "usdt12_arg5"); + ASSERT_EQ(bss->usdt12_args[5], 42 / 7, "usdt12_arg6"); + ASSERT_EQ(bss->usdt12_args[6], bla, "usdt12_arg7"); + ASSERT_EQ(bss->usdt12_args[7], (uintptr_t)&bla, "usdt12_arg8"); + ASSERT_EQ(bss->usdt12_args[8], -9, "usdt12_arg9"); + ASSERT_EQ(bss->usdt12_args[9], nums[1], "usdt12_arg10"); + ASSERT_EQ(bss->usdt12_args[10], nums[idx], "usdt12_arg11"); + ASSERT_EQ(bss->usdt12_args[11], t1.y, "usdt12_arg12"); + + /* trigger_func() is marked __always_inline, so USDT invocations will be + * inlined in two different places, meaning that each USDT will have + * at least 2 different places to be attached to. 
This verifies that
+	 * bpf_program__attach_usdt() handles this properly and attaches to
+	 * all possible places of USDT invocation.
+	 */
+	trigger_func(2);
+
+	ASSERT_EQ(bss->usdt0_called, 2, "usdt0_called");
+	ASSERT_EQ(bss->usdt3_called, 2, "usdt3_called");
+	ASSERT_EQ(bss->usdt12_called, 2, "usdt12_called");
+
+	/* only check values that depend on trigger_func()'s input value */
+	ASSERT_EQ(bss->usdt3_args[0], 2, "usdt3_arg1");
+
+	ASSERT_EQ(bss->usdt12_args[0], 2, "usdt12_arg1");
+	ASSERT_EQ(bss->usdt12_args[1], 2 + 1, "usdt12_arg2");
+	ASSERT_EQ(bss->usdt12_args[3], 42 + 2, "usdt12_arg4");
+	ASSERT_EQ(bss->usdt12_args[9], nums[2], "usdt12_arg10");
+
+	/* detach and re-attach usdt3 */
+	bpf_link__destroy(skel->links.usdt3);
+
+	opts.usdt_cookie = 0xBADC00C51E;
+	skel->links.usdt3 = bpf_program__attach_usdt(skel->progs.usdt3, -1 /* any pid */,
+						     "/proc/self/exe", "test", "usdt3", &opts);
+	if (!ASSERT_OK_PTR(skel->links.usdt3, "usdt3_reattach"))
+		goto cleanup;
+
+	trigger_func(3);
+
+	ASSERT_EQ(bss->usdt3_called, 3, "usdt3_called");
+	/* this time usdt3 has custom cookie */
+	ASSERT_EQ(bss->usdt3_cookie, 0xBADC00C51E, "usdt3_cookie");
+	ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt");
+
+	ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret");
+	ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret");
+	ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret");
+	ASSERT_EQ(bss->usdt3_args[0], 3, "usdt3_arg1");
+	ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2");
+	ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3");
+
+cleanup:
+	test_usdt__destroy(skel);
+}
+
+unsigned short test_usdt_100_semaphore SEC(".probes");
+unsigned short test_usdt_300_semaphore SEC(".probes");
+unsigned short test_usdt_400_semaphore SEC(".probes");
+
+#define R10(F, X)  F(X+0); F(X+1);F(X+2); F(X+3); F(X+4); \
+		   F(X+5); F(X+6); F(X+7); F(X+8); F(X+9);
+#define R100(F, X)   R10(F,X+ 0);R10(F,X+10);R10(F,X+20);R10(F,X+30);R10(F,X+40); \
+		     R10(F,X+50);R10(F,X+60);R10(F,X+70);R10(F,X+80);R10(F,X+90);
+
+/* carefully control that we get exactly 100 inlined call sites by forcing inlining */
+static void __always_inline f100(int x)
+{
+	STAP_PROBE1(test, usdt_100, x);
+}
+
+__weak void trigger_100_usdts(void)
+{
+	R100(f100, 0);
+}
+
+/* we shouldn't be able to attach to test:usdt_300 USDT as we don't have as
+ * many slots for specs. It's important that each STAP_PROBE1() invocation
+ * (after unrolling) gets a different arg spec because the compiler inlines x
+ * as a constant
+ */
+static void __always_inline f300(int x)
+{
+	STAP_PROBE1(test, usdt_300, x);
+}
+
+__weak void trigger_300_usdts(void)
+{
+	R100(f300, 0);
+	R100(f300, 100);
+	R100(f300, 200);
+}
+
+static void __always_inline f400(int x __attribute__((unused)))
+{
+	STAP_PROBE1(test, usdt_400, 400);
+}
+
+/* this time we have 400 different USDT call sites, but they have uniform
+ * argument location, so libbpf's spec string deduplication logic should keep
+ * the spec count very small and so we should be able to attach to all 400
+ * call sites
+ */
+__weak void trigger_400_usdts(void)
+{
+	R100(f400, 0);
+	R100(f400, 100);
+	R100(f400, 200);
+	R100(f400, 300);
+}
+
+static void subtest_multispec_usdt(void)
+{
+	LIBBPF_OPTS(bpf_usdt_opts, opts);
+	struct test_usdt *skel;
+	struct test_usdt__bss *bss;
+	int err, i;
+
+	skel = test_usdt__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		return;
+
+	bss = skel->bss;
+	bss->my_pid = getpid();
+
+	err = test_usdt__attach(skel);
+	if (!ASSERT_OK(err, "skel_attach"))
+		goto cleanup;
+
+	/* usdt_100 is auto-attached and there are 100 inlined call sites,
+	 * let's validate that all of them are properly attached to and
+	 * handled from BPF side
+	 */
+	trigger_100_usdts();
+
+	ASSERT_EQ(bss->usdt_100_called, 100, "usdt_100_called");
+	ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum");
+
+	/* Stress test free spec ID tracking. By default libbpf allows up to
+	 * 256 specs to be used, so if we don't return free spec IDs back
+	 * after a few detachments and re-attachments we should run out of
+	 * available spec IDs.
+	 */
+	for (i = 0; i < 2; i++) {
+		bpf_link__destroy(skel->links.usdt_100);
+
+		skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1,
+								"/proc/self/exe",
+								"test", "usdt_100", NULL);
+		if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_100_reattach"))
+			goto cleanup;
+
+		bss->usdt_100_sum = 0;
+		trigger_100_usdts();
+
+		ASSERT_EQ(bss->usdt_100_called, (i + 1) * 100 + 100, "usdt_100_called");
+		ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum");
+	}
+
+	/* Now let's step it up and try to attach a USDT that requires more
+	 * than 256 attach points with a different spec for each.
+	 * Note that we need trigger_300_usdts() only to create 300 USDT call
+	 * sites; we are not going to actually trace them.
+	 */
+	trigger_300_usdts();
+
+	/* we'll reuse usdt_100 BPF program for usdt_300 test */
+	bpf_link__destroy(skel->links.usdt_100);
+	skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, "/proc/self/exe",
+							"test", "usdt_300", NULL);
+	err = -errno;
+	if (!ASSERT_ERR_PTR(skel->links.usdt_100, "usdt_300_bad_attach"))
+		goto cleanup;
+	ASSERT_EQ(err, -E2BIG, "usdt_300_attach_err");
+
+	/* let's check that there are no "dangling" BPF programs attached due
+	 * to partial success of the above test:usdt_300 attachment
+	 */
+	bss->usdt_100_called = 0;
+	bss->usdt_100_sum = 0;
+
+	f300(777); /* this is the 301st instance of usdt_300 */
+
+	ASSERT_EQ(bss->usdt_100_called, 0, "usdt_301_called");
+	ASSERT_EQ(bss->usdt_100_sum, 0, "usdt_301_sum");
+
+	/* This time we have a USDT with 400 inlined invocations, but arg specs
+	 * should be the same across all sites, so libbpf will only need to
+	 * use one spec and thus we'll be able to attach 400 uprobes
+	 * successfully.
+	 *
+	 * Again, we are reusing usdt_100 BPF program.
+ */ + skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, + "/proc/self/exe", + "test", "usdt_400", NULL); + if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_400_attach")) + goto cleanup; + + trigger_400_usdts(); + + ASSERT_EQ(bss->usdt_100_called, 400, "usdt_400_called"); + ASSERT_EQ(bss->usdt_100_sum, 400 * 400, "usdt_400_sum"); + +cleanup: + test_usdt__destroy(skel); +} + +static FILE *urand_spawn(int *pid) +{ + FILE *f; + + /* urandom_read's stdout is wired into f */ + f = popen("./urandom_read 1 report-pid", "r"); + if (!f) + return NULL; + + if (fscanf(f, "%d", pid) != 1) { + pclose(f); + return NULL; + } + + return f; +} + +static int urand_trigger(FILE **urand_pipe) +{ + int exit_code; + + /* pclose() waits for child process to exit and returns their exit code */ + exit_code = pclose(*urand_pipe); + *urand_pipe = NULL; + + return exit_code; +} + +static void subtest_urandom_usdt(bool auto_attach) +{ + struct test_urandom_usdt *skel; + struct test_urandom_usdt__bss *bss; + struct bpf_link *l; + FILE *urand_pipe = NULL; + int err, urand_pid = 0; + + skel = test_urandom_usdt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + urand_pipe = urand_spawn(&urand_pid); + if (!ASSERT_OK_PTR(urand_pipe, "urand_spawn")) + goto cleanup; + + bss = skel->bss; + bss->urand_pid = urand_pid; + + if (auto_attach) { + err = test_urandom_usdt__attach(skel); + if (!ASSERT_OK(err, "skel_auto_attach")) + goto cleanup; + } else { + l = bpf_program__attach_usdt(skel->progs.urand_read_without_sema, + urand_pid, "./urandom_read", + "urand", "read_without_sema", NULL); + if (!ASSERT_OK_PTR(l, "urand_without_sema_attach")) + goto cleanup; + skel->links.urand_read_without_sema = l; + + l = bpf_program__attach_usdt(skel->progs.urand_read_with_sema, + urand_pid, "./urandom_read", + "urand", "read_with_sema", NULL); + if (!ASSERT_OK_PTR(l, "urand_with_sema_attach")) + goto cleanup; + skel->links.urand_read_with_sema = l; + + l = bpf_program__attach_usdt(skel->progs.urandlib_read_without_sema, + urand_pid, "./liburandom_read.so", + "urandlib", "read_without_sema", NULL); + if (!ASSERT_OK_PTR(l, "urandlib_without_sema_attach")) + goto cleanup; + skel->links.urandlib_read_without_sema = l; + + l = bpf_program__attach_usdt(skel->progs.urandlib_read_with_sema, + urand_pid, "./liburandom_read.so", + "urandlib", "read_with_sema", NULL); + if (!ASSERT_OK_PTR(l, "urandlib_with_sema_attach")) + goto cleanup; + skel->links.urandlib_read_with_sema = l; + + } + + /* trigger urandom_read USDTs */ + ASSERT_OK(urand_trigger(&urand_pipe), "urand_exit_code"); + + ASSERT_EQ(bss->urand_read_without_sema_call_cnt, 1, "urand_wo_sema_cnt"); + ASSERT_EQ(bss->urand_read_without_sema_buf_sz_sum, 256, "urand_wo_sema_sum"); + + ASSERT_EQ(bss->urand_read_with_sema_call_cnt, 1, "urand_w_sema_cnt"); + ASSERT_EQ(bss->urand_read_with_sema_buf_sz_sum, 256, "urand_w_sema_sum"); + + ASSERT_EQ(bss->urandlib_read_without_sema_call_cnt, 1, "urandlib_wo_sema_cnt"); + ASSERT_EQ(bss->urandlib_read_without_sema_buf_sz_sum, 256, "urandlib_wo_sema_sum"); + + ASSERT_EQ(bss->urandlib_read_with_sema_call_cnt, 1, "urandlib_w_sema_cnt"); + ASSERT_EQ(bss->urandlib_read_with_sema_buf_sz_sum, 256, "urandlib_w_sema_sum"); + +cleanup: + if (urand_pipe) + pclose(urand_pipe); + test_urandom_usdt__destroy(skel); +} + +void test_usdt(void) +{ + if (test__start_subtest("basic")) + subtest_basic_usdt(); + if (test__start_subtest("multispec")) + subtest_multispec_usdt(); + if (test__start_subtest("urand_auto_attach")) + 
subtest_urandom_usdt(true /* auto_attach */); + if (test__start_subtest("urand_pid_attach")) + subtest_urandom_usdt(false /* auto_attach */); +} |
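
For reference, the semaphore-gated USDT pattern that usdt.c exercises looks roughly like this in a stand-alone traced program. This is a minimal sketch only: the "demo" provider, the "my_probe" name and the program itself are illustrative assumptions, not part of this patch; it mirrors the same _SDT_HAS_SEMAPHORES / ".probes" section convention used by the selftest (which ships its own sdt.h copy, included as "../sdt.h").

/* usdt_demo.c - illustrative sketch, assuming systemtap's sys/sdt.h is available */
#define _SDT_HAS_SEMAPHORES 1
#include <sys/sdt.h>
#include <unistd.h>

/* the semaphore must be named <provider>_<probe>_semaphore and live in the
 * .probes section; a USDT-aware tracer bumps it while a probe is attached
 */
unsigned short demo_my_probe_semaphore __attribute__((section(".probes")));

int main(void)
{
	int i;

	for (i = 0; i < 10; i++) {
		/* argument setup is skipped entirely while nothing is attached */
		if (demo_my_probe_semaphore)
			STAP_PROBE1(demo, my_probe, i);
		sleep(1);
	}
	return 0;
}

A BPF program could then be attached to it with bpf_program__attach_usdt(prog, -1, "./usdt_demo", "demo", "my_probe", NULL), the same libbpf API exercised by subtest_basic_usdt(), subtest_multispec_usdt() and subtest_urandom_usdt() above.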