Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
68 files changed, 9863 insertions, 277 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c index b17bfa0e0aac..bb143de68875 100644 --- a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c +++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c @@ -96,12 +96,80 @@ static void test_parse_test_list(void) goto error; ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name"); ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("t/subtest1,t/subtest2", &set, true), + "parsing"); + if (!ASSERT_EQ(set.cnt, 1, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + if (!ASSERT_EQ(set.tests[0].subtest_cnt, 2, "subtest filters count")) + goto error; + ASSERT_OK(strcmp("t", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("subtest1", set.tests[0].subtests[0]), "subtest name"); + ASSERT_OK(strcmp("subtest2", set.tests[0].subtests[1]), "subtest name"); error: free_test_filter_set(&set); } +static void test_parse_test_list_file(void) +{ + struct test_filter_set set; + char tmpfile[80]; + FILE *fp; + int fd; + + snprintf(tmpfile, sizeof(tmpfile), "/tmp/bpf_arg_parsing_test.XXXXXX"); + fd = mkstemp(tmpfile); + if (!ASSERT_GE(fd, 0, "create tmp")) + return; + + fp = fdopen(fd, "w"); + if (!ASSERT_NEQ(fp, NULL, "fdopen tmp")) { + close(fd); + goto out_remove; + } + + fprintf(fp, "# comment\n"); + fprintf(fp, " test_with_spaces \n"); + fprintf(fp, "testA/subtest # comment\n"); + fprintf(fp, "testB#comment with no space\n"); + fprintf(fp, "testB # duplicate\n"); + fprintf(fp, "testA/subtest # subtest duplicate\n"); + fprintf(fp, "testA/subtest2\n"); + fprintf(fp, "testC_no_eof_newline"); + fflush(fp); + + if (!ASSERT_OK(ferror(fp), "prepare tmp")) + goto out_fclose; + + init_test_filter_set(&set); + + ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file"); + + ASSERT_EQ(set.cnt, 4, "test count"); + ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name"); + ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count"); + ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name"); + ASSERT_EQ(set.tests[1].subtest_cnt, 2, "test 1 subtest count"); + ASSERT_OK(strcmp("subtest", set.tests[1].subtests[0]), "test 1 subtest 0"); + ASSERT_OK(strcmp("subtest2", set.tests[1].subtests[1]), "test 1 subtest 1"); + ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name"); + ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name"); + + free_test_filter_set(&set); + +out_fclose: + fclose(fp); +out_remove: + remove(tmpfile); +} + void test_arg_parsing(void) { if (test__start_subtest("test_parse_test_list")) test_parse_test_list(); + if (test__start_subtest("test_parse_test_list_file")) + test_parse_test_list_file(); } diff --git a/tools/testing/selftests/bpf/prog_tests/assign_reuse.c b/tools/testing/selftests/bpf/prog_tests/assign_reuse.c new file mode 100644 index 000000000000..989ee4d9785b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/assign_reuse.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Isovalent */ +#include <uapi/linux/if_link.h> +#include <test_progs.h> + +#include <netinet/tcp.h> +#include <netinet/udp.h> + +#include "network_helpers.h" +#include "test_assign_reuse.skel.h" + +#define NS_TEST "assign_reuse" +#define LOOPBACK 1 +#define PORT 4443 + +static int attach_reuseport(int sock_fd, int prog_fd) +{ + return setsockopt(sock_fd, 
SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, + &prog_fd, sizeof(prog_fd)); +} + +static __u64 cookie(int fd) +{ + __u64 cookie = 0; + socklen_t cookie_len = sizeof(cookie); + int ret; + + ret = getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len); + ASSERT_OK(ret, "cookie"); + ASSERT_GT(cookie, 0, "cookie_invalid"); + + return cookie; +} + +static int echo_test_udp(int fd_sv) +{ + struct sockaddr_storage addr = {}; + socklen_t len = sizeof(addr); + char buff[1] = {}; + int fd_cl = -1, ret; + + fd_cl = connect_to_fd(fd_sv, 100); + ASSERT_GT(fd_cl, 0, "create_client"); + ASSERT_EQ(getsockname(fd_cl, (void *)&addr, &len), 0, "getsockname"); + + ASSERT_EQ(send(fd_cl, buff, sizeof(buff), 0), 1, "send_client"); + + ret = recv(fd_sv, buff, sizeof(buff), 0); + if (ret < 0) { + close(fd_cl); + return errno; + } + + ASSERT_EQ(ret, 1, "recv_server"); + ASSERT_EQ(sendto(fd_sv, buff, sizeof(buff), 0, (void *)&addr, len), 1, "send_server"); + ASSERT_EQ(recv(fd_cl, buff, sizeof(buff), 0), 1, "recv_client"); + close(fd_cl); + return 0; +} + +static int echo_test_tcp(int fd_sv) +{ + char buff[1] = {}; + int fd_cl = -1, fd_sv_cl = -1; + + fd_cl = connect_to_fd(fd_sv, 100); + if (fd_cl < 0) + return errno; + + fd_sv_cl = accept(fd_sv, NULL, NULL); + ASSERT_GE(fd_sv_cl, 0, "accept_fd"); + + ASSERT_EQ(send(fd_cl, buff, sizeof(buff), 0), 1, "send_client"); + ASSERT_EQ(recv(fd_sv_cl, buff, sizeof(buff), 0), 1, "recv_server"); + ASSERT_EQ(send(fd_sv_cl, buff, sizeof(buff), 0), 1, "send_server"); + ASSERT_EQ(recv(fd_cl, buff, sizeof(buff), 0), 1, "recv_client"); + close(fd_sv_cl); + close(fd_cl); + return 0; +} + +void run_assign_reuse(int family, int sotype, const char *ip, __u16 port) +{ + DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, + .ifindex = LOOPBACK, + .attach_point = BPF_TC_INGRESS, + ); + DECLARE_LIBBPF_OPTS(bpf_tc_opts, tc_opts, + .handle = 1, + .priority = 1, + ); + bool hook_created = false, tc_attached = false; + int ret, fd_tc, fd_accept, fd_drop, fd_map; + int *fd_sv = NULL; + __u64 fd_val; + struct test_assign_reuse *skel; + const int zero = 0; + + skel = test_assign_reuse__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + skel->rodata->dest_port = port; + + ret = test_assign_reuse__load(skel); + if (!ASSERT_OK(ret, "skel_load")) + goto cleanup; + + ASSERT_EQ(skel->bss->sk_cookie_seen, 0, "cookie_init"); + + fd_tc = bpf_program__fd(skel->progs.tc_main); + fd_accept = bpf_program__fd(skel->progs.reuse_accept); + fd_drop = bpf_program__fd(skel->progs.reuse_drop); + fd_map = bpf_map__fd(skel->maps.sk_map); + + fd_sv = start_reuseport_server(family, sotype, ip, port, 100, 1); + if (!ASSERT_NEQ(fd_sv, NULL, "start_reuseport_server")) + goto cleanup; + + ret = attach_reuseport(*fd_sv, fd_drop); + if (!ASSERT_OK(ret, "attach_reuseport")) + goto cleanup; + + fd_val = *fd_sv; + ret = bpf_map_update_elem(fd_map, &zero, &fd_val, BPF_NOEXIST); + if (!ASSERT_OK(ret, "bpf_sk_map")) + goto cleanup; + + ret = bpf_tc_hook_create(&tc_hook); + if (ret == 0) + hook_created = true; + ret = ret == -EEXIST ? 
0 : ret; + if (!ASSERT_OK(ret, "bpf_tc_hook_create")) + goto cleanup; + + tc_opts.prog_fd = fd_tc; + ret = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(ret, "bpf_tc_attach")) + goto cleanup; + tc_attached = true; + + if (sotype == SOCK_STREAM) + ASSERT_EQ(echo_test_tcp(*fd_sv), ECONNREFUSED, "drop_tcp"); + else + ASSERT_EQ(echo_test_udp(*fd_sv), EAGAIN, "drop_udp"); + ASSERT_EQ(skel->bss->reuseport_executed, 1, "program executed once"); + + skel->bss->sk_cookie_seen = 0; + skel->bss->reuseport_executed = 0; + ASSERT_OK(attach_reuseport(*fd_sv, fd_accept), "attach_reuseport(accept)"); + + if (sotype == SOCK_STREAM) + ASSERT_EQ(echo_test_tcp(*fd_sv), 0, "echo_tcp"); + else + ASSERT_EQ(echo_test_udp(*fd_sv), 0, "echo_udp"); + + ASSERT_EQ(skel->bss->sk_cookie_seen, cookie(*fd_sv), + "cookie_mismatch"); + ASSERT_EQ(skel->bss->reuseport_executed, 1, "program executed once"); +cleanup: + if (tc_attached) { + tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; + ret = bpf_tc_detach(&tc_hook, &tc_opts); + ASSERT_OK(ret, "bpf_tc_detach"); + } + if (hook_created) { + tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; + bpf_tc_hook_destroy(&tc_hook); + } + test_assign_reuse__destroy(skel); + free_fds(fd_sv, 1); +} + +void test_assign_reuse(void) +{ + struct nstoken *tok = NULL; + + SYS(out, "ip netns add %s", NS_TEST); + SYS(cleanup, "ip -net %s link set dev lo up", NS_TEST); + + tok = open_netns(NS_TEST); + if (!ASSERT_OK_PTR(tok, "netns token")) + return; + + if (test__start_subtest("tcpv4")) + run_assign_reuse(AF_INET, SOCK_STREAM, "127.0.0.1", PORT); + if (test__start_subtest("tcpv6")) + run_assign_reuse(AF_INET6, SOCK_STREAM, "::1", PORT); + if (test__start_subtest("udpv4")) + run_assign_reuse(AF_INET, SOCK_DGRAM, "127.0.0.1", PORT); + if (test__start_subtest("udpv6")) + run_assign_reuse(AF_INET6, SOCK_DGRAM, "::1", PORT); + +cleanup: + close_netns(tok); + SYS_NOFAIL("ip netns delete %s", NS_TEST); +out: + return; +} diff --git a/tools/testing/selftests/bpf/prog_tests/async_stack_depth.c b/tools/testing/selftests/bpf/prog_tests/async_stack_depth.c new file mode 100644 index 000000000000..118abc29b236 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/async_stack_depth.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +#include "async_stack_depth.skel.h" + +void test_async_stack_depth(void) +{ + RUN_TESTS(async_stack_depth); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 26b2d1bffdfd..1454cebc262b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -11,6 +11,7 @@ #include <bpf/btf.h> #include "test_bpf_cookie.skel.h" #include "kprobe_multi.skel.h" +#include "uprobe_multi.skel.h" /* uprobe attach point */ static noinline void trigger_func(void) @@ -239,6 +240,81 @@ cleanup: bpf_link__destroy(link1); kprobe_multi__destroy(skel); } + +/* defined in prog_tests/uprobe_multi_test.c */ +void uprobe_multi_func_1(void); +void uprobe_multi_func_2(void); +void uprobe_multi_func_3(void); + +static void uprobe_multi_test_run(struct uprobe_multi *skel) +{ + skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; + skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; + skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; + + skel->bss->pid = getpid(); + skel->bss->test_cookie = true; + + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + 
ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 1, "uprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 1, "uprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 1, "uprobe_multi_func_3_result"); + + ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 1, "uretprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 1, "uretprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 1, "uretprobe_multi_func_3_result"); +} + +static void uprobe_multi_attach_api_subtest(void) +{ + struct bpf_link *link1 = NULL, *link2 = NULL; + struct uprobe_multi *skel = NULL; + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + const char *syms[3] = { + "uprobe_multi_func_1", + "uprobe_multi_func_2", + "uprobe_multi_func_3", + }; + __u64 cookies[3]; + + cookies[0] = 3; /* uprobe_multi_func_1 */ + cookies[1] = 1; /* uprobe_multi_func_2 */ + cookies[2] = 2; /* uprobe_multi_func_3 */ + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = &cookies[0]; + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi")) + goto cleanup; + + link1 = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1, + "/proc/self/exe", NULL, &opts); + if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + cookies[0] = 2; /* uprobe_multi_func_1 */ + cookies[1] = 3; /* uprobe_multi_func_2 */ + cookies[2] = 1; /* uprobe_multi_func_3 */ + + opts.retprobe = true; + link2 = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, -1, + "/proc/self/exe", NULL, &opts); + if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe")) + goto cleanup; + + uprobe_multi_test_run(skel); + +cleanup: + bpf_link__destroy(link2); + bpf_link__destroy(link1); + uprobe_multi__destroy(skel); +} + static void uprobe_subtest(struct test_bpf_cookie *skel) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); @@ -515,6 +591,8 @@ void test_bpf_cookie(void) kprobe_multi_attach_api_subtest(); if (test__start_subtest("uprobe")) uprobe_subtest(skel); + if (test__start_subtest("multi_uprobe_attach_api")) + uprobe_multi_attach_api_subtest(); if (test__start_subtest("tracepoint")) tp_subtest(skel); if (test__start_subtest("perf_event")) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c index a4d0cc9d3367..fe2c502e5089 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c @@ -11,6 +11,7 @@ #include "ksym_race.skel.h" #include "bpf_mod_race.skel.h" #include "kfunc_call_race.skel.h" +#include "testing_helpers.h" /* This test crafts a race between btf_try_get_module and do_init_module, and * checks whether btf_try_get_module handles the invocation for a well-formed @@ -44,35 +45,10 @@ enum bpf_test_state { static _Atomic enum bpf_test_state state = _TS_INVALID; -static int sys_finit_module(int fd, const char *param_values, int flags) -{ - return syscall(__NR_finit_module, fd, param_values, flags); -} - -static int sys_delete_module(const char *name, unsigned int flags) -{ - return syscall(__NR_delete_module, name, flags); -} - -static int load_module(const char *mod) -{ - int ret, fd; - - fd = open("bpf_testmod.ko", O_RDONLY); - if (fd < 0) - return fd; - - ret = sys_finit_module(fd, "", 0); - close(fd); - if (ret < 0) - return ret; - return 0; -} - static void *load_module_thread(void *p) { - if (!ASSERT_NEQ(load_module("bpf_testmod.ko"), 0, "load_module_thread 
must fail")) + if (!ASSERT_NEQ(load_bpf_testmod(false), 0, "load_module_thread must fail")) atomic_store(&state, TS_MODULE_LOAD); else atomic_store(&state, TS_MODULE_LOAD_FAIL); @@ -124,7 +100,7 @@ static void test_bpf_mod_race_config(const struct test_config *config) if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration")) return; - if (!ASSERT_OK(sys_delete_module("bpf_testmod", 0), "unload bpf_testmod")) + if (!ASSERT_OK(unload_bpf_testmod(false), "unload bpf_testmod")) goto end_mmap; skel = bpf_mod_race__open(); @@ -202,8 +178,8 @@ end_destroy: bpf_mod_race__destroy(skel); ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu"); end_module: - sys_delete_module("bpf_testmod", 0); - ASSERT_OK(load_module("bpf_testmod.ko"), "restore bpf_testmod"); + unload_bpf_testmod(false); + ASSERT_OK(load_bpf_testmod(false), "restore bpf_testmod"); end_mmap: munmap(fault_addr, 4096); atomic_store(&state, _TS_INVALID); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c index c8ba4009e4ab..b30ff6b3b81a 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c @@ -123,12 +123,13 @@ static void test_bpf_nf_ct(int mode) ASSERT_EQ(skel->data->test_snat_addr, 0, "Test for source natting"); ASSERT_EQ(skel->data->test_dnat_addr, 0, "Test for destination natting"); end: - if (srv_client_fd != -1) - close(srv_client_fd); if (client_fd != -1) close(client_fd); + if (srv_client_fd != -1) + close(srv_client_fd); if (srv_fd != -1) close(srv_fd); + snprintf(cmd, sizeof(cmd), iptables, "-D"); system(cmd); test_bpf_nf__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c new file mode 100644 index 000000000000..ee0458a5ce78 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ +#define _GNU_SOURCE +#include <test_progs.h> +#include <bpf/btf.h> +#include <fcntl.h> +#include <unistd.h> +#include <linux/unistd.h> +#include <linux/mount.h> +#include <sys/syscall.h> +#include "bpf/libbpf_internal.h" + +static inline int sys_fsopen(const char *fsname, unsigned flags) +{ + return syscall(__NR_fsopen, fsname, flags); +} + +static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux) +{ + return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux); +} + +static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags) +{ + return syscall(__NR_fsmount, fs_fd, flags, ms_flags); +} + +__attribute__((unused)) +static inline int sys_move_mount(int from_dfd, const char *from_path, + int to_dfd, const char *to_path, + unsigned int ms_flags) +{ + return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, ms_flags); +} + +static void bpf_obj_pinning_detached(void) +{ + LIBBPF_OPTS(bpf_obj_pin_opts, pin_opts); + LIBBPF_OPTS(bpf_obj_get_opts, get_opts); + int fs_fd = -1, mnt_fd = -1; + int map_fd = -1, map_fd2 = -1; + int zero = 0, src_value, dst_value, err; + const char *map_name = "fsmount_map"; + + /* A bunch of below UAPI calls are constructed based on reading: + * https://brauner.io/2023/02/28/mounting-into-mount-namespaces.html + */ + + /* create VFS context */ + fs_fd = sys_fsopen("bpf", 0); + if (!ASSERT_GE(fs_fd, 0, "fs_fd")) + goto cleanup; + + /* instantiate FS object */ + err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0); + if (!ASSERT_OK(err, "fs_create")) + goto cleanup; + + /* create O_PATH fd for detached mount */ + mnt_fd = sys_fsmount(fs_fd, 0, 0); + if (!ASSERT_GE(mnt_fd, 0, "mnt_fd")) + goto cleanup; + + /* If we wanted to expose detached mount in the file system, we'd do + * something like below. But the whole point is that we actually don't + * even have to expose BPF FS in the file system to be able to work + * (pin/get objects) with it. 
+ * + * err = sys_move_mount(mnt_fd, "", -EBADF, mnt_path, MOVE_MOUNT_F_EMPTY_PATH); + * if (!ASSERT_OK(err, "move_mount")) + * goto cleanup; + */ + + /* create BPF map to pin */ + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, map_name, 4, 4, 1, NULL); + if (!ASSERT_GE(map_fd, 0, "map_fd")) + goto cleanup; + + /* pin BPF map into detached BPF FS through mnt_fd */ + pin_opts.file_flags = BPF_F_PATH_FD; + pin_opts.path_fd = mnt_fd; + err = bpf_obj_pin_opts(map_fd, map_name, &pin_opts); + if (!ASSERT_OK(err, "map_pin")) + goto cleanup; + + /* get BPF map from detached BPF FS through mnt_fd */ + get_opts.file_flags = BPF_F_PATH_FD; + get_opts.path_fd = mnt_fd; + map_fd2 = bpf_obj_get_opts(map_name, &get_opts); + if (!ASSERT_GE(map_fd2, 0, "map_get")) + goto cleanup; + + /* update map through one FD */ + src_value = 0xcafebeef; + err = bpf_map_update_elem(map_fd, &zero, &src_value, 0); + ASSERT_OK(err, "map_update"); + + /* check values written/read through different FDs do match */ + dst_value = 0; + err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value); + ASSERT_OK(err, "map_lookup"); + ASSERT_EQ(dst_value, src_value, "map_value_eq1"); + ASSERT_EQ(dst_value, 0xcafebeef, "map_value_eq2"); + +cleanup: + if (map_fd >= 0) + ASSERT_OK(close(map_fd), "close_map_fd"); + if (map_fd2 >= 0) + ASSERT_OK(close(map_fd2), "close_map_fd2"); + if (fs_fd >= 0) + ASSERT_OK(close(fs_fd), "close_fs_fd"); + if (mnt_fd >= 0) + ASSERT_OK(close(mnt_fd), "close_mnt_fd"); +} + +enum path_kind +{ + PATH_STR_ABS, + PATH_STR_REL, + PATH_FD_REL, +}; + +static void validate_pin(int map_fd, const char *map_name, int src_value, + enum path_kind path_kind) +{ + LIBBPF_OPTS(bpf_obj_pin_opts, pin_opts); + char abs_path[PATH_MAX], old_cwd[PATH_MAX]; + const char *pin_path = NULL; + int zero = 0, dst_value, map_fd2, err; + + snprintf(abs_path, sizeof(abs_path), "/sys/fs/bpf/%s", map_name); + old_cwd[0] = '\0'; + + switch (path_kind) { + case PATH_STR_ABS: + /* absolute path */ + pin_path = abs_path; + break; + case PATH_STR_REL: + /* cwd + relative path */ + ASSERT_OK_PTR(getcwd(old_cwd, sizeof(old_cwd)), "getcwd"); + ASSERT_OK(chdir("/sys/fs/bpf"), "chdir"); + pin_path = map_name; + break; + case PATH_FD_REL: + /* dir fd + relative path */ + pin_opts.file_flags = BPF_F_PATH_FD; + pin_opts.path_fd = open("/sys/fs/bpf", O_PATH); + ASSERT_GE(pin_opts.path_fd, 0, "path_fd"); + pin_path = map_name; + break; + } + + /* pin BPF map using specified path definition */ + err = bpf_obj_pin_opts(map_fd, pin_path, &pin_opts); + ASSERT_OK(err, "obj_pin"); + + /* cleanup */ + if (path_kind == PATH_FD_REL && pin_opts.path_fd >= 0) + close(pin_opts.path_fd); + if (old_cwd[0]) + ASSERT_OK(chdir(old_cwd), "restore_cwd"); + + map_fd2 = bpf_obj_get(abs_path); + if (!ASSERT_GE(map_fd2, 0, "map_get")) + goto cleanup; + + /* update map through one FD */ + err = bpf_map_update_elem(map_fd, &zero, &src_value, 0); + ASSERT_OK(err, "map_update"); + + /* check values written/read through different FDs do match */ + dst_value = 0; + err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value); + ASSERT_OK(err, "map_lookup"); + ASSERT_EQ(dst_value, src_value, "map_value_eq"); +cleanup: + if (map_fd2 >= 0) + ASSERT_OK(close(map_fd2), "close_map_fd2"); + unlink(abs_path); +} + +static void validate_get(int map_fd, const char *map_name, int src_value, + enum path_kind path_kind) +{ + LIBBPF_OPTS(bpf_obj_get_opts, get_opts); + char abs_path[PATH_MAX], old_cwd[PATH_MAX]; + const char *pin_path = NULL; + int zero = 0, dst_value, map_fd2, err; + + snprintf(abs_path, 
sizeof(abs_path), "/sys/fs/bpf/%s", map_name); + /* pin BPF map using specified path definition */ + err = bpf_obj_pin(map_fd, abs_path); + if (!ASSERT_OK(err, "pin_map")) + return; + + old_cwd[0] = '\0'; + + switch (path_kind) { + case PATH_STR_ABS: + /* absolute path */ + pin_path = abs_path; + break; + case PATH_STR_REL: + /* cwd + relative path */ + ASSERT_OK_PTR(getcwd(old_cwd, sizeof(old_cwd)), "getcwd"); + ASSERT_OK(chdir("/sys/fs/bpf"), "chdir"); + pin_path = map_name; + break; + case PATH_FD_REL: + /* dir fd + relative path */ + get_opts.file_flags = BPF_F_PATH_FD; + get_opts.path_fd = open("/sys/fs/bpf", O_PATH); + ASSERT_GE(get_opts.path_fd, 0, "path_fd"); + pin_path = map_name; + break; + } + + map_fd2 = bpf_obj_get_opts(pin_path, &get_opts); + if (!ASSERT_GE(map_fd2, 0, "map_get")) + goto cleanup; + + /* cleanup */ + if (path_kind == PATH_FD_REL && get_opts.path_fd >= 0) + close(get_opts.path_fd); + if (old_cwd[0]) + ASSERT_OK(chdir(old_cwd), "restore_cwd"); + + /* update map through one FD */ + err = bpf_map_update_elem(map_fd, &zero, &src_value, 0); + ASSERT_OK(err, "map_update"); + + /* check values written/read through different FDs do match */ + dst_value = 0; + err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value); + ASSERT_OK(err, "map_lookup"); + ASSERT_EQ(dst_value, src_value, "map_value_eq"); +cleanup: + if (map_fd2 >= 0) + ASSERT_OK(close(map_fd2), "close_map_fd2"); + unlink(abs_path); +} + +static void bpf_obj_pinning_mounted(enum path_kind path_kind) +{ + const char *map_name = "mounted_map"; + int map_fd; + + /* create BPF map to pin */ + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, map_name, 4, 4, 1, NULL); + if (!ASSERT_GE(map_fd, 0, "map_fd")) + return; + + validate_pin(map_fd, map_name, 100 + (int)path_kind, path_kind); + validate_get(map_fd, map_name, 200 + (int)path_kind, path_kind); + ASSERT_OK(close(map_fd), "close_map_fd"); +} + +void test_bpf_obj_pinning() +{ + if (test__start_subtest("detached")) + bpf_obj_pinning_detached(); + if (test__start_subtest("mounted-str-abs")) + bpf_obj_pinning_mounted(PATH_STR_ABS); + if (test__start_subtest("mounted-str-rel")) + bpf_obj_pinning_mounted(PATH_STR_REL); + if (test__start_subtest("mounted-fd-rel")) + bpf_obj_pinning_mounted(PATH_FD_REL); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index a53c254c6058..4aabeaa525d4 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -185,6 +185,8 @@ static void test_cubic(void) do_test("bpf_cubic", NULL); + ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called"); + bpf_link__destroy(link); bpf_cubic__destroy(cubic_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 210d643fda6c..4e0cdb593318 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -3991,6 +3991,46 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid arg#1", }, { + .descr = "decl_tag test #18, decl_tag as the map key type", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_STRUCT_ENC(0, 2, 8), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_MEMBER_ENC(NAME_TBD, 1, 32), + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0m1\0m2\0tag"), + .map_type = BPF_MAP_TYPE_HASH, + .map_name = "tag_type_check_btf", + .key_size = 8, + .value_size = 4, + 
.key_type_id = 3, + .value_type_id = 1, + .max_entries = 1, + .map_create_err = true, +}, +{ + .descr = "decl_tag test #19, decl_tag as the map value type", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_STRUCT_ENC(0, 2, 8), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_MEMBER_ENC(NAME_TBD, 1, 32), + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0m1\0m2\0tag"), + .map_type = BPF_MAP_TYPE_HASH, + .map_name = "tag_type_check_btf", + .key_size = 4, + .value_size = 8, + .key_type_id = 1, + .value_type_id = 3, + .max_entries = 1, + .map_create_err = true, +}, +{ .descr = "type_tag test #1", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c index 4d2fa99273d8..2bb5773d6f99 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c @@ -25,6 +25,8 @@ static void test_setsockopt_set(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that sets EUNATCH, assert that * we actually get that error when we run setsockopt() */ @@ -59,6 +61,8 @@ static void test_setsockopt_set_and_get(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that sets EUNATCH, and one that gets the * previously set errno. Assert that we get the same errno back. */ @@ -100,6 +104,8 @@ static void test_setsockopt_default_zero(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that gets the previously set errno. * Assert that, without anything setting one, we get 0. */ @@ -134,6 +140,8 @@ static void test_setsockopt_default_zero_and_set(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that gets the previously set errno, and then * one that sets the errno to EUNATCH. Assert that the get does not * see EUNATCH set later, and does not prevent EUNATCH from being set. @@ -177,6 +185,8 @@ static void test_setsockopt_override(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that sets EUNATCH, then one that sets EISCONN, * and then one that gets the exported errno. Assert both the syscall * and the helper sees the last set errno. @@ -224,6 +234,8 @@ static void test_setsockopt_legacy_eperm(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that return a reject without setting errno * (legacy reject), and one that gets the errno. Assert that for * backward compatibility the syscall result in EPERM, and this @@ -268,6 +280,8 @@ static void test_setsockopt_legacy_no_override(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that sets EUNATCH, then one that return a reject * without setting errno, and then one that gets the exported errno. 
* Assert both the syscall and the helper's errno are unaffected by @@ -319,6 +333,8 @@ static void test_getsockopt_get(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach getsockopt that gets previously set errno. Assert that the * error from kernel is in both ctx_retval_value and retval_value. */ @@ -359,6 +375,8 @@ static void test_getsockopt_override(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach getsockopt that sets retval to -EISCONN. Assert that this * overrides the value from kernel. */ @@ -396,6 +414,8 @@ static void test_getsockopt_retval_sync(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach getsockopt that sets retval to -EISCONN, and one that clears * ctx retval. Assert that the clearing ctx retval is synced to helper * and clears any errors both from kernel and BPF.. diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_tcp_skb.c b/tools/testing/selftests/bpf/prog_tests/cgroup_tcp_skb.c new file mode 100644 index 000000000000..a1542faf7873 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_tcp_skb.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Facebook */ +#include <test_progs.h> +#include <linux/in6.h> +#include <sys/socket.h> +#include <sched.h> +#include <unistd.h> +#include "cgroup_helpers.h" +#include "testing_helpers.h" +#include "cgroup_tcp_skb.skel.h" +#include "cgroup_tcp_skb.h" +#include "network_helpers.h" + +#define CGROUP_TCP_SKB_PATH "/test_cgroup_tcp_skb" + +static int install_filters(int cgroup_fd, + struct bpf_link **egress_link, + struct bpf_link **ingress_link, + struct bpf_program *egress_prog, + struct bpf_program *ingress_prog, + struct cgroup_tcp_skb *skel) +{ + /* Prepare filters */ + skel->bss->g_sock_state = 0; + skel->bss->g_unexpected = 0; + *egress_link = + bpf_program__attach_cgroup(egress_prog, + cgroup_fd); + if (!ASSERT_OK_PTR(egress_link, "egress_link")) + return -1; + *ingress_link = + bpf_program__attach_cgroup(ingress_prog, + cgroup_fd); + if (!ASSERT_OK_PTR(ingress_link, "ingress_link")) + return -1; + + return 0; +} + +static void uninstall_filters(struct bpf_link **egress_link, + struct bpf_link **ingress_link) +{ + bpf_link__destroy(*egress_link); + *egress_link = NULL; + bpf_link__destroy(*ingress_link); + *ingress_link = NULL; +} + +static int create_client_sock_v6(void) +{ + int fd; + + fd = socket(AF_INET6, SOCK_STREAM, 0); + if (fd < 0) { + perror("socket"); + return -1; + } + + return fd; +} + +/* Connect to the server in a cgroup from the outside of the cgroup. 
*/ +static int talk_to_cgroup(int *client_fd, int *listen_fd, int *service_fd, + struct cgroup_tcp_skb *skel) +{ + int err, cp; + char buf[5]; + int port; + + /* Create client & server socket */ + err = join_root_cgroup(); + if (!ASSERT_OK(err, "join_root_cgroup")) + return -1; + *client_fd = create_client_sock_v6(); + if (!ASSERT_GE(*client_fd, 0, "client_fd")) + return -1; + err = join_cgroup(CGROUP_TCP_SKB_PATH); + if (!ASSERT_OK(err, "join_cgroup")) + return -1; + *listen_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_GE(*listen_fd, 0, "listen_fd")) + return -1; + port = get_socket_local_port(*listen_fd); + if (!ASSERT_GE(port, 0, "get_socket_local_port")) + return -1; + skel->bss->g_sock_port = ntohs(port); + + /* Connect client to server */ + err = connect_fd_to_fd(*client_fd, *listen_fd, 0); + if (!ASSERT_OK(err, "connect_fd_to_fd")) + return -1; + *service_fd = accept(*listen_fd, NULL, NULL); + if (!ASSERT_GE(*service_fd, 0, "service_fd")) + return -1; + err = join_root_cgroup(); + if (!ASSERT_OK(err, "join_root_cgroup")) + return -1; + cp = write(*client_fd, "hello", 5); + if (!ASSERT_EQ(cp, 5, "write")) + return -1; + cp = read(*service_fd, buf, 5); + if (!ASSERT_EQ(cp, 5, "read")) + return -1; + + return 0; +} + +/* Connect to the server out of a cgroup from inside the cgroup. */ +static int talk_to_outside(int *client_fd, int *listen_fd, int *service_fd, + struct cgroup_tcp_skb *skel) + +{ + int err, cp; + char buf[5]; + int port; + + /* Create client & server socket */ + err = join_root_cgroup(); + if (!ASSERT_OK(err, "join_root_cgroup")) + return -1; + *listen_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_GE(*listen_fd, 0, "listen_fd")) + return -1; + err = join_cgroup(CGROUP_TCP_SKB_PATH); + if (!ASSERT_OK(err, "join_cgroup")) + return -1; + *client_fd = create_client_sock_v6(); + if (!ASSERT_GE(*client_fd, 0, "client_fd")) + return -1; + err = join_root_cgroup(); + if (!ASSERT_OK(err, "join_root_cgroup")) + return -1; + port = get_socket_local_port(*listen_fd); + if (!ASSERT_GE(port, 0, "get_socket_local_port")) + return -1; + skel->bss->g_sock_port = ntohs(port); + + /* Connect client to server */ + err = connect_fd_to_fd(*client_fd, *listen_fd, 0); + if (!ASSERT_OK(err, "connect_fd_to_fd")) + return -1; + *service_fd = accept(*listen_fd, NULL, NULL); + if (!ASSERT_GE(*service_fd, 0, "service_fd")) + return -1; + cp = write(*client_fd, "hello", 5); + if (!ASSERT_EQ(cp, 5, "write")) + return -1; + cp = read(*service_fd, buf, 5); + if (!ASSERT_EQ(cp, 5, "read")) + return -1; + + return 0; +} + +static int close_connection(int *closing_fd, int *peer_fd, int *listen_fd, + struct cgroup_tcp_skb *skel) +{ + __u32 saved_packet_count = 0; + int err; + int i; + + /* Wait for ACKs to be sent */ + saved_packet_count = skel->bss->g_packet_count; + usleep(100000); /* 0.1s */ + for (i = 0; + skel->bss->g_packet_count != saved_packet_count && i < 10; + i++) { + saved_packet_count = skel->bss->g_packet_count; + usleep(100000); /* 0.1s */ + } + if (!ASSERT_EQ(skel->bss->g_packet_count, saved_packet_count, + "packet_count")) + return -1; + + skel->bss->g_packet_count = 0; + saved_packet_count = 0; + + /* Half shutdown to make sure the closing socket having a chance to + * receive a FIN from the peer. 
+ */ + err = shutdown(*closing_fd, SHUT_WR); + if (!ASSERT_OK(err, "shutdown closing_fd")) + return -1; + + /* Wait for FIN and the ACK of the FIN to be observed */ + for (i = 0; + skel->bss->g_packet_count < saved_packet_count + 2 && i < 10; + i++) + usleep(100000); /* 0.1s */ + if (!ASSERT_GE(skel->bss->g_packet_count, saved_packet_count + 2, + "packet_count")) + return -1; + + saved_packet_count = skel->bss->g_packet_count; + + /* Fully shutdown the connection */ + err = close(*peer_fd); + if (!ASSERT_OK(err, "close peer_fd")) + return -1; + *peer_fd = -1; + + /* Wait for FIN and the ACK of the FIN to be observed */ + for (i = 0; + skel->bss->g_packet_count < saved_packet_count + 2 && i < 10; + i++) + usleep(100000); /* 0.1s */ + if (!ASSERT_GE(skel->bss->g_packet_count, saved_packet_count + 2, + "packet_count")) + return -1; + + err = close(*closing_fd); + if (!ASSERT_OK(err, "close closing_fd")) + return -1; + *closing_fd = -1; + + close(*listen_fd); + *listen_fd = -1; + + return 0; +} + +/* This test case includes four scenarios: + * 1. Connect to the server from outside the cgroup and close the connection + * from outside the cgroup. + * 2. Connect to the server from outside the cgroup and close the connection + * from inside the cgroup. + * 3. Connect to the server from inside the cgroup and close the connection + * from outside the cgroup. + * 4. Connect to the server from inside the cgroup and close the connection + * from inside the cgroup. + * + * The test case is to verify that cgroup_skb/{egress,ingress} filters + * receive expected packets including SYN, SYN/ACK, ACK, FIN, and FIN/ACK. + */ +void test_cgroup_tcp_skb(void) +{ + struct bpf_link *ingress_link = NULL; + struct bpf_link *egress_link = NULL; + int client_fd = -1, listen_fd = -1; + struct cgroup_tcp_skb *skel; + int service_fd = -1; + int cgroup_fd = -1; + int err; + + skel = cgroup_tcp_skb__open_and_load(); + if (!ASSERT_OK(!skel, "skel_open_load")) + return; + + err = setup_cgroup_environment(); + if (!ASSERT_OK(err, "setup_cgroup_environment")) + goto cleanup; + + cgroup_fd = create_and_get_cgroup(CGROUP_TCP_SKB_PATH); + if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd")) + goto cleanup; + + /* Scenario 1 */ + err = install_filters(cgroup_fd, &egress_link, &ingress_link, + skel->progs.server_egress, + skel->progs.server_ingress, + skel); + if (!ASSERT_OK(err, "install_filters")) + goto cleanup; + + err = talk_to_cgroup(&client_fd, &listen_fd, &service_fd, skel); + if (!ASSERT_OK(err, "talk_to_cgroup")) + goto cleanup; + + err = close_connection(&client_fd, &service_fd, &listen_fd, skel); + if (!ASSERT_OK(err, "close_connection")) + goto cleanup; + + ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected"); + ASSERT_EQ(skel->bss->g_sock_state, CLOSED, "g_sock_state"); + + uninstall_filters(&egress_link, &ingress_link); + + /* Scenario 2 */ + err = install_filters(cgroup_fd, &egress_link, &ingress_link, + skel->progs.server_egress_srv, + skel->progs.server_ingress_srv, + skel); + + err = talk_to_cgroup(&client_fd, &listen_fd, &service_fd, skel); + if (!ASSERT_OK(err, "talk_to_cgroup")) + goto cleanup; + + err = close_connection(&service_fd, &client_fd, &listen_fd, skel); + if (!ASSERT_OK(err, "close_connection")) + goto cleanup; + + ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected"); + ASSERT_EQ(skel->bss->g_sock_state, TIME_WAIT, "g_sock_state"); + + uninstall_filters(&egress_link, &ingress_link); + + /* Scenario 3 */ + err = install_filters(cgroup_fd, &egress_link, &ingress_link, + skel->progs.client_egress_srv, + 
skel->progs.client_ingress_srv, + skel); + + err = talk_to_outside(&client_fd, &listen_fd, &service_fd, skel); + if (!ASSERT_OK(err, "talk_to_outside")) + goto cleanup; + + err = close_connection(&service_fd, &client_fd, &listen_fd, skel); + if (!ASSERT_OK(err, "close_connection")) + goto cleanup; + + ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected"); + ASSERT_EQ(skel->bss->g_sock_state, CLOSED, "g_sock_state"); + + uninstall_filters(&egress_link, &ingress_link); + + /* Scenario 4 */ + err = install_filters(cgroup_fd, &egress_link, &ingress_link, + skel->progs.client_egress, + skel->progs.client_ingress, + skel); + + err = talk_to_outside(&client_fd, &listen_fd, &service_fd, skel); + if (!ASSERT_OK(err, "talk_to_outside")) + goto cleanup; + + err = close_connection(&client_fd, &service_fd, &listen_fd, skel); + if (!ASSERT_OK(err, "close_connection")) + goto cleanup; + + ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected"); + ASSERT_EQ(skel->bss->g_sock_state, TIME_WAIT, "g_sock_state"); + + uninstall_filters(&egress_link, &ingress_link); + +cleanup: + close(client_fd); + close(listen_fd); + close(service_fd); + close(cgroup_fd); + bpf_link__destroy(egress_link); + bpf_link__destroy(ingress_link); + cleanup_cgroup_environment(); + cgroup_tcp_skb__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c index 5338d2ea0460..2a9a30650350 100644 --- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c +++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c @@ -183,7 +183,7 @@ cleanup: void serial_test_check_mtu(void) { - __u32 mtu_lo; + int mtu_lo; if (test__start_subtest("bpf_check_mtu XDP-attach")) test_check_mtu_xdp_attach(); diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c index cdf4acc18e4c..756ea8b590b6 100644 --- a/tools/testing/selftests/bpf/prog_tests/cpumask.c +++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c @@ -10,6 +10,7 @@ static const char * const cpumask_success_testcases[] = { "test_set_clear_cpu", "test_setall_clear_cpu", "test_first_firstzero_cpu", + "test_firstand_nocpu", "test_test_and_set_clear", "test_and_or_xor", "test_intersects_subset", @@ -70,5 +71,6 @@ void test_cpumask(void) verify_success(cpumask_success_testcases[i]); } + RUN_TESTS(cpumask_success); RUN_TESTS(cpumask_failure); } diff --git a/tools/testing/selftests/bpf/prog_tests/d_path.c b/tools/testing/selftests/bpf/prog_tests/d_path.c index 911345c526e6..ccc768592e66 100644 --- a/tools/testing/selftests/bpf/prog_tests/d_path.c +++ b/tools/testing/selftests/bpf/prog_tests/d_path.c @@ -12,6 +12,17 @@ #include "test_d_path_check_rdonly_mem.skel.h" #include "test_d_path_check_types.skel.h" +/* sys_close_range is not around for long time, so let's + * make sure we can call it on systems with older glibc + */ +#ifndef __NR_close_range +#ifdef __alpha__ +#define __NR_close_range 546 +#else +#define __NR_close_range 436 +#endif +#endif + static int duration; static struct { @@ -90,7 +101,11 @@ static int trigger_fstat_events(pid_t pid) fstat(indicatorfd, &fileStat); out_close: - /* triggers filp_close */ + /* sys_close no longer triggers filp_close, but we can + * call sys_close_range instead which still does + */ +#define close(fd) syscall(__NR_close_range, fd, fd, 0) + close(pipefd[0]); close(pipefd[1]); close(sockfd); @@ -98,6 +113,8 @@ out_close: close(devfd); close(localfd); close(indicatorfd); + +#undef close return ret; } diff --git 
a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c index d176c34a7d2e..7cfac53c0d58 100644 --- a/tools/testing/selftests/bpf/prog_tests/dynptr.c +++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c @@ -20,6 +20,14 @@ static struct { {"test_ringbuf", SETUP_SYSCALL_SLEEP}, {"test_skb_readonly", SETUP_SKB_PROG}, {"test_dynptr_skb_data", SETUP_SKB_PROG}, + {"test_adjust", SETUP_SYSCALL_SLEEP}, + {"test_adjust_err", SETUP_SYSCALL_SLEEP}, + {"test_zero_size_dynptr", SETUP_SYSCALL_SLEEP}, + {"test_dynptr_is_null", SETUP_SYSCALL_SLEEP}, + {"test_dynptr_is_rdonly", SETUP_SKB_PROG}, + {"test_dynptr_clone", SETUP_SKB_PROG}, + {"test_dynptr_skb_no_buff", SETUP_SKB_PROG}, + {"test_dynptr_skb_strcmp", SETUP_SKB_PROG}, }; static void verify_success(const char *prog_name, enum test_setup_type setup_type) diff --git a/tools/testing/selftests/bpf/prog_tests/empty_skb.c b/tools/testing/selftests/bpf/prog_tests/empty_skb.c index 3b77d8a422db..261228eb68e8 100644 --- a/tools/testing/selftests/bpf/prog_tests/empty_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/empty_skb.c @@ -24,6 +24,7 @@ void test_empty_skb(void) int *ifindex; int err; int ret; + int lwt_egress_ret; /* expected retval at lwt/egress */ bool success_on_tc; } tests[] = { /* Empty packets are always rejected. */ @@ -57,6 +58,7 @@ void test_empty_skb(void) .data_size_in = sizeof(eth_hlen), .ifindex = &veth_ifindex, .ret = -ERANGE, + .lwt_egress_ret = -ERANGE, .success_on_tc = true, }, { @@ -70,6 +72,7 @@ void test_empty_skb(void) .data_size_in = sizeof(eth_hlen), .ifindex = &ipip_ifindex, .ret = -ERANGE, + .lwt_egress_ret = -ERANGE, }, /* ETH_HLEN+1-sized packet should be redirected. */ @@ -79,6 +82,7 @@ void test_empty_skb(void) .data_in = eth_hlen_pp, .data_size_in = sizeof(eth_hlen_pp), .ifindex = &veth_ifindex, + .lwt_egress_ret = 1, /* veth_xmit NET_XMIT_DROP */ }, { .msg = "ipip ETH_HLEN+1 packet ingress", @@ -108,8 +112,12 @@ void test_empty_skb(void) for (i = 0; i < ARRAY_SIZE(tests); i++) { bpf_object__for_each_program(prog, bpf_obj->obj) { - char buf[128]; + bool at_egress = strstr(bpf_program__name(prog), "egress") != NULL; bool at_tc = !strncmp(bpf_program__section_name(prog), "tc", 2); + int expected_ret; + char buf[128]; + + expected_ret = at_egress && !at_tc ? 
tests[i].lwt_egress_ret : tests[i].ret; tattr.data_in = tests[i].data_in; tattr.data_size_in = tests[i].data_size_in; @@ -128,7 +136,7 @@ void test_empty_skb(void) if (at_tc && tests[i].success_on_tc) ASSERT_GE(bpf_obj->bss->ret, 0, buf); else - ASSERT_EQ(bpf_obj->bss->ret, tests[i].ret, buf); + ASSERT_EQ(bpf_obj->bss->ret, expected_ret, buf); } } diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c index c0d1d61d5f66..aee1bc77a17f 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -2,8 +2,9 @@ /* Copyright (c) 2019 Facebook */ #include <test_progs.h> #include "fentry_test.lskel.h" +#include "fentry_many_args.skel.h" -static int fentry_test(struct fentry_test_lskel *fentry_skel) +static int fentry_test_common(struct fentry_test_lskel *fentry_skel) { int err, prog_fd, i; int link_fd; @@ -37,7 +38,7 @@ static int fentry_test(struct fentry_test_lskel *fentry_skel) return 0; } -void test_fentry_test(void) +static void fentry_test(void) { struct fentry_test_lskel *fentry_skel = NULL; int err; @@ -46,13 +47,47 @@ void test_fentry_test(void) if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load")) goto cleanup; - err = fentry_test(fentry_skel); + err = fentry_test_common(fentry_skel); if (!ASSERT_OK(err, "fentry_first_attach")) goto cleanup; - err = fentry_test(fentry_skel); + err = fentry_test_common(fentry_skel); ASSERT_OK(err, "fentry_second_attach"); cleanup: fentry_test_lskel__destroy(fentry_skel); } + +static void fentry_many_args(void) +{ + struct fentry_many_args *fentry_skel = NULL; + int err; + + fentry_skel = fentry_many_args__open_and_load(); + if (!ASSERT_OK_PTR(fentry_skel, "fentry_many_args_skel_load")) + goto cleanup; + + err = fentry_many_args__attach(fentry_skel); + if (!ASSERT_OK(err, "fentry_many_args_attach")) + goto cleanup; + + ASSERT_OK(trigger_module_test_read(1), "trigger_read"); + + ASSERT_EQ(fentry_skel->bss->test1_result, 1, + "fentry_many_args_result1"); + ASSERT_EQ(fentry_skel->bss->test2_result, 1, + "fentry_many_args_result2"); + ASSERT_EQ(fentry_skel->bss->test3_result, 1, + "fentry_many_args_result3"); + +cleanup: + fentry_many_args__destroy(fentry_skel); +} + +void test_fentry_test(void) +{ + if (test__start_subtest("fentry")) + fentry_test(); + if (test__start_subtest("fentry_many_args")) + fentry_many_args(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c index 101b7343036b..1c13007e37dd 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -2,8 +2,9 @@ /* Copyright (c) 2019 Facebook */ #include <test_progs.h> #include "fexit_test.lskel.h" +#include "fexit_many_args.skel.h" -static int fexit_test(struct fexit_test_lskel *fexit_skel) +static int fexit_test_common(struct fexit_test_lskel *fexit_skel) { int err, prog_fd, i; int link_fd; @@ -37,7 +38,7 @@ static int fexit_test(struct fexit_test_lskel *fexit_skel) return 0; } -void test_fexit_test(void) +static void fexit_test(void) { struct fexit_test_lskel *fexit_skel = NULL; int err; @@ -46,13 +47,47 @@ void test_fexit_test(void) if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load")) goto cleanup; - err = fexit_test(fexit_skel); + err = fexit_test_common(fexit_skel); if (!ASSERT_OK(err, "fexit_first_attach")) goto cleanup; - err = fexit_test(fexit_skel); + err = fexit_test_common(fexit_skel); ASSERT_OK(err, "fexit_second_attach"); 
cleanup: fexit_test_lskel__destroy(fexit_skel); } + +static void fexit_many_args(void) +{ + struct fexit_many_args *fexit_skel = NULL; + int err; + + fexit_skel = fexit_many_args__open_and_load(); + if (!ASSERT_OK_PTR(fexit_skel, "fexit_many_args_skel_load")) + goto cleanup; + + err = fexit_many_args__attach(fexit_skel); + if (!ASSERT_OK(err, "fexit_many_args_attach")) + goto cleanup; + + ASSERT_OK(trigger_module_test_read(1), "trigger_read"); + + ASSERT_EQ(fexit_skel->bss->test1_result, 1, + "fexit_many_args_result1"); + ASSERT_EQ(fexit_skel->bss->test2_result, 1, + "fexit_many_args_result2"); + ASSERT_EQ(fexit_skel->bss->test3_result, 1, + "fexit_many_args_result3"); + +cleanup: + fexit_many_args__destroy(fexit_skel); +} + +void test_fexit_test(void) +{ + if (test__start_subtest("fexit")) + fexit_test(); + if (test__start_subtest("fexit_many_args")) + fexit_many_args(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c index a1e712105811..2fd05649bad1 100644 --- a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ +#include <linux/rtnetlink.h> #include <sys/types.h> #include <net/if.h> @@ -15,14 +16,23 @@ #define IPV4_IFACE_ADDR "10.0.0.254" #define IPV4_NUD_FAILED_ADDR "10.0.0.1" #define IPV4_NUD_STALE_ADDR "10.0.0.2" +#define IPV4_TBID_ADDR "172.0.0.254" +#define IPV4_TBID_NET "172.0.0.0" +#define IPV4_TBID_DST "172.0.0.2" +#define IPV6_TBID_ADDR "fd00::FFFF" +#define IPV6_TBID_NET "fd00::" +#define IPV6_TBID_DST "fd00::2" #define DMAC "11:11:11:11:11:11" #define DMAC_INIT { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, } +#define DMAC2 "01:01:01:01:01:01" +#define DMAC_INIT2 { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, } struct fib_lookup_test { const char *desc; const char *daddr; int expected_ret; int lookup_flags; + __u32 tbid; __u8 dmac[6]; }; @@ -43,6 +53,22 @@ static const struct fib_lookup_test tests[] = { { .desc = "IPv4 skip neigh", .daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS, .lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, }, + { .desc = "IPv4 TBID lookup failure", + .daddr = IPV4_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_NOT_FWDED, + .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, + .tbid = RT_TABLE_MAIN, }, + { .desc = "IPv4 TBID lookup success", + .daddr = IPV4_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS, + .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, .tbid = 100, + .dmac = DMAC_INIT2, }, + { .desc = "IPv6 TBID lookup failure", + .daddr = IPV6_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_NOT_FWDED, + .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, + .tbid = RT_TABLE_MAIN, }, + { .desc = "IPv6 TBID lookup success", + .daddr = IPV6_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS, + .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, .tbid = 100, + .dmac = DMAC_INIT2, }, }; static int ifindex; @@ -53,6 +79,7 @@ static int setup_netns(void) SYS(fail, "ip link add veth1 type veth peer name veth2"); SYS(fail, "ip link set dev veth1 up"); + SYS(fail, "ip link set dev veth2 up"); err = write_sysctl("/proc/sys/net/ipv4/neigh/veth1/gc_stale_time", "900"); if (!ASSERT_OK(err, "write_sysctl(net.ipv4.neigh.veth1.gc_stale_time)")) @@ -70,6 +97,17 @@ static int setup_netns(void) SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR); SYS(fail, "ip neigh add %s dev 
veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC); + /* Setup for tbid lookup tests */ + SYS(fail, "ip addr add %s/24 dev veth2", IPV4_TBID_ADDR); + SYS(fail, "ip route del %s/24 dev veth2", IPV4_TBID_NET); + SYS(fail, "ip route add table 100 %s/24 dev veth2", IPV4_TBID_NET); + SYS(fail, "ip neigh add %s dev veth2 lladdr %s nud stale", IPV4_TBID_DST, DMAC2); + + SYS(fail, "ip addr add %s/64 dev veth2", IPV6_TBID_ADDR); + SYS(fail, "ip -6 route del %s/64 dev veth2", IPV6_TBID_NET); + SYS(fail, "ip -6 route add table 100 %s/64 dev veth2", IPV6_TBID_NET); + SYS(fail, "ip neigh add %s dev veth2 lladdr %s nud stale", IPV6_TBID_DST, DMAC2); + err = write_sysctl("/proc/sys/net/ipv4/conf/veth1/forwarding", "1"); if (!ASSERT_OK(err, "write_sysctl(net.ipv4.conf.veth1.forwarding)")) goto fail; @@ -83,7 +121,7 @@ fail: return -1; } -static int set_lookup_params(struct bpf_fib_lookup *params, const char *daddr) +static int set_lookup_params(struct bpf_fib_lookup *params, const struct fib_lookup_test *test) { int ret; @@ -91,8 +129,9 @@ static int set_lookup_params(struct bpf_fib_lookup *params, const char *daddr) params->l4_protocol = IPPROTO_TCP; params->ifindex = ifindex; + params->tbid = test->tbid; - if (inet_pton(AF_INET6, daddr, params->ipv6_dst) == 1) { + if (inet_pton(AF_INET6, test->daddr, params->ipv6_dst) == 1) { params->family = AF_INET6; ret = inet_pton(AF_INET6, IPV6_IFACE_ADDR, params->ipv6_src); if (!ASSERT_EQ(ret, 1, "inet_pton(IPV6_IFACE_ADDR)")) @@ -100,7 +139,7 @@ static int set_lookup_params(struct bpf_fib_lookup *params, const char *daddr) return 0; } - ret = inet_pton(AF_INET, daddr, &params->ipv4_dst); + ret = inet_pton(AF_INET, test->daddr, &params->ipv4_dst); if (!ASSERT_EQ(ret, 1, "convert IP[46] address")) return -1; params->family = AF_INET; @@ -154,13 +193,12 @@ void test_fib_lookup(void) fib_params = &skel->bss->fib_params; for (i = 0; i < ARRAY_SIZE(tests); i++) { - printf("Testing %s\n", tests[i].desc); + printf("Testing %s ", tests[i].desc); - if (set_lookup_params(fib_params, tests[i].daddr)) + if (set_lookup_params(fib_params, &tests[i])) continue; skel->bss->fib_lookup_ret = -1; - skel->bss->lookup_flags = BPF_FIB_LOOKUP_OUTPUT | - tests[i].lookup_flags; + skel->bss->lookup_flags = tests[i].lookup_flags; err = bpf_prog_test_run_opts(prog_fd, &run_opts); if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) @@ -175,7 +213,14 @@ void test_fib_lookup(void) mac_str(expected, tests[i].dmac); mac_str(actual, fib_params->dmac); - printf("dmac expected %s actual %s\n", expected, actual); + printf("dmac expected %s actual %s ", expected, actual); + } + + // ensure tbid is zero'd out after fib lookup. 
+ if (tests[i].lookup_flags & BPF_FIB_LOOKUP_DIRECT) { + if (!ASSERT_EQ(skel->bss->fib_params.tbid, 0, + "expected fib_params.tbid to be zero")) + goto fail; } } diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c new file mode 100644 index 000000000000..9d768e083714 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023 Yafang Shao <[email protected]> */ + +#include <string.h> +#include <linux/bpf.h> +#include <linux/limits.h> +#include <test_progs.h> +#include "trace_helpers.h" +#include "test_fill_link_info.skel.h" + +#define TP_CAT "sched" +#define TP_NAME "sched_switch" + +static const char *kmulti_syms[] = { + "bpf_fentry_test2", + "bpf_fentry_test1", + "bpf_fentry_test3", +}; +#define KMULTI_CNT ARRAY_SIZE(kmulti_syms) +static __u64 kmulti_addrs[KMULTI_CNT]; + +#define KPROBE_FUNC "bpf_fentry_test1" +static __u64 kprobe_addr; + +#define UPROBE_FILE "/proc/self/exe" +static ssize_t uprobe_offset; +/* uprobe attach point */ +static noinline void uprobe_func(void) +{ + asm volatile (""); +} + +static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr, + ssize_t offset, ssize_t entry_offset) +{ + struct bpf_link_info info; + __u32 len = sizeof(info); + char buf[PATH_MAX]; + int err; + + memset(&info, 0, sizeof(info)); + buf[0] = '\0'; + +again: + err = bpf_link_get_info_by_fd(fd, &info, &len); + if (!ASSERT_OK(err, "get_link_info")) + return -1; + + if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type")) + return -1; + if (!ASSERT_EQ(info.perf_event.type, type, "perf_type_match")) + return -1; + + switch (info.perf_event.type) { + case BPF_PERF_EVENT_KPROBE: + case BPF_PERF_EVENT_KRETPROBE: + ASSERT_EQ(info.perf_event.kprobe.offset, offset, "kprobe_offset"); + + /* In case kernel.kptr_restrict is not permitted or MAX_SYMS is reached */ + if (addr) + ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset, + "kprobe_addr"); + + if (!info.perf_event.kprobe.func_name) { + ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len"); + info.perf_event.kprobe.func_name = ptr_to_u64(&buf); + info.perf_event.kprobe.name_len = sizeof(buf); + goto again; + } + + err = strncmp(u64_to_ptr(info.perf_event.kprobe.func_name), KPROBE_FUNC, + strlen(KPROBE_FUNC)); + ASSERT_EQ(err, 0, "cmp_kprobe_func_name"); + break; + case BPF_PERF_EVENT_TRACEPOINT: + if (!info.perf_event.tracepoint.tp_name) { + ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len"); + info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf); + info.perf_event.tracepoint.name_len = sizeof(buf); + goto again; + } + + err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME, + strlen(TP_NAME)); + ASSERT_EQ(err, 0, "cmp_tp_name"); + break; + case BPF_PERF_EVENT_UPROBE: + case BPF_PERF_EVENT_URETPROBE: + ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset"); + + if (!info.perf_event.uprobe.file_name) { + ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len"); + info.perf_event.uprobe.file_name = ptr_to_u64(&buf); + info.perf_event.uprobe.name_len = sizeof(buf); + goto again; + } + + err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE, + strlen(UPROBE_FILE)); + ASSERT_EQ(err, 0, "cmp_file_name"); + break; + default: + err = -1; + break; + } + return err; +} + +static void kprobe_fill_invalid_user_buffer(int fd) +{ + struct bpf_link_info info; + __u32 len = sizeof(info); + int err; + + 
memset(&info, 0, sizeof(info)); + + info.perf_event.kprobe.func_name = 0x1; /* invalid address */ + err = bpf_link_get_info_by_fd(fd, &info, &len); + ASSERT_EQ(err, -EINVAL, "invalid_buff_and_len"); + + info.perf_event.kprobe.name_len = 64; + err = bpf_link_get_info_by_fd(fd, &info, &len); + ASSERT_EQ(err, -EFAULT, "invalid_buff"); + + info.perf_event.kprobe.func_name = 0; + err = bpf_link_get_info_by_fd(fd, &info, &len); + ASSERT_EQ(err, -EINVAL, "invalid_len"); + + ASSERT_EQ(info.perf_event.kprobe.addr, 0, "func_addr"); + ASSERT_EQ(info.perf_event.kprobe.offset, 0, "func_offset"); + ASSERT_EQ(info.perf_event.type, 0, "type"); +} + +static void test_kprobe_fill_link_info(struct test_fill_link_info *skel, + enum bpf_perf_event_type type, + bool invalid) +{ + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts, + .attach_mode = PROBE_ATTACH_MODE_LINK, + .retprobe = type == BPF_PERF_EVENT_KRETPROBE, + ); + ssize_t entry_offset = 0; + int link_fd, err; + + skel->links.kprobe_run = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run, + KPROBE_FUNC, &opts); + if (!ASSERT_OK_PTR(skel->links.kprobe_run, "attach_kprobe")) + return; + + link_fd = bpf_link__fd(skel->links.kprobe_run); + if (!invalid) { + /* See also arch_adjust_kprobe_addr(). */ + if (skel->kconfig->CONFIG_X86_KERNEL_IBT) + entry_offset = 4; + err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset); + ASSERT_OK(err, "verify_perf_link_info"); + } else { + kprobe_fill_invalid_user_buffer(link_fd); + } + bpf_link__detach(skel->links.kprobe_run); +} + +static void test_tp_fill_link_info(struct test_fill_link_info *skel) +{ + int link_fd, err; + + skel->links.tp_run = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME); + if (!ASSERT_OK_PTR(skel->links.tp_run, "attach_tp")) + return; + + link_fd = bpf_link__fd(skel->links.tp_run); + err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0); + ASSERT_OK(err, "verify_perf_link_info"); + bpf_link__detach(skel->links.tp_run); +} + +static void test_uprobe_fill_link_info(struct test_fill_link_info *skel, + enum bpf_perf_event_type type) +{ + int link_fd, err; + + skel->links.uprobe_run = bpf_program__attach_uprobe(skel->progs.uprobe_run, + type == BPF_PERF_EVENT_URETPROBE, + 0, /* self pid */ + UPROBE_FILE, uprobe_offset); + if (!ASSERT_OK_PTR(skel->links.uprobe_run, "attach_uprobe")) + return; + + link_fd = bpf_link__fd(skel->links.uprobe_run); + err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, 0); + ASSERT_OK(err, "verify_perf_link_info"); + bpf_link__detach(skel->links.uprobe_run); +} + +static int verify_kmulti_link_info(int fd, bool retprobe) +{ + struct bpf_link_info info; + __u32 len = sizeof(info); + __u64 addrs[KMULTI_CNT]; + int flags, i, err; + + memset(&info, 0, sizeof(info)); + +again: + err = bpf_link_get_info_by_fd(fd, &info, &len); + if (!ASSERT_OK(err, "get_link_info")) + return -1; + + if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_KPROBE_MULTI, "kmulti_type")) + return -1; + + ASSERT_EQ(info.kprobe_multi.count, KMULTI_CNT, "func_cnt"); + flags = info.kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN; + if (!retprobe) + ASSERT_EQ(flags, 0, "kmulti_flags"); + else + ASSERT_NEQ(flags, 0, "kretmulti_flags"); + + if (!info.kprobe_multi.addrs) { + info.kprobe_multi.addrs = ptr_to_u64(addrs); + goto again; + } + for (i = 0; i < KMULTI_CNT; i++) + ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs"); + return 0; +} + +static void verify_kmulti_invalid_user_buffer(int fd) +{ + struct bpf_link_info info; + __u32 len = sizeof(info); + __u64 
addrs[KMULTI_CNT]; + int err, i; + + memset(&info, 0, sizeof(info)); + + info.kprobe_multi.count = KMULTI_CNT; + err = bpf_link_get_info_by_fd(fd, &info, &len); + ASSERT_EQ(err, -EINVAL, "no_addr"); + + info.kprobe_multi.addrs = ptr_to_u64(addrs); + info.kprobe_multi.count = 0; + err = bpf_link_get_info_by_fd(fd, &info, &len); + ASSERT_EQ(err, -EINVAL, "no_cnt"); + + for (i = 0; i < KMULTI_CNT; i++) + addrs[i] = 0; + info.kprobe_multi.count = KMULTI_CNT - 1; + err = bpf_link_get_info_by_fd(fd, &info, &len); + ASSERT_EQ(err, -ENOSPC, "smaller_cnt"); + for (i = 0; i < KMULTI_CNT - 1; i++) + ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs"); + ASSERT_EQ(addrs[i], 0, "kmulti_addrs"); + + for (i = 0; i < KMULTI_CNT; i++) + addrs[i] = 0; + info.kprobe_multi.count = KMULTI_CNT + 1; + err = bpf_link_get_info_by_fd(fd, &info, &len); + ASSERT_EQ(err, 0, "bigger_cnt"); + for (i = 0; i < KMULTI_CNT; i++) + ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs"); + + info.kprobe_multi.count = KMULTI_CNT; + info.kprobe_multi.addrs = 0x1; /* invalid addr */ + err = bpf_link_get_info_by_fd(fd, &info, &len); + ASSERT_EQ(err, -EFAULT, "invalid_buff"); +} + +static int symbols_cmp_r(const void *a, const void *b) +{ + const char **str_a = (const char **) a; + const char **str_b = (const char **) b; + + return strcmp(*str_a, *str_b); +} + +static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel, + bool retprobe, bool invalid) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + int link_fd, err; + + opts.syms = kmulti_syms; + opts.cnt = KMULTI_CNT; + opts.retprobe = retprobe; + skel->links.kmulti_run = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, + NULL, &opts); + if (!ASSERT_OK_PTR(skel->links.kmulti_run, "attach_kprobe_multi")) + return; + + link_fd = bpf_link__fd(skel->links.kmulti_run); + if (!invalid) { + err = verify_kmulti_link_info(link_fd, retprobe); + ASSERT_OK(err, "verify_kmulti_link_info"); + } else { + verify_kmulti_invalid_user_buffer(link_fd); + } + bpf_link__detach(skel->links.kmulti_run); +} + +void test_fill_link_info(void) +{ + struct test_fill_link_info *skel; + int i; + + skel = test_fill_link_info__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + /* load kallsyms to compare the addr */ + if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh")) + goto cleanup; + + kprobe_addr = ksym_get_addr(KPROBE_FUNC); + if (test__start_subtest("kprobe_link_info")) + test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false); + if (test__start_subtest("kretprobe_link_info")) + test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KRETPROBE, false); + if (test__start_subtest("kprobe_invalid_ubuff")) + test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true); + if (test__start_subtest("tracepoint_link_info")) + test_tp_fill_link_info(skel); + + uprobe_offset = get_uprobe_offset(&uprobe_func); + if (test__start_subtest("uprobe_link_info")) + test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_UPROBE); + if (test__start_subtest("uretprobe_link_info")) + test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_URETPROBE); + + qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r); + for (i = 0; i < KMULTI_CNT; i++) + kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]); + if (test__start_subtest("kprobe_multi_link_info")) + test_kprobe_multi_fill_link_info(skel, false, false); + if (test__start_subtest("kretprobe_multi_link_info")) + test_kprobe_multi_fill_link_info(skel, true, false); + if (test__start_subtest("kprobe_multi_invalid_ubuff")) 
+ test_kprobe_multi_fill_link_info(skel, true, true); + +cleanup: + test_fill_link_info__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c index 28cf63963cb7..64a9c95d4acf 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c +++ b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c @@ -30,7 +30,9 @@ void test_get_func_args_test(void) prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "test_run"); - ASSERT_EQ(topts.retval, 1234, "test_run"); + + ASSERT_EQ(topts.retval >> 16, 1, "test_run"); + ASSERT_EQ(topts.retval & 0xffff, 1234 + 29, "test_run"); ASSERT_EQ(skel->bss->test1_result, 1, "test1_result"); ASSERT_EQ(skel->bss->test2_result, 1, "test2_result"); diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c index fede8ef58b5b..c40242dfa8fb 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c +++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c @@ -1,6 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> #include "get_func_ip_test.skel.h" +#include "get_func_ip_uprobe_test.skel.h" + +static noinline void uprobe_trigger(void) +{ +} static void test_function_entry(void) { @@ -20,6 +25,8 @@ static void test_function_entry(void) if (!ASSERT_OK(err, "get_func_ip_test__attach")) goto cleanup; + skel->bss->uprobe_trigger = (unsigned long) uprobe_trigger; + prog_fd = bpf_program__fd(skel->progs.test1); err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "test_run"); @@ -30,21 +37,31 @@ static void test_function_entry(void) ASSERT_OK(err, "test_run"); + uprobe_trigger(); + ASSERT_EQ(skel->bss->test1_result, 1, "test1_result"); ASSERT_EQ(skel->bss->test2_result, 1, "test2_result"); ASSERT_EQ(skel->bss->test3_result, 1, "test3_result"); ASSERT_EQ(skel->bss->test4_result, 1, "test4_result"); ASSERT_EQ(skel->bss->test5_result, 1, "test5_result"); + ASSERT_EQ(skel->bss->test7_result, 1, "test7_result"); + ASSERT_EQ(skel->bss->test8_result, 1, "test8_result"); cleanup: get_func_ip_test__destroy(skel); } -/* test6 is x86_64 specific because of the instruction - * offset, disabling it for all other archs - */ #ifdef __x86_64__ -static void test_function_body(void) +extern void uprobe_trigger_body(void); +asm( +".globl uprobe_trigger_body\n" +".type uprobe_trigger_body, @function\n" +"uprobe_trigger_body:\n" +" nop\n" +" ret\n" +); + +static void test_function_body_kprobe(void) { struct get_func_ip_test *skel = NULL; LIBBPF_OPTS(bpf_test_run_opts, topts); @@ -56,6 +73,9 @@ static void test_function_body(void) if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open")) return; + /* test6 is x86_64 specific and is disabled by default, + * enable it for body test. 
+ */ bpf_program__set_autoload(skel->progs.test6, true); err = get_func_ip_test__load(skel); @@ -79,6 +99,35 @@ cleanup: bpf_link__destroy(link6); get_func_ip_test__destroy(skel); } + +static void test_function_body_uprobe(void) +{ + struct get_func_ip_uprobe_test *skel = NULL; + int err; + + skel = get_func_ip_uprobe_test__open_and_load(); + if (!ASSERT_OK_PTR(skel, "get_func_ip_uprobe_test__open_and_load")) + return; + + err = get_func_ip_uprobe_test__attach(skel); + if (!ASSERT_OK(err, "get_func_ip_test__attach")) + goto cleanup; + + skel->bss->uprobe_trigger_body = (unsigned long) uprobe_trigger_body; + + uprobe_trigger_body(); + + ASSERT_EQ(skel->bss->test1_result, 1, "test1_result"); + +cleanup: + get_func_ip_uprobe_test__destroy(skel); +} + +static void test_function_body(void) +{ + test_function_body_kprobe(); + test_function_body_uprobe(); +} #else #define test_function_body() #endif diff --git a/tools/testing/selftests/bpf/prog_tests/global_map_resize.c b/tools/testing/selftests/bpf/prog_tests/global_map_resize.c new file mode 100644 index 000000000000..56b5baef35c8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/global_map_resize.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ +#include <errno.h> +#include <sys/syscall.h> +#include <unistd.h> +#include "test_global_map_resize.skel.h" +#include "test_progs.h" + +static void run_prog_bss_array_sum(void) +{ + (void)syscall(__NR_getpid); +} + +static void run_prog_data_array_sum(void) +{ + (void)syscall(__NR_getuid); +} + +static void global_map_resize_bss_subtest(void) +{ + int err; + struct test_global_map_resize *skel; + struct bpf_map *map; + const __u32 desired_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2; + size_t array_len, actual_sz, new_sz; + + skel = test_global_map_resize__open(); + if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open")) + goto teardown; + + /* set some initial value before resizing. + * it is expected this non-zero value will be preserved + * while resizing. 
+ */ + skel->bss->array[0] = 1; + + /* resize map value and verify the new size */ + map = skel->maps.bss; + err = bpf_map__set_value_size(map, desired_sz); + if (!ASSERT_OK(err, "bpf_map__set_value_size")) + goto teardown; + if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize")) + goto teardown; + + new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus(); + err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz); + ASSERT_OK(err, "percpu_arr_resize"); + + /* set the expected number of elements based on the resized array */ + array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->bss->array[0]); + if (!ASSERT_GT(array_len, 1, "array_len")) + goto teardown; + + skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz); + if (!ASSERT_OK_PTR(skel->bss, "bpf_map__initial_value (ptr)")) + goto teardown; + if (!ASSERT_EQ(actual_sz, desired_sz, "bpf_map__initial_value (size)")) + goto teardown; + + /* fill the newly resized array with ones, + * skipping the first element which was previously set + */ + for (int i = 1; i < array_len; i++) + skel->bss->array[i] = 1; + + /* set global const values before loading */ + skel->rodata->pid = getpid(); + skel->rodata->bss_array_len = array_len; + skel->rodata->data_array_len = 1; + + err = test_global_map_resize__load(skel); + if (!ASSERT_OK(err, "test_global_map_resize__load")) + goto teardown; + err = test_global_map_resize__attach(skel); + if (!ASSERT_OK(err, "test_global_map_resize__attach")) + goto teardown; + + /* run the bpf program which will sum the contents of the array. + * since the array was filled with ones,verify the sum equals array_len + */ + run_prog_bss_array_sum(); + if (!ASSERT_EQ(skel->bss->sum, array_len, "sum")) + goto teardown; + +teardown: + test_global_map_resize__destroy(skel); +} + +static void global_map_resize_data_subtest(void) +{ + struct test_global_map_resize *skel; + struct bpf_map *map; + const __u32 desired_sz = sysconf(_SC_PAGE_SIZE) * 2; + size_t array_len, actual_sz, new_sz; + int err; + + skel = test_global_map_resize__open(); + if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open")) + goto teardown; + + /* set some initial value before resizing. + * it is expected this non-zero value will be preserved + * while resizing. 
+ */ + skel->data_custom->my_array[0] = 1; + + /* resize map value and verify the new size */ + map = skel->maps.data_custom; + err = bpf_map__set_value_size(map, desired_sz); + if (!ASSERT_OK(err, "bpf_map__set_value_size")) + goto teardown; + if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize")) + goto teardown; + + new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus(); + err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz); + ASSERT_OK(err, "percpu_arr_resize"); + + /* set the expected number of elements based on the resized array */ + array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->data_custom->my_array[0]); + if (!ASSERT_GT(array_len, 1, "array_len")) + goto teardown; + + skel->data_custom = bpf_map__initial_value(skel->maps.data_custom, &actual_sz); + if (!ASSERT_OK_PTR(skel->data_custom, "bpf_map__initial_value (ptr)")) + goto teardown; + if (!ASSERT_EQ(actual_sz, desired_sz, "bpf_map__initial_value (size)")) + goto teardown; + + /* fill the newly resized array with ones, + * skipping the first element which was previously set + */ + for (int i = 1; i < array_len; i++) + skel->data_custom->my_array[i] = 1; + + /* set global const values before loading */ + skel->rodata->pid = getpid(); + skel->rodata->bss_array_len = 1; + skel->rodata->data_array_len = array_len; + + err = test_global_map_resize__load(skel); + if (!ASSERT_OK(err, "test_global_map_resize__load")) + goto teardown; + err = test_global_map_resize__attach(skel); + if (!ASSERT_OK(err, "test_global_map_resize__attach")) + goto teardown; + + /* run the bpf program which will sum the contents of the array. + * since the array was filled with ones,verify the sum equals array_len + */ + run_prog_data_array_sum(); + if (!ASSERT_EQ(skel->bss->sum, array_len, "sum")) + goto teardown; + +teardown: + test_global_map_resize__destroy(skel); +} + +static void global_map_resize_invalid_subtest(void) +{ + int err; + struct test_global_map_resize *skel; + struct bpf_map *map; + __u32 element_sz, desired_sz; + + skel = test_global_map_resize__open(); + if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open")) + return; + + /* attempt to resize a global datasec map to size + * which does NOT align with array + */ + map = skel->maps.data_custom; + if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.custom initial btf")) + goto teardown; + /* set desired size a fraction of element size beyond an aligned size */ + element_sz = sizeof(skel->data_custom->my_array[0]); + desired_sz = element_sz + element_sz / 2; + /* confirm desired size does NOT align with array */ + if (!ASSERT_NEQ(desired_sz % element_sz, 0, "my_array alignment")) + goto teardown; + err = bpf_map__set_value_size(map, desired_sz); + /* confirm resize is OK but BTF info is cleared */ + if (!ASSERT_OK(err, ".data.custom bpf_map__set_value_size") || + !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.custom clear btf key") || + !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.custom clear btf val")) + goto teardown; + + /* attempt to resize a global datasec map whose only var is NOT an array */ + map = skel->maps.data_non_array; + if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array initial btf")) + goto teardown; + /* set desired size to arbitrary value */ + desired_sz = 1024; + err = bpf_map__set_value_size(map, desired_sz); + /* confirm resize is OK but BTF info is cleared */ + if (!ASSERT_OK(err, ".data.non_array bpf_map__set_value_size") || + 
!ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.non_array clear btf key") || + !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array clear btf val")) + goto teardown; + + /* attempt to resize a global datasec map + * whose last var is NOT an array + */ + map = skel->maps.data_array_not_last; + if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last initial btf")) + goto teardown; + /* set desired size to a multiple of element size */ + element_sz = sizeof(skel->data_array_not_last->my_array_first[0]); + desired_sz = element_sz * 8; + /* confirm desired size aligns with array */ + if (!ASSERT_EQ(desired_sz % element_sz, 0, "my_array_first alignment")) + goto teardown; + err = bpf_map__set_value_size(map, desired_sz); + /* confirm resize is OK but BTF info is cleared */ + if (!ASSERT_OK(err, ".data.array_not_last bpf_map__set_value_size") || + !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.array_not_last clear btf key") || + !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last clear btf val")) + goto teardown; + +teardown: + test_global_map_resize__destroy(skel); +} + +void test_global_map_resize(void) +{ + if (test__start_subtest("global_map_resize_bss")) + global_map_resize_bss_subtest(); + + if (test__start_subtest("global_map_resize_data")) + global_map_resize_data_subtest(); + + if (test__start_subtest("global_map_resize_invalid")) + global_map_resize_invalid_subtest(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c new file mode 100644 index 000000000000..57c814f5f6a7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <net/if.h> +#include <linux/netfilter.h> +#include <network_helpers.h> +#include "ip_check_defrag.skel.h" +#include "ip_check_defrag_frags.h" + +/* + * This selftest spins up a client and an echo server, each in their own + * network namespace. The client will send a fragmented message to the server. + * The prog attached to the server will shoot down any fragments. Thus, if + * the server is able to correctly echo back the message to the client, we will + * have verified that netfilter is reassembling packets for us. 
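+ *
+ * Note: the defrag prog is attached as a netfilter link with the
+ * BPF_F_NETFILTER_IP_DEFRAG flag (see attach() below), which requests that
+ * the kernel reassemble IP fragments before the prog runs. Any fragment that
+ * still reaches the prog is shot down and counted in 'shootdowns', and the
+ * test expects that counter to stay at zero.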
+ * + * Topology: + * ========= + * NS0 | NS1 + * | + * client | server + * ---------- | ---------- + * | veth0 | --------- | veth1 | + * ---------- peer ---------- + * | + * | with bpf + */ + +#define NS0 "defrag_ns0" +#define NS1 "defrag_ns1" +#define VETH0 "veth0" +#define VETH1 "veth1" +#define VETH0_ADDR "172.16.1.100" +#define VETH0_ADDR6 "fc00::100" +/* The following constants must stay in sync with `generate_udp_fragments.py` */ +#define VETH1_ADDR "172.16.1.200" +#define VETH1_ADDR6 "fc00::200" +#define CLIENT_PORT 48878 +#define SERVER_PORT 48879 +#define MAGIC_MESSAGE "THIS IS THE ORIGINAL MESSAGE, PLEASE REASSEMBLE ME" + +static int setup_topology(bool ipv6) +{ + bool up; + int i; + + SYS(fail, "ip netns add " NS0); + SYS(fail, "ip netns add " NS1); + SYS(fail, "ip link add " VETH0 " netns " NS0 " type veth peer name " VETH1 " netns " NS1); + if (ipv6) { + SYS(fail, "ip -6 -net " NS0 " addr add " VETH0_ADDR6 "/64 dev " VETH0 " nodad"); + SYS(fail, "ip -6 -net " NS1 " addr add " VETH1_ADDR6 "/64 dev " VETH1 " nodad"); + } else { + SYS(fail, "ip -net " NS0 " addr add " VETH0_ADDR "/24 dev " VETH0); + SYS(fail, "ip -net " NS1 " addr add " VETH1_ADDR "/24 dev " VETH1); + } + SYS(fail, "ip -net " NS0 " link set dev " VETH0 " up"); + SYS(fail, "ip -net " NS1 " link set dev " VETH1 " up"); + + /* Wait for up to 5s for links to come up */ + for (i = 0; i < 5; ++i) { + if (ipv6) + up = !system("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6 " &>/dev/null"); + else + up = !system("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR " &>/dev/null"); + + if (up) + break; + } + + return 0; +fail: + return -1; +} + +static void cleanup_topology(void) +{ + SYS_NOFAIL("test -f /var/run/netns/" NS0 " && ip netns delete " NS0); + SYS_NOFAIL("test -f /var/run/netns/" NS1 " && ip netns delete " NS1); +} + +static int attach(struct ip_check_defrag *skel, bool ipv6) +{ + LIBBPF_OPTS(bpf_netfilter_opts, opts, + .pf = ipv6 ? 
NFPROTO_IPV6 : NFPROTO_IPV4, + .priority = 42, + .flags = BPF_F_NETFILTER_IP_DEFRAG); + struct nstoken *nstoken; + int err = -1; + + nstoken = open_netns(NS1); + + skel->links.defrag = bpf_program__attach_netfilter(skel->progs.defrag, &opts); + if (!ASSERT_OK_PTR(skel->links.defrag, "program attach")) + goto out; + + err = 0; +out: + close_netns(nstoken); + return err; +} + +static int send_frags(int client) +{ + struct sockaddr_storage saddr; + struct sockaddr *saddr_p; + socklen_t saddr_len; + int err; + + saddr_p = (struct sockaddr *)&saddr; + err = make_sockaddr(AF_INET, VETH1_ADDR, SERVER_PORT, &saddr, &saddr_len); + if (!ASSERT_OK(err, "make_sockaddr")) + return -1; + + err = sendto(client, frag_0, sizeof(frag_0), 0, saddr_p, saddr_len); + if (!ASSERT_GE(err, 0, "sendto frag_0")) + return -1; + + err = sendto(client, frag_1, sizeof(frag_1), 0, saddr_p, saddr_len); + if (!ASSERT_GE(err, 0, "sendto frag_1")) + return -1; + + err = sendto(client, frag_2, sizeof(frag_2), 0, saddr_p, saddr_len); + if (!ASSERT_GE(err, 0, "sendto frag_2")) + return -1; + + return 0; +} + +static int send_frags6(int client) +{ + struct sockaddr_storage saddr; + struct sockaddr *saddr_p; + socklen_t saddr_len; + int err; + + saddr_p = (struct sockaddr *)&saddr; + /* Port needs to be set to 0 for raw ipv6 socket for some reason */ + err = make_sockaddr(AF_INET6, VETH1_ADDR6, 0, &saddr, &saddr_len); + if (!ASSERT_OK(err, "make_sockaddr")) + return -1; + + err = sendto(client, frag6_0, sizeof(frag6_0), 0, saddr_p, saddr_len); + if (!ASSERT_GE(err, 0, "sendto frag6_0")) + return -1; + + err = sendto(client, frag6_1, sizeof(frag6_1), 0, saddr_p, saddr_len); + if (!ASSERT_GE(err, 0, "sendto frag6_1")) + return -1; + + err = sendto(client, frag6_2, sizeof(frag6_2), 0, saddr_p, saddr_len); + if (!ASSERT_GE(err, 0, "sendto frag6_2")) + return -1; + + return 0; +} + +void test_bpf_ip_check_defrag_ok(bool ipv6) +{ + struct network_helper_opts rx_opts = { + .timeout_ms = 1000, + .noconnect = true, + }; + struct network_helper_opts tx_ops = { + .timeout_ms = 1000, + .type = SOCK_RAW, + .proto = IPPROTO_RAW, + .noconnect = true, + }; + struct sockaddr_storage caddr; + struct ip_check_defrag *skel; + struct nstoken *nstoken; + int client_tx_fd = -1; + int client_rx_fd = -1; + socklen_t caddr_len; + int srv_fd = -1; + char buf[1024]; + int len, err; + + skel = ip_check_defrag__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + if (!ASSERT_OK(setup_topology(ipv6), "setup_topology")) + goto out; + + if (!ASSERT_OK(attach(skel, ipv6), "attach")) + goto out; + + /* Start server in ns1 */ + nstoken = open_netns(NS1); + if (!ASSERT_OK_PTR(nstoken, "setns ns1")) + goto out; + srv_fd = start_server(ipv6 ? 
AF_INET6 : AF_INET, SOCK_DGRAM, NULL, SERVER_PORT, 0); + close_netns(nstoken); + if (!ASSERT_GE(srv_fd, 0, "start_server")) + goto out; + + /* Open tx raw socket in ns0 */ + nstoken = open_netns(NS0); + if (!ASSERT_OK_PTR(nstoken, "setns ns0")) + goto out; + client_tx_fd = connect_to_fd_opts(srv_fd, &tx_ops); + close_netns(nstoken); + if (!ASSERT_GE(client_tx_fd, 0, "connect_to_fd_opts")) + goto out; + + /* Open rx socket in ns0 */ + nstoken = open_netns(NS0); + if (!ASSERT_OK_PTR(nstoken, "setns ns0")) + goto out; + client_rx_fd = connect_to_fd_opts(srv_fd, &rx_opts); + close_netns(nstoken); + if (!ASSERT_GE(client_rx_fd, 0, "connect_to_fd_opts")) + goto out; + + /* Bind rx socket to a premeditated port */ + memset(&caddr, 0, sizeof(caddr)); + nstoken = open_netns(NS0); + if (!ASSERT_OK_PTR(nstoken, "setns ns0")) + goto out; + if (ipv6) { + struct sockaddr_in6 *c = (struct sockaddr_in6 *)&caddr; + + c->sin6_family = AF_INET6; + inet_pton(AF_INET6, VETH0_ADDR6, &c->sin6_addr); + c->sin6_port = htons(CLIENT_PORT); + err = bind(client_rx_fd, (struct sockaddr *)c, sizeof(*c)); + } else { + struct sockaddr_in *c = (struct sockaddr_in *)&caddr; + + c->sin_family = AF_INET; + inet_pton(AF_INET, VETH0_ADDR, &c->sin_addr); + c->sin_port = htons(CLIENT_PORT); + err = bind(client_rx_fd, (struct sockaddr *)c, sizeof(*c)); + } + close_netns(nstoken); + if (!ASSERT_OK(err, "bind")) + goto out; + + /* Send message in fragments */ + if (ipv6) { + if (!ASSERT_OK(send_frags6(client_tx_fd), "send_frags6")) + goto out; + } else { + if (!ASSERT_OK(send_frags(client_tx_fd), "send_frags")) + goto out; + } + + if (!ASSERT_EQ(skel->bss->shootdowns, 0, "shootdowns")) + goto out; + + /* Receive reassembled msg on server and echo back to client */ + caddr_len = sizeof(caddr); + len = recvfrom(srv_fd, buf, sizeof(buf), 0, (struct sockaddr *)&caddr, &caddr_len); + if (!ASSERT_GE(len, 0, "server recvfrom")) + goto out; + len = sendto(srv_fd, buf, len, 0, (struct sockaddr *)&caddr, caddr_len); + if (!ASSERT_GE(len, 0, "server sendto")) + goto out; + + /* Expect reassembed message to be echoed back */ + len = recvfrom(client_rx_fd, buf, sizeof(buf), 0, NULL, NULL); + if (!ASSERT_EQ(len, sizeof(MAGIC_MESSAGE) - 1, "client short read")) + goto out; + +out: + if (client_rx_fd != -1) + close(client_rx_fd); + if (client_tx_fd != -1) + close(client_tx_fd); + if (srv_fd != -1) + close(srv_fd); + cleanup_topology(); + ip_check_defrag__destroy(skel); +} + +void test_bpf_ip_check_defrag(void) +{ + if (test__start_subtest("v4")) + test_bpf_ip_check_defrag_ok(false); + if (test__start_subtest("v6")) + test_bpf_ip_check_defrag_ok(true); +} diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index a543742cd7bd..2eb71559713c 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -173,8 +173,8 @@ static void verify_fail(struct kfunc_test_params *param) case tc_test: topts.data_in = &pkt_v4; topts.data_size_in = sizeof(pkt_v4); - break; topts.repeat = 1; + break; } skel = kfunc_call_fail__open_opts(&opts); diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index 2173c4bb555e..4041cfa670eb 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -3,6 +3,7 @@ #include "kprobe_multi.skel.h" #include "trace_helpers.h" #include 
"kprobe_multi_empty.skel.h" +#include "kprobe_multi_override.skel.h" #include "bpf/libbpf_internal.h" #include "bpf/hashmap.h" @@ -304,14 +305,6 @@ cleanup: kprobe_multi__destroy(skel); } -static inline __u64 get_time_ns(void) -{ - struct timespec t; - - clock_gettime(CLOCK_MONOTONIC, &t); - return (__u64) t.tv_sec * 1000000000 + t.tv_nsec; -} - static size_t symbol_hash(long key, void *ctx __maybe_unused) { return str_hash((const char *) key); @@ -461,6 +454,40 @@ cleanup: } } +static void test_attach_override(void) +{ + struct kprobe_multi_override *skel = NULL; + struct bpf_link *link = NULL; + + skel = kprobe_multi_override__open_and_load(); + if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load")) + goto cleanup; + + /* The test_override calls bpf_override_return so it should fail + * to attach to bpf_fentry_test1 function, which is not on error + * injection list. + */ + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override, + "bpf_fentry_test1", NULL); + if (!ASSERT_ERR_PTR(link, "override_attached_bpf_fentry_test1")) { + bpf_link__destroy(link); + goto cleanup; + } + + /* The should_fail_bio function is on error injection list, + * attach should succeed. + */ + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override, + "should_fail_bio", NULL); + if (!ASSERT_OK_PTR(link, "override_attached_should_fail_bio")) + goto cleanup; + + bpf_link__destroy(link); + +cleanup: + kprobe_multi_override__destroy(skel); +} + void serial_test_kprobe_multi_bench_attach(void) { if (test__start_subtest("kernel")) @@ -488,4 +515,6 @@ void test_kprobe_multi_test(void) test_attach_api_syms(); if (test__start_subtest("attach_api_fails")) test_attach_api_fails(); + if (test__start_subtest("attach_override")) + test_attach_override(); } diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c index f63309fd0e28..18cf7b17463d 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_list.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -23,7 +23,7 @@ static struct { "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \ { #test "_missing_lock_pop_back", \ "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, - TEST(kptr, 32) + TEST(kptr, 40) TEST(global, 16) TEST(map, 0) TEST(inner_map, 0) @@ -31,7 +31,7 @@ static struct { #define TEST(test, op) \ { #test "_kptr_incorrect_lock_" #op, \ "held lock and object are not in the same allocation\n" \ - "bpf_spin_lock at off=32 must be held for bpf_list_head" }, \ + "bpf_spin_lock at off=40 must be held for bpf_list_head" }, \ { #test "_global_incorrect_lock_" #op, \ "held lock and object are not in the same allocation\n" \ "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \ @@ -84,23 +84,23 @@ static struct { { "double_push_back", "arg#1 expected pointer to allocated object" }, { "no_node_value_type", "bpf_list_node not found at offset=0" }, { "incorrect_value_type", - "operation on bpf_list_head expects arg#1 bpf_list_node at offset=40 in struct foo, " + "operation on bpf_list_head expects arg#1 bpf_list_node at offset=48 in struct foo, " "but arg is at offset=0 in struct bar" }, { "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, - { "incorrect_node_off1", "bpf_list_node not found at offset=41" }, - { "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=40 in struct foo" }, + { "incorrect_node_off1", "bpf_list_node not found at offset=49" }, + { 
"incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=48 in struct foo" }, { "no_head_type", "bpf_list_head not found at offset=0" }, { "incorrect_head_var_off1", "R1 doesn't have constant offset" }, { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, - { "incorrect_head_off1", "bpf_list_head not found at offset=17" }, + { "incorrect_head_off1", "bpf_list_head not found at offset=25" }, { "incorrect_head_off2", "bpf_list_head not found at offset=1" }, { "pop_front_off", - "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) " - "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n" + "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) " + "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n" "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" }, { "pop_back_off", - "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) " - "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n" + "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) " + "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n" "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" }, }; @@ -257,7 +257,7 @@ static struct btf *init_btf(void) hid = btf__add_struct(btf, "bpf_list_head", 16); if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head")) goto end; - nid = btf__add_struct(btf, "bpf_list_node", 16); + nid = btf__add_struct(btf, "bpf_list_node", 24); if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node")) goto end; return btf; @@ -276,7 +276,7 @@ static void list_and_rb_node_same_struct(bool refcount_field) if (!ASSERT_OK_PTR(btf, "init_btf")) return; - bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 24); + bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 32); if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node")) return; @@ -286,17 +286,17 @@ static void list_and_rb_node_same_struct(bool refcount_field) return; } - id = btf__add_struct(btf, "bar", refcount_field ? 44 : 40); + id = btf__add_struct(btf, "bar", refcount_field ? 
60 : 56); if (!ASSERT_GT(id, 0, "btf__add_struct bar")) return; err = btf__add_field(btf, "a", LIST_NODE, 0, 0); if (!ASSERT_OK(err, "btf__add_field bar::a")) return; - err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 128, 0); + err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 192, 0); if (!ASSERT_OK(err, "btf__add_field bar::c")) return; if (refcount_field) { - err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 320, 0); + err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 448, 0); if (!ASSERT_OK(err, "btf__add_field bar::ref")) return; } @@ -527,7 +527,7 @@ static void test_btf(void) btf = init_btf(); if (!ASSERT_OK_PTR(btf, "init_btf")) break; - id = btf__add_struct(btf, "foo", 36); + id = btf__add_struct(btf, "foo", 44); if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) break; err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -536,7 +536,7 @@ static void test_btf(void) err = btf__add_field(btf, "b", LIST_NODE, 128, 0); if (!ASSERT_OK(err, "btf__add_field foo::b")) break; - err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0); if (!ASSERT_OK(err, "btf__add_field foo::c")) break; id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0); @@ -553,7 +553,7 @@ static void test_btf(void) btf = init_btf(); if (!ASSERT_OK_PTR(btf, "init_btf")) break; - id = btf__add_struct(btf, "foo", 36); + id = btf__add_struct(btf, "foo", 44); if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) break; err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -562,13 +562,13 @@ static void test_btf(void) err = btf__add_field(btf, "b", LIST_NODE, 128, 0); if (!ASSERT_OK(err, "btf__add_field foo::b")) break; - err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0); if (!ASSERT_OK(err, "btf__add_field foo::c")) break; id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0); if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b")) break; - id = btf__add_struct(btf, "bar", 36); + id = btf__add_struct(btf, "bar", 44); if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) break; err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -577,7 +577,7 @@ static void test_btf(void) err = btf__add_field(btf, "b", LIST_NODE, 128, 0); if (!ASSERT_OK(err, "btf__add_field bar::b")) break; - err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0); if (!ASSERT_OK(err, "btf__add_field bar::c")) break; id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0); @@ -594,19 +594,19 @@ static void test_btf(void) btf = init_btf(); if (!ASSERT_OK_PTR(btf, "init_btf")) break; - id = btf__add_struct(btf, "foo", 20); + id = btf__add_struct(btf, "foo", 28); if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) break; err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); if (!ASSERT_OK(err, "btf__add_field foo::a")) break; - err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0); if (!ASSERT_OK(err, "btf__add_field foo::b")) break; id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0); if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a")) break; - id = btf__add_struct(btf, "bar", 16); + id = btf__add_struct(btf, "bar", 24); if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) break; err = btf__add_field(btf, "a", LIST_NODE, 0, 0); @@ -623,19 +623,19 @@ static void test_btf(void) btf = init_btf(); if (!ASSERT_OK_PTR(btf, "init_btf")) break; - id = btf__add_struct(btf, "foo", 20); + id = btf__add_struct(btf, "foo", 28); if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) break; err = btf__add_field(btf, 
"a", LIST_HEAD, 0, 0); if (!ASSERT_OK(err, "btf__add_field foo::a")) break; - err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0); if (!ASSERT_OK(err, "btf__add_field foo::b")) break; id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0); if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b")) break; - id = btf__add_struct(btf, "bar", 36); + id = btf__add_struct(btf, "bar", 44); if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) break; err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -644,13 +644,13 @@ static void test_btf(void) err = btf__add_field(btf, "b", LIST_NODE, 128, 0); if (!ASSERT_OK(err, "btf__add_field bar::b")) break; - err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0); if (!ASSERT_OK(err, "btf__add_field bar::c")) break; id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0); if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a")) break; - id = btf__add_struct(btf, "baz", 16); + id = btf__add_struct(btf, "baz", 24); if (!ASSERT_EQ(id, 9, "btf__add_struct baz")) break; err = btf__add_field(btf, "a", LIST_NODE, 0, 0); @@ -667,7 +667,7 @@ static void test_btf(void) btf = init_btf(); if (!ASSERT_OK_PTR(btf, "init_btf")) break; - id = btf__add_struct(btf, "foo", 36); + id = btf__add_struct(btf, "foo", 44); if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) break; err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -676,13 +676,13 @@ static void test_btf(void) err = btf__add_field(btf, "b", LIST_NODE, 128, 0); if (!ASSERT_OK(err, "btf__add_field foo::b")) break; - err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0); if (!ASSERT_OK(err, "btf__add_field foo::c")) break; id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0); if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b")) break; - id = btf__add_struct(btf, "bar", 36); + id = btf__add_struct(btf, "bar", 44); if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) break; err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -691,13 +691,13 @@ static void test_btf(void) err = btf__add_field(btf, "b", LIST_NODE, 128, 0); if (!ASSERT_OK(err, "btf__add_field bar:b")) break; - err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0); if (!ASSERT_OK(err, "btf__add_field bar:c")) break; id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0); if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a")) break; - id = btf__add_struct(btf, "baz", 16); + id = btf__add_struct(btf, "baz", 24); if (!ASSERT_EQ(id, 9, "btf__add_struct baz")) break; err = btf__add_field(btf, "a", LIST_NODE, 0, 0); @@ -726,7 +726,7 @@ static void test_btf(void) id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0); if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b")) break; - id = btf__add_struct(btf, "bar", 36); + id = btf__add_struct(btf, "bar", 44); if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) break; err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -735,13 +735,13 @@ static void test_btf(void) err = btf__add_field(btf, "b", LIST_NODE, 128, 0); if (!ASSERT_OK(err, "btf__add_field bar::b")) break; - err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0); if (!ASSERT_OK(err, "btf__add_field bar::c")) break; id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0); if (!ASSERT_EQ(id, 8, "btf__add_decl_tag")) break; - id = btf__add_struct(btf, "baz", 36); + id = btf__add_struct(btf, "baz", 44); if (!ASSERT_EQ(id, 9, "btf__add_struct baz")) 
break; err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -750,13 +750,13 @@ static void test_btf(void) err = btf__add_field(btf, "b", LIST_NODE, 128, 0); if (!ASSERT_OK(err, "btf__add_field bar::b")) break; - err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0); if (!ASSERT_OK(err, "btf__add_field bar::c")) break; id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0); if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a")) break; - id = btf__add_struct(btf, "bam", 16); + id = btf__add_struct(btf, "bam", 24); if (!ASSERT_EQ(id, 11, "btf__add_struct bam")) break; err = btf__add_field(btf, "a", LIST_NODE, 0, 0); diff --git a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c index 76f1da877f81..b25b870f87ba 100644 --- a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c +++ b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c @@ -5,6 +5,7 @@ #include <network_helpers.h> #include "local_kptr_stash.skel.h" +#include "local_kptr_stash_fail.skel.h" static void test_local_kptr_stash_simple(void) { LIBBPF_OPTS(bpf_test_run_opts, opts, @@ -26,6 +27,27 @@ static void test_local_kptr_stash_simple(void) local_kptr_stash__destroy(skel); } +static void test_local_kptr_stash_plain(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct local_kptr_stash *skel; + int ret; + + skel = local_kptr_stash__open_and_load(); + if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_plain), &opts); + ASSERT_OK(ret, "local_kptr_stash_add_plain run"); + ASSERT_OK(opts.retval, "local_kptr_stash_add_plain retval"); + + local_kptr_stash__destroy(skel); +} + static void test_local_kptr_stash_unstash(void) { LIBBPF_OPTS(bpf_test_run_opts, opts, @@ -51,10 +73,19 @@ static void test_local_kptr_stash_unstash(void) local_kptr_stash__destroy(skel); } -void test_local_kptr_stash_success(void) +static void test_local_kptr_stash_fail(void) +{ + RUN_TESTS(local_kptr_stash_fail); +} + +void test_local_kptr_stash(void) { if (test__start_subtest("local_kptr_stash_simple")) test_local_kptr_stash_simple(); + if (test__start_subtest("local_kptr_stash_plain")) + test_local_kptr_stash_plain(); if (test__start_subtest("local_kptr_stash_unstash")) test_local_kptr_stash_unstash(); + if (test__start_subtest("local_kptr_stash_fail")) + test_local_kptr_stash_fail(); } diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c index dba71d98a227..effd78b2a657 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_fixup.c +++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c @@ -124,7 +124,7 @@ static void missing_map(void) ASSERT_FALSE(bpf_map__autocreate(skel->maps.missing_map), "missing_map_autocreate"); ASSERT_HAS_SUBSTR(log_buf, - "8: <invalid BPF map reference>\n" + ": <invalid BPF map reference>\n" "BPF map 'missing_map' is referenced but wasn't created\n", "log_buf"); diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h new file mode 100644 index 000000000000..61333f2a03f9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LWT_HELPERS_H +#define __LWT_HELPERS_H + +#include <time.h> +#include <net/if.h> +#include <linux/if_tun.h> 
+#include <linux/icmp.h> + +#include "test_progs.h" + +#define log_err(MSG, ...) \ + fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ + __FILE__, __LINE__, strerror(errno), ##__VA_ARGS__) + +#define RUN_TEST(name) \ + ({ \ + if (test__start_subtest(#name)) \ + if (ASSERT_OK(netns_create(), "netns_create")) { \ + struct nstoken *token = open_netns(NETNS); \ + if (ASSERT_OK_PTR(token, "setns")) { \ + test_ ## name(); \ + close_netns(token); \ + } \ + netns_delete(); \ + } \ + }) + +#define NETNS "ns_lwt" + +static inline int netns_create(void) +{ + return system("ip netns add " NETNS); +} + +static inline int netns_delete(void) +{ + return system("ip netns del " NETNS ">/dev/null 2>&1"); +} + +static int open_tuntap(const char *dev_name, bool need_mac) +{ + int err = 0; + struct ifreq ifr; + int fd = open("/dev/net/tun", O_RDWR); + + if (!ASSERT_GT(fd, 0, "open(/dev/net/tun)")) + return -1; + + ifr.ifr_flags = IFF_NO_PI | (need_mac ? IFF_TAP : IFF_TUN); + memcpy(ifr.ifr_name, dev_name, IFNAMSIZ); + + err = ioctl(fd, TUNSETIFF, &ifr); + if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) { + close(fd); + return -1; + } + + err = fcntl(fd, F_SETFL, O_NONBLOCK); + if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) { + close(fd); + return -1; + } + + return fd; +} + +#define ICMP_PAYLOAD_SIZE 100 + +/* Match an ICMP packet with payload len ICMP_PAYLOAD_SIZE */ +static int __expect_icmp_ipv4(char *buf, ssize_t len) +{ + struct iphdr *ip = (struct iphdr *)buf; + struct icmphdr *icmp = (struct icmphdr *)(ip + 1); + ssize_t min_header_len = sizeof(*ip) + sizeof(*icmp); + + if (len < min_header_len) + return -1; + + if (ip->protocol != IPPROTO_ICMP) + return -1; + + if (icmp->type != ICMP_ECHO) + return -1; + + return len == ICMP_PAYLOAD_SIZE + min_header_len; +} + +typedef int (*filter_t) (char *, ssize_t); + +/* wait_for_packet - wait for a packet that matches the filter + * + * @fd: tun fd/packet socket to read packet + * @filter: filter function, returning 1 if matches + * @timeout: timeout to wait for the packet + * + * Returns 1 if a matching packet is read, 0 if timeout expired, -1 on error. + */ +static int wait_for_packet(int fd, filter_t filter, struct timeval *timeout) +{ + char buf[4096]; + int max_retry = 5; /* in case we read some spurious packets */ + fd_set fds; + + FD_ZERO(&fds); + while (max_retry--) { + /* Linux modifies timeout arg... So make a copy */ + struct timeval copied_timeout = *timeout; + ssize_t ret = -1; + + FD_SET(fd, &fds); + + ret = select(1 + fd, &fds, NULL, NULL, &copied_timeout); + if (ret <= 0) { + if (errno == EINTR) + continue; + else if (errno == EAGAIN || ret == 0) + return 0; + + log_err("select failed"); + return -1; + } + + ret = read(fd, buf, sizeof(buf)); + + if (ret <= 0) { + log_err("read(dev): %ld", ret); + return -1; + } + + if (filter && filter(buf, ret) > 0) + return 1; + } + + return 0; +} + +#endif /* __LWT_HELPERS_H */ diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c new file mode 100644 index 000000000000..59b38569f310 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * Test suite of lwt_xmit BPF programs that redirect packets + * The file tests focus not only if these programs work as expected normally, + * but also if they can handle abnormal situations gracefully. + * + * WARNING + * ------- + * This test suite may crash the kernel, thus should be run in a VM. 
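+ *
+ * Each subtest is wrapped by RUN_TEST() from lwt_helpers.h, which creates the
+ * "ns_lwt" netns, runs the test body inside it, and deletes the netns again,
+ * so the links and routes created by a test are torn down with the namespace.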
+ * + * Setup: + * --------- + * All tests are performed in a single netns. Two lwt encap routes are setup for + * each subtest: + * + * ip route add 10.0.0.0/24 encap bpf xmit <obj> sec "<ingress_sec>" dev link_err + * ip route add 20.0.0.0/24 encap bpf xmit <obj> sec "<egress_sec>" dev link_err + * + * Here <obj> is statically defined to test_lwt_redirect.bpf.o, and each section + * of this object holds a program entry to test. The BPF object is built from + * progs/test_lwt_redirect.c. We didn't use generated BPF skeleton since the + * attachment for lwt programs are not supported by libbpf yet. + * + * For testing, ping commands are run in the test netns: + * + * ping 10.0.0.<ifindex> -c 1 -w 1 -s 100 + * ping 20.0.0.<ifindex> -c 1 -w 1 -s 100 + * + * Scenarios: + * -------------------------------- + * 1. Redirect to a running tap/tun device + * 2. Redirect to a down tap/tun device + * 3. Redirect to a vlan device with lower layer down + * + * Case 1, ping packets should be received by packet socket on target device + * when redirected to ingress, and by tun/tap fd when redirected to egress. + * + * Case 2,3 are considered successful as long as they do not crash the kernel + * as a regression. + * + * Case 1,2 use tap device to test redirect to device that requires MAC + * header, and tun device to test the case with no MAC header added. + */ +#include <sys/socket.h> +#include <net/if.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/if_tun.h> +#include <linux/icmp.h> +#include <arpa/inet.h> +#include <unistd.h> +#include <errno.h> +#include <stdbool.h> +#include <stdlib.h> + +#include "lwt_helpers.h" +#include "test_progs.h" +#include "network_helpers.h" + +#define BPF_OBJECT "test_lwt_redirect.bpf.o" +#define INGRESS_SEC(need_mac) ((need_mac) ? "redir_ingress" : "redir_ingress_nomac") +#define EGRESS_SEC(need_mac) ((need_mac) ? "redir_egress" : "redir_egress_nomac") +#define LOCAL_SRC "10.0.0.1" +#define CIDR_TO_INGRESS "10.0.0.0/24" +#define CIDR_TO_EGRESS "20.0.0.0/24" + +/* ping to redirect toward given dev, with last byte of dest IP being the target + * device index. + * + * Note: ping command inside BPF-CI is busybox version, so it does not have certain + * function, such like -m option to set packet mark. + */ +static void ping_dev(const char *dev, bool is_ingress) +{ + int link_index = if_nametoindex(dev); + char ip[256]; + + if (!ASSERT_GE(link_index, 0, "if_nametoindex")) + return; + + if (is_ingress) + snprintf(ip, sizeof(ip), "10.0.0.%d", link_index); + else + snprintf(ip, sizeof(ip), "20.0.0.%d", link_index); + + /* We won't get a reply. Don't fail here */ + SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1", + ip, ICMP_PAYLOAD_SIZE); +} + +static int new_packet_sock(const char *ifname) +{ + int err = 0; + int ignore_outgoing = 1; + int ifindex = -1; + int s = -1; + + s = socket(AF_PACKET, SOCK_RAW, 0); + if (!ASSERT_GE(s, 0, "socket(AF_PACKET)")) + return -1; + + ifindex = if_nametoindex(ifname); + if (!ASSERT_GE(ifindex, 0, "if_nametoindex")) { + close(s); + return -1; + } + + struct sockaddr_ll addr = { + .sll_family = AF_PACKET, + .sll_protocol = htons(ETH_P_IP), + .sll_ifindex = ifindex, + }; + + err = bind(s, (struct sockaddr *)&addr, sizeof(addr)); + if (!ASSERT_OK(err, "bind(AF_PACKET)")) { + close(s); + return -1; + } + + /* Use packet socket to capture only the ingress, so we can distinguish + * the case where a regression that actually redirects the packet to + * the egress. 
+ */ + err = setsockopt(s, SOL_PACKET, PACKET_IGNORE_OUTGOING, + &ignore_outgoing, sizeof(ignore_outgoing)); + if (!ASSERT_OK(err, "setsockopt(PACKET_IGNORE_OUTGOING)")) { + close(s); + return -1; + } + + err = fcntl(s, F_SETFL, O_NONBLOCK); + if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) { + close(s); + return -1; + } + + return s; +} + +static int expect_icmp(char *buf, ssize_t len) +{ + struct ethhdr *eth = (struct ethhdr *)buf; + + if (len < (ssize_t)sizeof(*eth)) + return -1; + + if (eth->h_proto == htons(ETH_P_IP)) + return __expect_icmp_ipv4((char *)(eth + 1), len - sizeof(*eth)); + + return -1; +} + +static int expect_icmp_nomac(char *buf, ssize_t len) +{ + return __expect_icmp_ipv4(buf, len); +} + +static void send_and_capture_test_packets(const char *test_name, int tap_fd, + const char *target_dev, bool need_mac) +{ + int psock = -1; + struct timeval timeo = { + .tv_sec = 0, + .tv_usec = 250000, + }; + int ret = -1; + + filter_t filter = need_mac ? expect_icmp : expect_icmp_nomac; + + ping_dev(target_dev, false); + + ret = wait_for_packet(tap_fd, filter, &timeo); + if (!ASSERT_EQ(ret, 1, "wait_for_epacket")) { + log_err("%s egress test fails", test_name); + goto out; + } + + psock = new_packet_sock(target_dev); + ping_dev(target_dev, true); + + ret = wait_for_packet(psock, filter, &timeo); + if (!ASSERT_EQ(ret, 1, "wait_for_ipacket")) { + log_err("%s ingress test fails", test_name); + goto out; + } + +out: + if (psock >= 0) + close(psock); +} + +static int setup_redirect_target(const char *target_dev, bool need_mac) +{ + int target_index = -1; + int tap_fd = -1; + + tap_fd = open_tuntap(target_dev, need_mac); + if (!ASSERT_GE(tap_fd, 0, "open_tuntap")) + goto fail; + + target_index = if_nametoindex(target_dev); + if (!ASSERT_GE(target_index, 0, "if_nametoindex")) + goto fail; + + SYS(fail, "ip link add link_err type dummy"); + SYS(fail, "ip link set lo up"); + SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32"); + SYS(fail, "ip link set link_err up"); + SYS(fail, "ip link set %s up", target_dev); + + SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec %s", + CIDR_TO_INGRESS, BPF_OBJECT, INGRESS_SEC(need_mac)); + + SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec %s", + CIDR_TO_EGRESS, BPF_OBJECT, EGRESS_SEC(need_mac)); + + return tap_fd; + +fail: + if (tap_fd >= 0) + close(tap_fd); + return -1; +} + +static void test_lwt_redirect_normal(void) +{ + const char *target_dev = "tap0"; + int tap_fd = -1; + bool need_mac = true; + + tap_fd = setup_redirect_target(target_dev, need_mac); + if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target")) + return; + + send_and_capture_test_packets(__func__, tap_fd, target_dev, need_mac); + close(tap_fd); +} + +static void test_lwt_redirect_normal_nomac(void) +{ + const char *target_dev = "tun0"; + int tap_fd = -1; + bool need_mac = false; + + tap_fd = setup_redirect_target(target_dev, need_mac); + if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target")) + return; + + send_and_capture_test_packets(__func__, tap_fd, target_dev, need_mac); + close(tap_fd); +} + +/* This test aims to prevent regression of future. As long as the kernel does + * not panic, it is considered as success. 
+ */ +static void __test_lwt_redirect_dev_down(bool need_mac) +{ + const char *target_dev = "tap0"; + int tap_fd = -1; + + tap_fd = setup_redirect_target(target_dev, need_mac); + if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target")) + return; + + SYS(out, "ip link set %s down", target_dev); + ping_dev(target_dev, true); + ping_dev(target_dev, false); + +out: + close(tap_fd); +} + +static void test_lwt_redirect_dev_down(void) +{ + __test_lwt_redirect_dev_down(true); +} + +static void test_lwt_redirect_dev_down_nomac(void) +{ + __test_lwt_redirect_dev_down(false); +} + +/* This test aims to prevent regression of future. As long as the kernel does + * not panic, it is considered as success. + */ +static void test_lwt_redirect_dev_carrier_down(void) +{ + const char *lower_dev = "tap0"; + const char *vlan_dev = "vlan100"; + int tap_fd = -1; + + tap_fd = setup_redirect_target(lower_dev, true); + if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target")) + return; + + SYS(out, "ip link add vlan100 link %s type vlan id 100", lower_dev); + SYS(out, "ip link set %s up", vlan_dev); + SYS(out, "ip link set %s down", lower_dev); + ping_dev(vlan_dev, true); + ping_dev(vlan_dev, false); + +out: + close(tap_fd); +} + +static void *test_lwt_redirect_run(void *arg) +{ + netns_delete(); + RUN_TEST(lwt_redirect_normal); + RUN_TEST(lwt_redirect_normal_nomac); + RUN_TEST(lwt_redirect_dev_down); + RUN_TEST(lwt_redirect_dev_down_nomac); + RUN_TEST(lwt_redirect_dev_carrier_down); + return NULL; +} + +void test_lwt_redirect(void) +{ + pthread_t test_thread; + int err; + + /* Run the tests in their own thread to isolate the namespace changes + * so they do not affect the environment of other tests. + * (specifically needed because of unshare(CLONE_NEWNS) in open_netns()) + */ + err = pthread_create(&test_thread, NULL, &test_lwt_redirect_run, NULL); + if (ASSERT_OK(err, "pthread_create")) + ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c new file mode 100644 index 000000000000..f4bb2d5fcae0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * Test suite of lwt BPF programs that reroutes packets + * The file tests focus not only if these programs work as expected normally, + * but also if they can handle abnormal situations gracefully. This test + * suite currently only covers lwt_xmit hook. lwt_in tests have not been + * implemented. + * + * WARNING + * ------- + * This test suite can crash the kernel, thus should be run in a VM. + * + * Setup: + * --------- + * all tests are performed in a single netns. A lwt encap route is setup for + * each subtest: + * + * ip route add 10.0.0.0/24 encap bpf xmit <obj> sec "<section_N>" dev link_err + * + * Here <obj> is statically defined to test_lwt_reroute.bpf.o, and it contains + * a single test program entry. This program sets packet mark by last byte of + * the IPv4 daddr. For example, a packet going to 1.2.3.4 will receive a skb + * mark 4. A packet will only be marked once, and IP x.x.x.0 will be skipped + * to avoid route loop. We didn't use generated BPF skeleton since the + * attachment for lwt programs are not supported by libbpf yet. 
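+ *
+ * The skb mark set by the program is what drives the reroute: the ip rule
+ * below sends marked packets to routing table 100, whose default route points
+ * at the tun device, so a successfully marked packet leaves through tun0
+ * rather than link_err.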
+ * + * The test program will bring up a tun device, and sets up the following + * routes: + * + * ip rule add pref 100 from all fwmark <tun_index> lookup 100 + * ip route add table 100 default dev tun0 + * + * For normal testing, a ping command is running in the test netns: + * + * ping 10.0.0.<tun_index> -c 1 -w 1 -s 100 + * + * For abnormal testing, fq is used as the qdisc of the tun device. Then a UDP + * socket will try to overflow the fq queue and trigger qdisc drop error. + * + * Scenarios: + * -------------------------------- + * 1. Reroute to a running tun device + * 2. Reroute to a device where qdisc drop + * + * For case 1, ping packets should be received by the tun device. + * + * For case 2, force UDP packets to overflow fq limit. As long as kernel + * is not crashed, it is considered successful. + */ +#include "lwt_helpers.h" +#include "network_helpers.h" +#include <linux/net_tstamp.h> + +#define BPF_OBJECT "test_lwt_reroute.bpf.o" +#define LOCAL_SRC "10.0.0.1" +#define TEST_CIDR "10.0.0.0/24" +#define XMIT_HOOK "xmit" +#define XMIT_SECTION "lwt_xmit" +#define NSEC_PER_SEC 1000000000ULL + +/* send a ping to be rerouted to the target device */ +static void ping_once(const char *ip) +{ + /* We won't get a reply. Don't fail here */ + SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1", + ip, ICMP_PAYLOAD_SIZE); +} + +/* Send snd_target UDP packets to overflow the fq queue and trigger qdisc drop + * error. This is done via TX tstamp to force buffering delayed packets. + */ +static int overflow_fq(int snd_target, const char *target_ip) +{ + struct sockaddr_in addr = { + .sin_family = AF_INET, + .sin_port = htons(1234), + }; + + char data_buf[8]; /* only #pkts matter, so use a random small buffer */ + char control_buf[CMSG_SPACE(sizeof(uint64_t))]; + struct iovec iov = { + .iov_base = data_buf, + .iov_len = sizeof(data_buf), + }; + int err = -1; + int s = -1; + struct sock_txtime txtime_on = { + .clockid = CLOCK_MONOTONIC, + .flags = 0, + }; + struct msghdr msg = { + .msg_name = &addr, + .msg_namelen = sizeof(addr), + .msg_control = control_buf, + .msg_controllen = sizeof(control_buf), + .msg_iovlen = 1, + .msg_iov = &iov, + }; + struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); + + memset(data_buf, 0, sizeof(data_buf)); + + s = socket(AF_INET, SOCK_DGRAM, 0); + if (!ASSERT_GE(s, 0, "socket")) + goto out; + + err = setsockopt(s, SOL_SOCKET, SO_TXTIME, &txtime_on, sizeof(txtime_on)); + if (!ASSERT_OK(err, "setsockopt(SO_TXTIME)")) + goto out; + + err = inet_pton(AF_INET, target_ip, &addr.sin_addr); + if (!ASSERT_EQ(err, 1, "inet_pton")) + goto out; + + while (snd_target > 0) { + struct timespec now; + + memset(control_buf, 0, sizeof(control_buf)); + cmsg->cmsg_type = SCM_TXTIME; + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_len = CMSG_LEN(sizeof(uint64_t)); + + err = clock_gettime(CLOCK_MONOTONIC, &now); + if (!ASSERT_OK(err, "clock_gettime(CLOCK_MONOTONIC)")) { + err = -1; + goto out; + } + + *(uint64_t *)CMSG_DATA(cmsg) = (now.tv_nsec + 1) * NSEC_PER_SEC + + now.tv_nsec; + + /* we will intentionally send more than fq limit, so ignore + * the error here. 
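+ * (test_lwt_reroute_qdisc_dropped installs "fq limit 5 flow_limit 5" on the
+ * tun device, so a burst of 10 delayed packets is enough to overflow it)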
+ */ + sendmsg(s, &msg, MSG_NOSIGNAL); + snd_target--; + } + + /* no kernel crash so far is considered success */ + err = 0; + +out: + if (s >= 0) + close(s); + + return err; +} + +static int setup(const char *tun_dev) +{ + int target_index = -1; + int tap_fd = -1; + + tap_fd = open_tuntap(tun_dev, false); + if (!ASSERT_GE(tap_fd, 0, "open_tun")) + return -1; + + target_index = if_nametoindex(tun_dev); + if (!ASSERT_GE(target_index, 0, "if_nametoindex")) + return -1; + + SYS(fail, "ip link add link_err type dummy"); + SYS(fail, "ip link set lo up"); + SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32"); + SYS(fail, "ip link set link_err up"); + SYS(fail, "ip link set %s up", tun_dev); + + SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec lwt_xmit", + TEST_CIDR, BPF_OBJECT); + + SYS(fail, "ip rule add pref 100 from all fwmark %d lookup 100", + target_index); + SYS(fail, "ip route add t 100 default dev %s", tun_dev); + + return tap_fd; + +fail: + if (tap_fd >= 0) + close(tap_fd); + return -1; +} + +static void test_lwt_reroute_normal_xmit(void) +{ + const char *tun_dev = "tun0"; + int tun_fd = -1; + int ifindex = -1; + char ip[256]; + struct timeval timeo = { + .tv_sec = 0, + .tv_usec = 250000, + }; + + tun_fd = setup(tun_dev); + if (!ASSERT_GE(tun_fd, 0, "setup_reroute")) + return; + + ifindex = if_nametoindex(tun_dev); + if (!ASSERT_GE(ifindex, 0, "if_nametoindex")) + return; + + snprintf(ip, 256, "10.0.0.%d", ifindex); + + /* ping packets should be received by the tun device */ + ping_once(ip); + + if (!ASSERT_EQ(wait_for_packet(tun_fd, __expect_icmp_ipv4, &timeo), 1, + "wait_for_packet")) + log_err("%s xmit", __func__); +} + +/* + * Test the failure case when the skb is dropped at the qdisc. This is a + * regression prevention at the xmit hook only. + */ +static void test_lwt_reroute_qdisc_dropped(void) +{ + const char *tun_dev = "tun0"; + int tun_fd = -1; + int ifindex = -1; + char ip[256]; + + tun_fd = setup(tun_dev); + if (!ASSERT_GE(tun_fd, 0, "setup_reroute")) + goto fail; + + SYS(fail, "tc qdisc replace dev %s root fq limit 5 flow_limit 5", tun_dev); + + ifindex = if_nametoindex(tun_dev); + if (!ASSERT_GE(ifindex, 0, "if_nametoindex")) + return; + + snprintf(ip, 256, "10.0.0.%d", ifindex); + ASSERT_EQ(overflow_fq(10, ip), 0, "overflow_fq"); + +fail: + if (tun_fd >= 0) + close(tun_fd); +} + +static void *test_lwt_reroute_run(void *arg) +{ + netns_delete(); + RUN_TEST(lwt_reroute_normal_xmit); + RUN_TEST(lwt_reroute_qdisc_dropped); + return NULL; +} + +void test_lwt_reroute(void) +{ + pthread_t test_thread; + int err; + + /* Run the tests in their own thread to isolate the namespace changes + * so they do not affect the environment of other tests. 
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns()) + */ + err = pthread_create(&test_thread, NULL, &test_lwt_reroute_run, NULL); + if (ASSERT_OK(err, "pthread_create")) + ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools/testing/selftests/bpf/prog_tests/modify_return.c index 5d9955af6247..a70c99c2f8c8 100644 --- a/tools/testing/selftests/bpf/prog_tests/modify_return.c +++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c @@ -41,6 +41,10 @@ static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 want_ret) ASSERT_EQ(skel->bss->fexit_result, 1, "modify_return fexit_result"); ASSERT_EQ(skel->bss->fmod_ret_result, 1, "modify_return fmod_ret_result"); + ASSERT_EQ(skel->bss->fentry_result2, 1, "modify_return fentry_result2"); + ASSERT_EQ(skel->bss->fexit_result2, 1, "modify_return fexit_result2"); + ASSERT_EQ(skel->bss->fmod_ret_result2, 1, "modify_return fmod_ret_result2"); + cleanup: modify_return__destroy(skel); } @@ -49,9 +53,9 @@ cleanup: void serial_test_modify_return(void) { run_test(0 /* input_retval */, - 1 /* want_side_effect */, - 4 /* want_ret */); + 2 /* want_side_effect */, + 33 /* want_ret */); run_test(-EINVAL /* input_retval */, 0 /* want_side_effect */, - -EINVAL /* want_ret */); + -EINVAL * 2 /* want_ret */); } diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c index 7fc01ff490db..f53d658ed080 100644 --- a/tools/testing/selftests/bpf/prog_tests/module_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c @@ -4,6 +4,7 @@ #include <test_progs.h> #include <stdbool.h> #include "test_module_attach.skel.h" +#include "testing_helpers.h" static int duration; @@ -32,11 +33,6 @@ static int trigger_module_test_writable(int *val) return 0; } -static int delete_module(const char *name, int flags) -{ - return syscall(__NR_delete_module, name, flags); -} - void test_module_attach(void) { const int READ_SZ = 456; @@ -93,21 +89,21 @@ void test_module_attach(void) if (!ASSERT_OK_PTR(link, "attach_fentry")) goto cleanup; - ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module"); + ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod"); bpf_link__destroy(link); link = bpf_program__attach(skel->progs.handle_fexit); if (!ASSERT_OK_PTR(link, "attach_fexit")) goto cleanup; - ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module"); + ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod"); bpf_link__destroy(link); link = bpf_program__attach(skel->progs.kprobe_multi); if (!ASSERT_OK_PTR(link, "attach_kprobe_multi")) goto cleanup; - ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module"); + ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod"); bpf_link__destroy(link); cleanup: diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c index cd0c42fff7c0..7c0be7cf550b 100644 --- a/tools/testing/selftests/bpf/prog_tests/mptcp.c +++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c @@ -2,17 +2,59 @@ /* Copyright (c) 2020, Tessares SA. */ /* Copyright (c) 2022, SUSE. 
*/ +#include <linux/const.h> +#include <netinet/in.h> #include <test_progs.h> #include "cgroup_helpers.h" #include "network_helpers.h" #include "mptcp_sock.skel.h" +#include "mptcpify.skel.h" #define NS_TEST "mptcp_ns" +#ifndef IPPROTO_MPTCP +#define IPPROTO_MPTCP 262 +#endif + +#ifndef SOL_MPTCP +#define SOL_MPTCP 284 +#endif +#ifndef MPTCP_INFO +#define MPTCP_INFO 1 +#endif +#ifndef MPTCP_INFO_FLAG_FALLBACK +#define MPTCP_INFO_FLAG_FALLBACK _BITUL(0) +#endif +#ifndef MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED +#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1) +#endif + #ifndef TCP_CA_NAME_MAX #define TCP_CA_NAME_MAX 16 #endif +struct __mptcp_info { + __u8 mptcpi_subflows; + __u8 mptcpi_add_addr_signal; + __u8 mptcpi_add_addr_accepted; + __u8 mptcpi_subflows_max; + __u8 mptcpi_add_addr_signal_max; + __u8 mptcpi_add_addr_accepted_max; + __u32 mptcpi_flags; + __u32 mptcpi_token; + __u64 mptcpi_write_seq; + __u64 mptcpi_snd_una; + __u64 mptcpi_rcv_nxt; + __u8 mptcpi_local_addr_used; + __u8 mptcpi_local_addr_max; + __u8 mptcpi_csum_enabled; + __u32 mptcpi_retransmits; + __u64 mptcpi_bytes_retrans; + __u64 mptcpi_bytes_sent; + __u64 mptcpi_bytes_received; + __u64 mptcpi_bytes_acked; +}; + struct mptcp_storage { __u32 invoked; __u32 is_mptcp; @@ -22,6 +64,24 @@ struct mptcp_storage { char ca_name[TCP_CA_NAME_MAX]; }; +static struct nstoken *create_netns(void) +{ + SYS(fail, "ip netns add %s", NS_TEST); + SYS(fail, "ip -net %s link set dev lo up", NS_TEST); + + return open_netns(NS_TEST); +fail: + return NULL; +} + +static void cleanup_netns(struct nstoken *nstoken) +{ + if (nstoken) + close_netns(nstoken); + + SYS_NOFAIL("ip netns del %s &> /dev/null", NS_TEST); +} + static int verify_tsk(int map_fd, int client_fd) { int err, cfd = client_fd; @@ -100,24 +160,14 @@ static int run_test(int cgroup_fd, int server_fd, bool is_mptcp) sock_skel = mptcp_sock__open_and_load(); if (!ASSERT_OK_PTR(sock_skel, "skel_open_load")) - return -EIO; + return libbpf_get_error(sock_skel); err = mptcp_sock__attach(sock_skel); if (!ASSERT_OK(err, "skel_attach")) goto out; prog_fd = bpf_program__fd(sock_skel->progs._sockops); - if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd")) { - err = -EIO; - goto out; - } - map_fd = bpf_map__fd(sock_skel->maps.socket_storage_map); - if (!ASSERT_GE(map_fd, 0, "bpf_map__fd")) { - err = -EIO; - goto out; - } - err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0); if (!ASSERT_OK(err, "bpf_prog_attach")) goto out; @@ -147,11 +197,8 @@ static void test_base(void) if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup")) return; - SYS(fail, "ip netns add %s", NS_TEST); - SYS(fail, "ip -net %s link set dev lo up", NS_TEST); - - nstoken = open_netns(NS_TEST); - if (!ASSERT_OK_PTR(nstoken, "open_netns")) + nstoken = create_netns(); + if (!ASSERT_OK_PTR(nstoken, "create_netns")) goto fail; /* without MPTCP */ @@ -174,11 +221,104 @@ with_mptcp: close(server_fd); fail: - if (nstoken) - close_netns(nstoken); + cleanup_netns(nstoken); + close(cgroup_fd); +} - SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null"); +static void send_byte(int fd) +{ + char b = 0x55; + + ASSERT_EQ(write(fd, &b, sizeof(b)), 1, "send single byte"); +} + +static int verify_mptcpify(int server_fd, int client_fd) +{ + struct __mptcp_info info; + socklen_t optlen; + int protocol; + int err = 0; + + optlen = sizeof(protocol); + if (!ASSERT_OK(getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen), + "getsockopt(SOL_PROTOCOL)")) + return -1; + + if (!ASSERT_EQ(protocol, IPPROTO_MPTCP, "protocol isn't MPTCP")) + 
err++; + optlen = sizeof(info); + if (!ASSERT_OK(getsockopt(client_fd, SOL_MPTCP, MPTCP_INFO, &info, &optlen), + "getsockopt(MPTCP_INFO)")) + return -1; + + if (!ASSERT_GE(info.mptcpi_flags, 0, "unexpected mptcpi_flags")) + err++; + if (!ASSERT_FALSE(info.mptcpi_flags & MPTCP_INFO_FLAG_FALLBACK, + "MPTCP fallback")) + err++; + if (!ASSERT_TRUE(info.mptcpi_flags & MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED, + "no remote key received")) + err++; + + return err; +} + +static int run_mptcpify(int cgroup_fd) +{ + int server_fd, client_fd, err = 0; + struct mptcpify *mptcpify_skel; + + mptcpify_skel = mptcpify__open_and_load(); + if (!ASSERT_OK_PTR(mptcpify_skel, "skel_open_load")) + return libbpf_get_error(mptcpify_skel); + + err = mptcpify__attach(mptcpify_skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + /* without MPTCP */ + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_GE(server_fd, 0, "start_server")) { + err = -EIO; + goto out; + } + + client_fd = connect_to_fd(server_fd, 0); + if (!ASSERT_GE(client_fd, 0, "connect to fd")) { + err = -EIO; + goto close_server; + } + + send_byte(client_fd); + + err = verify_mptcpify(server_fd, client_fd); + + close(client_fd); +close_server: + close(server_fd); +out: + mptcpify__destroy(mptcpify_skel); + return err; +} + +static void test_mptcpify(void) +{ + struct nstoken *nstoken = NULL; + int cgroup_fd; + + cgroup_fd = test__join_cgroup("/mptcpify"); + if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup")) + return; + + nstoken = create_netns(); + if (!ASSERT_OK_PTR(nstoken, "create_netns")) + goto fail; + + ASSERT_OK(run_mptcpify(cgroup_fd), "run_mptcpify"); + +fail: + cleanup_netns(nstoken); close(cgroup_fd); } @@ -186,4 +326,6 @@ void test_mptcp(void) { if (test__start_subtest("base")) test_base(); + if (test__start_subtest("mptcpify")) + test_mptcpify(); } diff --git a/tools/testing/selftests/bpf/prog_tests/netcnt.c b/tools/testing/selftests/bpf/prog_tests/netcnt.c index d3915c58d0e1..c3333edd029f 100644 --- a/tools/testing/selftests/bpf/prog_tests/netcnt.c +++ b/tools/testing/selftests/bpf/prog_tests/netcnt.c @@ -67,12 +67,12 @@ void serial_test_netcnt(void) } /* No packets should be lost */ - ASSERT_EQ(packets, 10000, "packets"); + ASSERT_GE(packets, 10000, "packets"); /* Let's check that bytes counter matches the number of packets * multiplied by the size of ipv6 ICMP packet. 
*/ - ASSERT_EQ(bytes, packets * 104, "bytes"); + ASSERT_GE(bytes, packets * 104, "bytes"); err: if (cg_fd != -1) diff --git a/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c b/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c new file mode 100644 index 000000000000..4297a2a4cb11 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <netinet/in.h> +#include <linux/netfilter.h> + +#include "test_progs.h" +#include "test_netfilter_link_attach.skel.h" + +struct nf_link_test { + __u32 pf; + __u32 hooknum; + __s32 priority; + __u32 flags; + + bool expect_success; + const char * const name; +}; + +static const struct nf_link_test nf_hook_link_tests[] = { + { .name = "allzero", }, + { .pf = NFPROTO_NUMPROTO, .name = "invalid-pf", }, + { .pf = NFPROTO_IPV4, .hooknum = 42, .name = "invalid-hooknum", }, + { .pf = NFPROTO_IPV4, .priority = INT_MIN, .name = "invalid-priority-min", }, + { .pf = NFPROTO_IPV4, .priority = INT_MAX, .name = "invalid-priority-max", }, + { .pf = NFPROTO_IPV4, .flags = UINT_MAX, .name = "invalid-flags", }, + + { .pf = NFPROTO_INET, .priority = 1, .name = "invalid-inet-not-supported", }, + + { .pf = NFPROTO_IPV4, .priority = -10000, .expect_success = true, .name = "attach ipv4", }, + { .pf = NFPROTO_IPV6, .priority = 10001, .expect_success = true, .name = "attach ipv6", }, +}; + +void test_netfilter_link_attach(void) +{ + struct test_netfilter_link_attach *skel; + struct bpf_program *prog; + LIBBPF_OPTS(bpf_netfilter_opts, opts); + int i; + + skel = test_netfilter_link_attach__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_netfilter_link_attach__open_and_load")) + goto out; + + prog = skel->progs.nf_link_attach_test; + if (!ASSERT_OK_PTR(prog, "attach program")) + goto out; + + for (i = 0; i < ARRAY_SIZE(nf_hook_link_tests); i++) { + struct bpf_link *link; + + if (!test__start_subtest(nf_hook_link_tests[i].name)) + continue; + +#define X(opts, m, i) opts.m = nf_hook_link_tests[(i)].m + X(opts, pf, i); + X(opts, hooknum, i); + X(opts, priority, i); + X(opts, flags, i); +#undef X + link = bpf_program__attach_netfilter(prog, &opts); + if (nf_hook_link_tests[i].expect_success) { + struct bpf_link *link2; + + if (!ASSERT_OK_PTR(link, "program attach successful")) + continue; + + link2 = bpf_program__attach_netfilter(prog, &opts); + ASSERT_ERR_PTR(link2, "attach program with same pf/hook/priority"); + + if (!ASSERT_OK(bpf_link__destroy(link), "link destroy")) + break; + + link2 = bpf_program__attach_netfilter(prog, &opts); + if (!ASSERT_OK_PTR(link2, "program reattach successful")) + continue; + if (!ASSERT_OK(bpf_link__destroy(link2), "link destroy")) + break; + } else { + ASSERT_ERR_PTR(link, "program load failure"); + } + } + +out: + test_netfilter_link_attach__destroy(skel); +} + diff --git a/tools/testing/selftests/bpf/prog_tests/ptr_untrusted.c b/tools/testing/selftests/bpf/prog_tests/ptr_untrusted.c new file mode 100644 index 000000000000..8d077d150c56 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ptr_untrusted.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023 Yafang Shao <[email protected]> */ + +#include <string.h> +#include <linux/bpf.h> +#include <test_progs.h> +#include "test_ptr_untrusted.skel.h" + +#define TP_NAME "sched_switch" + +void serial_test_ptr_untrusted(void) +{ + struct test_ptr_untrusted *skel; + int err; + + skel = test_ptr_untrusted__open_and_load(); + if (!ASSERT_OK_PTR(skel, 
"skel_open")) + goto cleanup; + + /* First, attach lsm prog */ + skel->links.lsm_run = bpf_program__attach_lsm(skel->progs.lsm_run); + if (!ASSERT_OK_PTR(skel->links.lsm_run, "lsm_attach")) + goto cleanup; + + /* Second, attach raw_tp prog. The lsm prog will be triggered. */ + skel->links.raw_tp_run = bpf_program__attach_raw_tracepoint(skel->progs.raw_tp_run, + TP_NAME); + if (!ASSERT_OK_PTR(skel->links.raw_tp_run, "raw_tp_attach")) + goto cleanup; + + err = strncmp(skel->bss->tp_name, TP_NAME, strlen(TP_NAME)); + ASSERT_EQ(err, 0, "cmp_tp_name"); + +cleanup: + test_ptr_untrusted__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c index 595cbf92bff5..d6bd5e16e637 100644 --- a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c +++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c @@ -9,8 +9,38 @@ void test_refcounted_kptr(void) { + RUN_TESTS(refcounted_kptr); } void test_refcounted_kptr_fail(void) { + RUN_TESTS(refcounted_kptr_fail); +} + +void test_refcounted_kptr_wrong_owner(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct refcounted_kptr *skel; + int ret; + + skel = refcounted_kptr__open_and_load(); + if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a1), &opts); + ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a1"); + ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a1 retval"); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_b), &opts); + ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_b"); + ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_b retval"); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a2), &opts); + ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a2"); + ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval"); + refcounted_kptr__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c b/tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c new file mode 100644 index 000000000000..f35852d245e3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Facebook */ +#include <test_progs.h> +#include <bpf/libbpf.h> +#include <sys/types.h> +#include <sys/socket.h> +#include "sk_storage_omem_uncharge.skel.h" + +void test_sk_storage_omem_uncharge(void) +{ + struct sk_storage_omem_uncharge *skel; + int sk_fd = -1, map_fd, err, value; + socklen_t optlen; + + skel = sk_storage_omem_uncharge__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel open_and_load")) + return; + map_fd = bpf_map__fd(skel->maps.sk_storage); + + /* A standalone socket not binding to addr:port, + * so nentns is not needed. 
+ */ + sk_fd = socket(AF_INET6, SOCK_STREAM, 0); + if (!ASSERT_GE(sk_fd, 0, "socket")) + goto done; + + optlen = sizeof(skel->bss->cookie); + err = getsockopt(sk_fd, SOL_SOCKET, SO_COOKIE, &skel->bss->cookie, &optlen); + if (!ASSERT_OK(err, "getsockopt(SO_COOKIE)")) + goto done; + + value = 0; + err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0); + if (!ASSERT_OK(err, "bpf_map_update_elem(value=0)")) + goto done; + + value = 0xdeadbeef; + err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0); + if (!ASSERT_OK(err, "bpf_map_update_elem(value=0xdeadbeef)")) + goto done; + + err = sk_storage_omem_uncharge__attach(skel); + if (!ASSERT_OK(err, "attach")) + goto done; + + close(sk_fd); + sk_fd = -1; + + ASSERT_EQ(skel->bss->cookie_found, 2, "cookie_found"); + ASSERT_EQ(skel->bss->omem, 0, "omem"); + +done: + sk_storage_omem_uncharge__destroy(skel); + if (sk_fd != -1) + close(sk_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sock_destroy.c b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c new file mode 100644 index 000000000000..b0583309a94e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <bpf/bpf_endian.h> + +#include "sock_destroy_prog.skel.h" +#include "sock_destroy_prog_fail.skel.h" +#include "network_helpers.h" + +#define TEST_NS "sock_destroy_netns" + +static void start_iter_sockets(struct bpf_program *prog) +{ + struct bpf_link *link; + char buf[50] = {}; + int iter_fd, len; + + link = bpf_program__attach_iter(prog, NULL); + if (!ASSERT_OK_PTR(link, "attach_iter")) + return; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto free_link; + + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + ASSERT_GE(len, 0, "read"); + + close(iter_fd); + +free_link: + bpf_link__destroy(link); +} + +static void test_tcp_client(struct sock_destroy_prog *skel) +{ + int serv = -1, clien = -1, accept_serv = -1, n; + + serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_GE(serv, 0, "start_server")) + goto cleanup; + + clien = connect_to_fd(serv, 0); + if (!ASSERT_GE(clien, 0, "connect_to_fd")) + goto cleanup; + + accept_serv = accept(serv, NULL, NULL); + if (!ASSERT_GE(accept_serv, 0, "serv accept")) + goto cleanup; + + n = send(clien, "t", 1, 0); + if (!ASSERT_EQ(n, 1, "client send")) + goto cleanup; + + /* Run iterator program that destroys connected client sockets. 
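+ * A follow-up send() on the destroyed socket is expected to fail with
+ * ECONNABORTED.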
*/ + start_iter_sockets(skel->progs.iter_tcp6_client); + + n = send(clien, "t", 1, 0); + if (!ASSERT_LT(n, 0, "client_send on destroyed socket")) + goto cleanup; + ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket"); + +cleanup: + if (clien != -1) + close(clien); + if (accept_serv != -1) + close(accept_serv); + if (serv != -1) + close(serv); +} + +static void test_tcp_server(struct sock_destroy_prog *skel) +{ + int serv = -1, clien = -1, accept_serv = -1, n, serv_port; + + serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_GE(serv, 0, "start_server")) + goto cleanup; + serv_port = get_socket_local_port(serv); + if (!ASSERT_GE(serv_port, 0, "get_sock_local_port")) + goto cleanup; + skel->bss->serv_port = (__be16) serv_port; + + clien = connect_to_fd(serv, 0); + if (!ASSERT_GE(clien, 0, "connect_to_fd")) + goto cleanup; + + accept_serv = accept(serv, NULL, NULL); + if (!ASSERT_GE(accept_serv, 0, "serv accept")) + goto cleanup; + + n = send(clien, "t", 1, 0); + if (!ASSERT_EQ(n, 1, "client send")) + goto cleanup; + + /* Run iterator program that destroys server sockets. */ + start_iter_sockets(skel->progs.iter_tcp6_server); + + n = send(clien, "t", 1, 0); + if (!ASSERT_LT(n, 0, "client_send on destroyed socket")) + goto cleanup; + ASSERT_EQ(errno, ECONNRESET, "error code on destroyed socket"); + +cleanup: + if (clien != -1) + close(clien); + if (accept_serv != -1) + close(accept_serv); + if (serv != -1) + close(serv); +} + +static void test_udp_client(struct sock_destroy_prog *skel) +{ + int serv = -1, clien = -1, n = 0; + + serv = start_server(AF_INET6, SOCK_DGRAM, NULL, 0, 0); + if (!ASSERT_GE(serv, 0, "start_server")) + goto cleanup; + + clien = connect_to_fd(serv, 0); + if (!ASSERT_GE(clien, 0, "connect_to_fd")) + goto cleanup; + + n = send(clien, "t", 1, 0); + if (!ASSERT_EQ(n, 1, "client send")) + goto cleanup; + + /* Run iterator program that destroys sockets. */ + start_iter_sockets(skel->progs.iter_udp6_client); + + n = send(clien, "t", 1, 0); + if (!ASSERT_LT(n, 0, "client_send on destroyed socket")) + goto cleanup; + /* UDP sockets have an overriding error code after they are disconnected, + * so we don't check for ECONNABORTED error code. + */ + +cleanup: + if (clien != -1) + close(clien); + if (serv != -1) + close(serv); +} + +static void test_udp_server(struct sock_destroy_prog *skel) +{ + int *listen_fds = NULL, n, i, serv_port; + unsigned int num_listens = 5; + char buf[1]; + + /* Start reuseport servers. */ + listen_fds = start_reuseport_server(AF_INET6, SOCK_DGRAM, + "::1", 0, 0, num_listens); + if (!ASSERT_OK_PTR(listen_fds, "start_reuseport_server")) + goto cleanup; + serv_port = get_socket_local_port(listen_fds[0]); + if (!ASSERT_GE(serv_port, 0, "get_sock_local_port")) + goto cleanup; + skel->bss->serv_port = (__be16) serv_port; + + /* Run iterator program that destroys server sockets. 
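+ * Reads on each destroyed reuseport socket should then fail with
+ * ECONNABORTED.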
*/ + start_iter_sockets(skel->progs.iter_udp6_server); + + for (i = 0; i < num_listens; ++i) { + n = read(listen_fds[i], buf, sizeof(buf)); + if (!ASSERT_EQ(n, -1, "read") || + !ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket")) + break; + } + ASSERT_EQ(i, num_listens, "server socket"); + +cleanup: + free_fds(listen_fds, num_listens); +} + +void test_sock_destroy(void) +{ + struct sock_destroy_prog *skel; + struct nstoken *nstoken = NULL; + int cgroup_fd; + + skel = sock_destroy_prog__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + cgroup_fd = test__join_cgroup("/sock_destroy"); + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup")) + goto cleanup; + + skel->links.sock_connect = bpf_program__attach_cgroup( + skel->progs.sock_connect, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.sock_connect, "prog_attach")) + goto cleanup; + + SYS(cleanup, "ip netns add %s", TEST_NS); + SYS(cleanup, "ip -net %s link set dev lo up", TEST_NS); + + nstoken = open_netns(TEST_NS); + if (!ASSERT_OK_PTR(nstoken, "open_netns")) + goto cleanup; + + if (test__start_subtest("tcp_client")) + test_tcp_client(skel); + if (test__start_subtest("tcp_server")) + test_tcp_server(skel); + if (test__start_subtest("udp_client")) + test_udp_client(skel); + if (test__start_subtest("udp_server")) + test_udp_server(skel); + + RUN_TESTS(sock_destroy_prog_fail); + +cleanup: + if (nstoken) + close_netns(nstoken); + SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null"); + if (cgroup_fd >= 0) + close(cgroup_fd); + sock_destroy_prog__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index 064cc5e8d9ad..dda7060e86a0 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -475,6 +475,55 @@ out: test_sockmap_drop_prog__destroy(drop); } +static void test_sockmap_skb_verdict_peek(void) +{ + int err, map, verdict, s, c1, p1, zero = 0, sent, recvd, avail; + struct test_sockmap_pass_prog *pass; + char snd[256] = "0123456789"; + char rcv[256] = "0"; + + pass = test_sockmap_pass_prog__open_and_load(); + if (!ASSERT_OK_PTR(pass, "open_and_load")) + return; + verdict = bpf_program__fd(pass->progs.prog_skb_verdict); + map = bpf_map__fd(pass->maps.sock_map_rx); + + err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach")) + goto out; + + s = socket_loopback(AF_INET, SOCK_STREAM); + if (!ASSERT_GT(s, -1, "socket_loopback(s)")) + goto out; + + err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1); + if (!ASSERT_OK(err, "create_pairs(s)")) + goto out; + + err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(c1)")) + goto out_close; + + sent = xsend(p1, snd, sizeof(snd), 0); + ASSERT_EQ(sent, sizeof(snd), "xsend(p1)"); + recvd = recv(c1, rcv, sizeof(rcv), MSG_PEEK); + ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)"); + err = ioctl(c1, FIONREAD, &avail); + ASSERT_OK(err, "ioctl(FIONREAD) error"); + ASSERT_EQ(avail, sizeof(snd), "after peek ioctl(FIONREAD)"); + recvd = recv(c1, rcv, sizeof(rcv), 0); + ASSERT_EQ(recvd, sizeof(rcv), "recv(p0)"); + err = ioctl(c1, FIONREAD, &avail); + ASSERT_OK(err, "ioctl(FIONREAD) error"); + ASSERT_EQ(avail, 0, "after read ioctl(FIONREAD)"); + +out_close: + close(c1); + close(p1); +out: + test_sockmap_pass_prog__destroy(pass); +} + void test_sockmap_basic(void) { if (test__start_subtest("sockmap create_update_free")) @@ -515,4 +564,6 @@ void 
test_sockmap_basic(void) test_sockmap_skb_verdict_fionread(true); if (test__start_subtest("sockmap skb_verdict fionread on drop")) test_sockmap_skb_verdict_fionread(false); + if (test__start_subtest("sockmap skb_verdict msg_f_peek")) + test_sockmap_skb_verdict_peek(); } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h index d12665490a90..36d829a65aa4 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h @@ -179,6 +179,32 @@ __ret; \ }) +static inline int poll_connect(int fd, unsigned int timeout_sec) +{ + struct timeval timeout = { .tv_sec = timeout_sec }; + fd_set wfds; + int r, eval; + socklen_t esize = sizeof(eval); + + FD_ZERO(&wfds); + FD_SET(fd, &wfds); + + r = select(fd + 1, NULL, &wfds, NULL, &timeout); + if (r == 0) + errno = ETIME; + if (r != 1) + return -1; + + if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &eval, &esize) < 0) + return -1; + if (eval != 0) { + errno = eval; + return -1; + } + + return 0; +} + static inline int poll_read(int fd, unsigned int timeout_sec) { struct timeval timeout = { .tv_sec = timeout_sec }; diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index b4f6f3a50ae5..8df8cbb447f1 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -869,6 +869,77 @@ static void test_msg_redir_to_listening(struct test_sockmap_listen *skel, xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT); } +static void redir_partial(int family, int sotype, int sock_map, int parser_map) +{ + int s, c0, c1, p0, p1; + int err, n, key, value; + char buf[] = "abc"; + + key = 0; + value = sizeof(buf) - 1; + err = xbpf_map_update_elem(parser_map, &key, &value, 0); + if (err) + return; + + s = socket_loopback(family, sotype | SOCK_NONBLOCK); + if (s < 0) + goto clean_parser_map; + + err = create_socket_pairs(s, family, sotype, &c0, &c1, &p0, &p1); + if (err) + goto close_srv; + + err = add_to_sockmap(sock_map, p0, p1); + if (err) + goto close; + + n = xsend(c1, buf, sizeof(buf), 0); + if (n < sizeof(buf)) + FAIL("incomplete write"); + + n = xrecv_nonblock(c0, buf, sizeof(buf), 0); + if (n != sizeof(buf) - 1) + FAIL("expect %zu, received %d", sizeof(buf) - 1, n); + +close: + xclose(c0); + xclose(p0); + xclose(c1); + xclose(p1); +close_srv: + xclose(s); + +clean_parser_map: + key = 0; + value = 0; + xbpf_map_update_elem(parser_map, &key, &value, 0); +} + +static void test_skb_redir_partial(struct test_sockmap_listen *skel, + struct bpf_map *inner_map, int family, + int sotype) +{ + int verdict = bpf_program__fd(skel->progs.prog_stream_verdict); + int parser = bpf_program__fd(skel->progs.prog_stream_parser); + int parser_map = bpf_map__fd(skel->maps.parser_map); + int sock_map = bpf_map__fd(inner_map); + int err; + + err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0); + if (err) + return; + + err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0); + if (err) + goto detach; + + redir_partial(family, sotype, sock_map, parser_map); + + xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT); +detach: + xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER); +} + static void test_reuseport_select_listening(int family, int sotype, int sock_map, int verd_map, int reuseport_prog) @@ -1243,6 +1314,7 @@ static void test_redir(struct 
test_sockmap_listen *skel, struct bpf_map *map, } tests[] = { TEST(test_skb_redir_to_connected), TEST(test_skb_redir_to_listening), + TEST(test_skb_redir_partial), TEST(test_msg_redir_to_connected), TEST(test_msg_redir_to_listening), }; @@ -1380,11 +1452,18 @@ static int vsock_socketpair_connectible(int sotype, int *v0, int *v1) if (p < 0) goto close_cli; + if (poll_connect(c, IO_TIMEOUT_SEC) < 0) { + FAIL_ERRNO("poll_connect"); + goto close_acc; + } + *v0 = p; *v1 = c; return 0; +close_acc: + close(p); close_cli: close(c); close_srv: @@ -1432,7 +1511,7 @@ static void vsock_unix_redir_connectible(int sock_mapfd, int verd_mapfd, if (n < 1) goto out; - n = recv(mode == REDIR_INGRESS ? u0 : u1, &b, sizeof(b), MSG_DONTWAIT); + n = xrecv_nonblock(mode == REDIR_INGRESS ? u0 : u1, &b, sizeof(b), 0); if (n < 0) FAIL("%s: recv() err, errno=%d", log_prefix, errno); if (n == 0) diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt.c b/tools/testing/selftests/bpf/prog_tests/sockopt.c index aa4debf62fc6..9e6a5e3ed4de 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt.c @@ -5,10 +5,15 @@ static char bpf_log_buf[4096]; static bool verbose; +#ifndef PAGE_SIZE +#define PAGE_SIZE 4096 +#endif + enum sockopt_test_error { OK = 0, DENY_LOAD, DENY_ATTACH, + EOPNOTSUPP_GETSOCKOPT, EPERM_GETSOCKOPT, EFAULT_GETSOCKOPT, EPERM_SETSOCKOPT, @@ -273,10 +278,31 @@ static struct sockopt_test { .error = EFAULT_GETSOCKOPT, }, { - .descr = "getsockopt: deny arbitrary ctx->retval", + .descr = "getsockopt: ignore >PAGE_SIZE optlen", .insns = { - /* ctx->retval = 123 */ - BPF_MOV64_IMM(BPF_REG_0, 123), + /* write 0xFF to the first optval byte */ + + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + /* r2 = ctx->optval */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), + /* r6 = ctx->optval + 1 */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + + /* r7 = ctx->optval_end */ + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, + offsetof(struct bpf_sockopt, optval_end)), + + /* if (ctx->optval + 1 <= ctx->optval_end) { */ + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1), + /* ctx->optval[0] = 0xF0 */ + BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xFF), + /* } */ + + /* retval changes are ignored */ + /* ctx->retval = 5 */ + BPF_MOV64_IMM(BPF_REG_0, 5), BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct bpf_sockopt, retval)), @@ -287,9 +313,11 @@ static struct sockopt_test { .attach_type = BPF_CGROUP_GETSOCKOPT, .expected_attach_type = BPF_CGROUP_GETSOCKOPT, - .get_optlen = 64, - - .error = EFAULT_GETSOCKOPT, + .get_level = 1234, + .get_optname = 5678, + .get_optval = {}, /* the changes are ignored */ + .get_optlen = PAGE_SIZE + 1, + .error = EOPNOTSUPP_GETSOCKOPT, }, { .descr = "getsockopt: support smaller ctx->optlen", @@ -649,6 +677,45 @@ static struct sockopt_test { .error = EFAULT_SETSOCKOPT, }, { + .descr = "setsockopt: ignore >PAGE_SIZE optlen", + .insns = { + /* write 0xFF to the first optval byte */ + + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + /* r2 = ctx->optval */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), + /* r6 = ctx->optval + 1 */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + + /* r7 = ctx->optval_end */ + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, + offsetof(struct bpf_sockopt, optval_end)), + + /* if (ctx->optval + 1 <= ctx->optval_end) { */ + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1), + /* ctx->optval[0] = 0xF0 */ + BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 
0xF0), + /* } */ + + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_level = SOL_IP, + .set_optname = IP_TOS, + .set_optval = {}, + .set_optlen = PAGE_SIZE + 1, + + .get_level = SOL_IP, + .get_optname = IP_TOS, + .get_optval = {}, /* the changes are ignored */ + .get_optlen = 4, + }, + { .descr = "setsockopt: allow changing ctx->optlen within bounds", .insns = { /* r6 = ctx->optval */ @@ -906,6 +973,13 @@ static int run_test(int cgroup_fd, struct sockopt_test *test) } if (test->set_optlen) { + if (test->set_optlen >= PAGE_SIZE) { + int num_pages = test->set_optlen / PAGE_SIZE; + int remainder = test->set_optlen % PAGE_SIZE; + + test->set_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder; + } + err = setsockopt(sock_fd, test->set_level, test->set_optname, test->set_optval, test->set_optlen); if (err) { @@ -921,7 +995,15 @@ static int run_test(int cgroup_fd, struct sockopt_test *test) } if (test->get_optlen) { + if (test->get_optlen >= PAGE_SIZE) { + int num_pages = test->get_optlen / PAGE_SIZE; + int remainder = test->get_optlen % PAGE_SIZE; + + test->get_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder; + } + optval = malloc(test->get_optlen); + memset(optval, 0, test->get_optlen); socklen_t optlen = test->get_optlen; socklen_t expected_get_optlen = test->get_optlen_ret ?: test->get_optlen; @@ -929,6 +1011,8 @@ static int run_test(int cgroup_fd, struct sockopt_test *test) err = getsockopt(sock_fd, test->get_level, test->get_optname, optval, &optlen); if (err) { + if (errno == EOPNOTSUPP && test->error == EOPNOTSUPP_GETSOCKOPT) + goto free_optval; if (errno == EPERM && test->error == EPERM_GETSOCKOPT) goto free_optval; if (errno == EFAULT && test->error == EFAULT_GETSOCKOPT) @@ -976,7 +1060,9 @@ void test_sockopt(void) return; for (i = 0; i < ARRAY_SIZE(tests); i++) { - test__start_subtest(tests[i].descr); + if (!test__start_subtest(tests[i].descr)) + continue; + ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr); } diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c index 60c17a8e2789..917f486db826 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c @@ -2,6 +2,8 @@ #include <test_progs.h> #include "cgroup_helpers.h" +#include "sockopt_inherit.skel.h" + #define SOL_CUSTOM 0xdeadbeef #define CUSTOM_INHERIT1 0 #define CUSTOM_INHERIT2 1 @@ -132,58 +134,30 @@ static int start_server(void) return fd; } -static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title, - const char *prog_name) -{ - enum bpf_attach_type attach_type; - enum bpf_prog_type prog_type; - struct bpf_program *prog; - int err; - - err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); - if (err) { - log_err("Failed to deduct types for %s BPF program", prog_name); - return -1; - } - - prog = bpf_object__find_program_by_name(obj, prog_name); - if (!prog) { - log_err("Failed to find %s BPF program", prog_name); - return -1; - } - - err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, - attach_type, 0); - if (err) { - log_err("Failed to attach %s BPF program", prog_name); - return -1; - } - - return 0; -} - static void run_test(int cgroup_fd) { + struct bpf_link *link_getsockopt = NULL; + struct bpf_link *link_setsockopt = NULL; int server_fd = -1, client_fd; - struct bpf_object *obj; + struct sockopt_inherit *obj; void 
*server_err; pthread_t tid; int err; - obj = bpf_object__open_file("sockopt_inherit.bpf.o", NULL); - if (!ASSERT_OK_PTR(obj, "obj_open")) + obj = sockopt_inherit__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) return; - err = bpf_object__load(obj); - if (!ASSERT_OK(err, "obj_load")) - goto close_bpf_object; + obj->bss->page_size = sysconf(_SC_PAGESIZE); - err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt", "_getsockopt"); - if (!ASSERT_OK(err, "prog_attach _getsockopt")) + link_getsockopt = bpf_program__attach_cgroup(obj->progs._getsockopt, + cgroup_fd); + if (!ASSERT_OK_PTR(link_getsockopt, "cg-attach-getsockopt")) goto close_bpf_object; - err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt", "_setsockopt"); - if (!ASSERT_OK(err, "prog_attach _setsockopt")) + link_setsockopt = bpf_program__attach_cgroup(obj->progs._setsockopt, + cgroup_fd); + if (!ASSERT_OK_PTR(link_setsockopt, "cg-attach-setsockopt")) goto close_bpf_object; server_fd = start_server(); @@ -217,7 +191,10 @@ static void run_test(int cgroup_fd) close_server_fd: close(server_fd); close_bpf_object: - bpf_object__close(obj); + bpf_link__destroy(link_getsockopt); + bpf_link__destroy(link_setsockopt); + + sockopt_inherit__destroy(obj); } void test_sockopt_inherit(void) diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c index 7f5659349011..759bbb6f8c5f 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c @@ -2,61 +2,13 @@ #include <test_progs.h> #include "cgroup_helpers.h" -static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name) -{ - enum bpf_attach_type attach_type; - enum bpf_prog_type prog_type; - struct bpf_program *prog; - int err; - - err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); - if (err) { - log_err("Failed to deduct types for %s BPF program", title); - return -1; - } - - prog = bpf_object__find_program_by_name(obj, name); - if (!prog) { - log_err("Failed to find %s BPF program", name); - return -1; - } - - err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, - attach_type, BPF_F_ALLOW_MULTI); - if (err) { - log_err("Failed to attach %s BPF program", name); - return -1; - } - - return 0; -} +#include "sockopt_multi.skel.h" -static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name) -{ - enum bpf_attach_type attach_type; - enum bpf_prog_type prog_type; - struct bpf_program *prog; - int err; - - err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); - if (err) - return -1; - - prog = bpf_object__find_program_by_name(obj, name); - if (!prog) - return -1; - - err = bpf_prog_detach2(bpf_program__fd(prog), cgroup_fd, - attach_type); - if (err) - return -1; - - return 0; -} - -static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, +static int run_getsockopt_test(struct sockopt_multi *obj, int cg_parent, int cg_child, int sock_fd) { + struct bpf_link *link_parent = NULL; + struct bpf_link *link_child = NULL; socklen_t optlen; __u8 buf; int err; @@ -89,8 +41,9 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, * - child: 0x80 -> 0x90 */ - err = prog_attach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child"); - if (err) + link_child = bpf_program__attach_cgroup(obj->progs._getsockopt_child, + cg_child); + if (!ASSERT_OK_PTR(link_child, "cg-attach-getsockopt_child")) goto detach; buf = 0x00; @@ -113,8 +66,9 @@ static 
int run_getsockopt_test(struct bpf_object *obj, int cg_parent, * - parent: 0x90 -> 0xA0 */ - err = prog_attach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent"); - if (err) + link_parent = bpf_program__attach_cgroup(obj->progs._getsockopt_parent, + cg_parent); + if (!ASSERT_OK_PTR(link_parent, "cg-attach-getsockopt_parent")) goto detach; buf = 0x00; @@ -157,11 +111,8 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, * - parent: unexpected 0x40, EPERM */ - err = prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child"); - if (err) { - log_err("Failed to detach child program"); - goto detach; - } + bpf_link__destroy(link_child); + link_child = NULL; buf = 0x00; optlen = 1; @@ -198,15 +149,17 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, } detach: - prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child"); - prog_detach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent"); + bpf_link__destroy(link_child); + bpf_link__destroy(link_parent); return err; } -static int run_setsockopt_test(struct bpf_object *obj, int cg_parent, +static int run_setsockopt_test(struct sockopt_multi *obj, int cg_parent, int cg_child, int sock_fd) { + struct bpf_link *link_parent = NULL; + struct bpf_link *link_child = NULL; socklen_t optlen; __u8 buf; int err; @@ -236,8 +189,9 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent, /* Attach child program and make sure it adds 0x10. */ - err = prog_attach(obj, cg_child, "cgroup/setsockopt", "_setsockopt"); - if (err) + link_child = bpf_program__attach_cgroup(obj->progs._setsockopt, + cg_child); + if (!ASSERT_OK_PTR(link_child, "cg-attach-setsockopt_child")) goto detach; buf = 0x80; @@ -263,8 +217,9 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent, /* Attach parent program and make sure it adds another 0x10. 
*/ - err = prog_attach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt"); - if (err) + link_parent = bpf_program__attach_cgroup(obj->progs._setsockopt, + cg_parent); + if (!ASSERT_OK_PTR(link_parent, "cg-attach-setsockopt_parent")) goto detach; buf = 0x80; @@ -289,8 +244,8 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent, } detach: - prog_detach(obj, cg_child, "cgroup/setsockopt", "_setsockopt"); - prog_detach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt"); + bpf_link__destroy(link_child); + bpf_link__destroy(link_parent); return err; } @@ -298,9 +253,8 @@ detach: void test_sockopt_multi(void) { int cg_parent = -1, cg_child = -1; - struct bpf_object *obj = NULL; + struct sockopt_multi *obj = NULL; int sock_fd = -1; - int err = -1; cg_parent = test__join_cgroup("/parent"); if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent")) @@ -310,13 +264,11 @@ void test_sockopt_multi(void) if (!ASSERT_GE(cg_child, 0, "join_cgroup /parent/child")) goto out; - obj = bpf_object__open_file("sockopt_multi.bpf.o", NULL); - if (!ASSERT_OK_PTR(obj, "obj_load")) + obj = sockopt_multi__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) goto out; - err = bpf_object__load(obj); - if (!ASSERT_OK(err, "obj_load")) - goto out; + obj->bss->page_size = sysconf(_SC_PAGESIZE); sock_fd = socket(AF_INET, SOCK_STREAM, 0); if (!ASSERT_GE(sock_fd, 0, "socket")) @@ -327,7 +279,7 @@ void test_sockopt_multi(void) out: close(sock_fd); - bpf_object__close(obj); + sockopt_multi__destroy(obj); close(cg_child); close(cg_parent); } diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c index 6b53b3cb8dad..6b2d300e9fd4 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c @@ -42,6 +42,8 @@ void test_sockopt_qos_to_cc(void) if (!ASSERT_OK_PTR(skel, "skel")) goto done; + skel->bss->page_size = sysconf(_SC_PAGESIZE); + sock_fd = socket(AF_INET6, SOCK_STREAM, 0); if (!ASSERT_GE(sock_fd, 0, "v6 socket open")) goto done; diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c index d9270bd3d920..f29c08d93beb 100644 --- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include <regex.h> #include <test_progs.h> #include <network_helpers.h> @@ -19,12 +20,16 @@ static struct { "; R1_w=map_value(off=0,ks=4,vs=4,imm=0)\n2: (85) call bpf_this_cpu_ptr#154\n" "R1 type=map_value expected=percpu_ptr_" }, { "lock_id_mapval_preserve", - "8: (bf) r1 = r0 ; R0_w=map_value(id=1,off=0,ks=4,vs=8,imm=0) " - "R1_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)\n9: (85) call bpf_this_cpu_ptr#154\n" + "[0-9]\\+: (bf) r1 = r0 ;" + " R0_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)" + " R1_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)\n" + "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n" "R1 type=map_value expected=percpu_ptr_" }, { "lock_id_innermapval_preserve", - "13: (bf) r1 = r0 ; R0=map_value(id=2,off=0,ks=4,vs=8,imm=0) " - "R1_w=map_value(id=2,off=0,ks=4,vs=8,imm=0)\n14: (85) call bpf_this_cpu_ptr#154\n" + "[0-9]\\+: (bf) r1 = r0 ;" + " R0=map_value(id=2,off=0,ks=4,vs=8,imm=0)" + " R1_w=map_value(id=2,off=0,ks=4,vs=8,imm=0)\n" + "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n" "R1 type=map_value expected=percpu_ptr_" }, { "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" }, { "lock_id_mismatch_kptr_global", 
"bpf_spin_unlock of different lock" }, @@ -45,6 +50,24 @@ static struct { { "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" }, }; +static int match_regex(const char *pattern, const char *string) +{ + int err, rc; + regex_t re; + + err = regcomp(&re, pattern, REG_NOSUB); + if (err) { + char errbuf[512]; + + regerror(err, &re, errbuf, sizeof(errbuf)); + PRINT_FAIL("Can't compile regex: %s\n", errbuf); + return -1; + } + rc = regexec(&re, string, 0, NULL, 0); + regfree(&re); + return rc == 0 ? 1 : 0; +} + static void test_spin_lock_fail_prog(const char *prog_name, const char *err_msg) { LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf, @@ -74,7 +97,11 @@ static void test_spin_lock_fail_prog(const char *prog_name, const char *err_msg) goto end; } - if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) { + ret = match_regex(err_msg, log_buf); + if (!ASSERT_GE(ret, 0, "match_regex")) + goto end; + + if (!ASSERT_TRUE(ret, "no match for expected error message")) { fprintf(stderr, "Expected: %s\n", err_msg); fprintf(stderr, "Verifier: %s\n", log_buf); } diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c index 740d5f644b40..d4579f735398 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c +++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c @@ -79,6 +79,8 @@ static const char * const success_tests[] = { "test_task_from_pid_current", "test_task_from_pid_invalid", "task_kfunc_acquire_trusted_walked", + "test_task_kfunc_flavor_relo", + "test_task_kfunc_flavor_relo_not_found", }; void test_task_kfunc(void) diff --git a/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c new file mode 100644 index 000000000000..4224727fb364 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Bytedance */ + +#include <sys/syscall.h> +#include <test_progs.h> +#include <cgroup_helpers.h> +#include "test_task_under_cgroup.skel.h" + +#define FOO "/foo" + +void test_task_under_cgroup(void) +{ + struct test_task_under_cgroup *skel; + int ret, foo; + pid_t pid; + + foo = test__join_cgroup(FOO); + if (!ASSERT_OK(foo < 0, "cgroup_join_foo")) + return; + + skel = test_task_under_cgroup__open(); + if (!ASSERT_OK_PTR(skel, "test_task_under_cgroup__open")) + goto cleanup; + + skel->rodata->local_pid = getpid(); + skel->bss->remote_pid = getpid(); + skel->rodata->cgid = get_cgroup_id(FOO); + + ret = test_task_under_cgroup__load(skel); + if (!ASSERT_OK(ret, "test_task_under_cgroup__load")) + goto cleanup; + + ret = test_task_under_cgroup__attach(skel); + if (!ASSERT_OK(ret, "test_task_under_cgroup__attach")) + goto cleanup; + + pid = fork(); + if (pid == 0) + exit(0); + + ret = (pid == -1); + if (ASSERT_OK(ret, "fork process")) + wait(NULL); + + test_task_under_cgroup__detach(skel); + + ASSERT_NEQ(skel->bss->remote_pid, skel->rodata->local_pid, + "test task_under_cgroup"); + +cleanup: + test_task_under_cgroup__destroy(skel); + close(foo); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c index e873766276d1..48b55539331e 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c @@ -3,6 +3,7 @@ #include <test_progs.h> #include <linux/pkt_cls.h> +#include "cap_helpers.h" #include "test_tc_bpf.skel.h" #define 
LO_IFINDEX 1 @@ -327,7 +328,7 @@ static int test_tc_bpf_api(struct bpf_tc_hook *hook, int fd) return 0; } -void test_tc_bpf(void) +void tc_bpf_root(void) { DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX, .attach_point = BPF_TC_INGRESS); @@ -393,3 +394,36 @@ end: } test_tc_bpf__destroy(skel); } + +void tc_bpf_non_root(void) +{ + struct test_tc_bpf *skel = NULL; + __u64 caps = 0; + int ret; + + /* In case CAP_BPF and CAP_PERFMON is not set */ + ret = cap_enable_effective(1ULL << CAP_BPF | 1ULL << CAP_NET_ADMIN, &caps); + if (!ASSERT_OK(ret, "set_cap_bpf_cap_net_admin")) + return; + ret = cap_disable_effective(1ULL << CAP_SYS_ADMIN | 1ULL << CAP_PERFMON, NULL); + if (!ASSERT_OK(ret, "disable_cap_sys_admin")) + goto restore_cap; + + skel = test_tc_bpf__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tc_bpf__open_and_load")) + goto restore_cap; + + test_tc_bpf__destroy(skel); + +restore_cap: + if (caps) + cap_enable_effective(caps, NULL); +} + +void test_tc_bpf(void) +{ + if (test__start_subtest("tc_bpf_root")) + tc_bpf_root(); + if (test__start_subtest("tc_bpf_non_root")) + tc_bpf_non_root(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h new file mode 100644 index 000000000000..67f985f7d215 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023 Isovalent */ +#ifndef TC_HELPERS +#define TC_HELPERS +#include <test_progs.h> + +static inline __u32 id_from_prog_fd(int fd) +{ + struct bpf_prog_info prog_info = {}; + __u32 prog_info_len = sizeof(prog_info); + int err; + + err = bpf_obj_get_info_by_fd(fd, &prog_info, &prog_info_len); + if (!ASSERT_OK(err, "id_from_prog_fd")) + return 0; + + ASSERT_NEQ(prog_info.id, 0, "prog_info.id"); + return prog_info.id; +} + +static inline __u32 id_from_link_fd(int fd) +{ + struct bpf_link_info link_info = {}; + __u32 link_info_len = sizeof(link_info); + int err; + + err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len); + if (!ASSERT_OK(err, "id_from_link_fd")) + return 0; + + ASSERT_NEQ(link_info.id, 0, "link_info.id"); + return link_info.id; +} + +static inline __u32 ifindex_from_link_fd(int fd) +{ + struct bpf_link_info link_info = {}; + __u32 link_info_len = sizeof(link_info); + int err; + + err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len); + if (!ASSERT_OK(err, "id_from_link_fd")) + return 0; + + return link_info.tcx.ifindex; +} + +static inline void __assert_mprog_count(int target, int expected, int ifindex) +{ + __u32 count = 0, attach_flags = 0; + int err; + + err = bpf_prog_query(ifindex, target, 0, &attach_flags, + NULL, &count); + ASSERT_EQ(count, expected, "count"); + ASSERT_EQ(err, 0, "prog_query"); +} + +static inline void assert_mprog_count(int target, int expected) +{ + __assert_mprog_count(target, expected, loopback); +} + +static inline void assert_mprog_count_ifindex(int ifindex, int target, int expected) +{ + __assert_mprog_count(target, expected, ifindex); +} + +static inline void tc_skel_reset_all_seen(struct test_tc_link *skel) +{ + memset(skel->bss, 0, sizeof(*skel->bss)); +} + +#endif /* TC_HELPERS */ diff --git a/tools/testing/selftests/bpf/prog_tests/tc_links.c b/tools/testing/selftests/bpf/prog_tests/tc_links.c new file mode 100644 index 000000000000..bc9841144685 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tc_links.c @@ -0,0 +1,1901 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Isovalent */ 
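+/* Tests attaching, ordering and querying tcx (BPF_TCX_INGRESS/EGRESS) BPF
+ * links on the loopback device.
+ */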
+#include <uapi/linux/if_link.h> +#include <uapi/linux/pkt_sched.h> +#include <net/if.h> +#include <test_progs.h> + +#define loopback 1 +#define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +#include "test_tc_link.skel.h" +#include "tc_helpers.h" + +void serial_test_tc_links_basic(void) +{ + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_tcx_opts, optl); + __u32 prog_ids[2], link_ids[2]; + __u32 pid1, pid2, lid1, lid2; + struct test_tc_link *skel; + struct bpf_link *link; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + + assert_mprog_count(BPF_TCX_INGRESS, 0); + assert_mprog_count(BPF_TCX_EGRESS, 0); + + ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + + assert_mprog_count(BPF_TCX_INGRESS, 1); + assert_mprog_count(BPF_TCX_EGRESS, 0); + + optq.prog_ids = prog_ids; + optq.link_ids = link_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 2, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + ASSERT_NEQ(lid1, lid2, "link_ids_1_2"); + + assert_mprog_count(BPF_TCX_INGRESS, 1); + assert_mprog_count(BPF_TCX_EGRESS, 1); + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, BPF_TCX_EGRESS, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 2, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +cleanup: + test_tc_link__destroy(skel); + + assert_mprog_count(BPF_TCX_INGRESS, 0); + assert_mprog_count(BPF_TCX_EGRESS, 0); +} + +static void test_tc_links_before_target(int target) +{ + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_tcx_opts, optl); + __u32 prog_ids[5], link_ids[5]; + __u32 pid1, pid2, pid3, pid4; + __u32 lid1, lid2, lid3, lid4; + struct test_tc_link *skel; + struct bpf_link *link; + int err; + + skel = test_tc_link__open(); + if 
(!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), + 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), + 0, "tc3_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), + 0, "tc4_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); + ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + + assert_mprog_count(target, 1); + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + + assert_mprog_count(target, 2); + + optq.prog_ids = prog_ids; + optq.link_ids = link_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE, + .relative_fd = bpf_program__fd(skel->progs.tc2), + ); + + link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc3 = link; + + lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3)); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE | BPF_F_LINK, + .relative_id = lid1, + ); + + link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc4 = link; + + lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); + + assert_mprog_count(target, 4); + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid4, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid4, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); 
+ ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], pid3, "prog_ids[2]"); + ASSERT_EQ(optq.link_ids[2], lid3, "link_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], pid2, "prog_ids[3]"); + ASSERT_EQ(optq.link_ids[3], lid2, "link_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); +cleanup: + test_tc_link__destroy(skel); + assert_mprog_count(target, 0); +} + +void serial_test_tc_links_before(void) +{ + test_tc_links_before_target(BPF_TCX_INGRESS); + test_tc_links_before_target(BPF_TCX_EGRESS); +} + +static void test_tc_links_after_target(int target) +{ + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_tcx_opts, optl); + __u32 prog_ids[5], link_ids[5]; + __u32 pid1, pid2, pid3, pid4; + __u32 lid1, lid2, lid3, lid4; + struct test_tc_link *skel; + struct bpf_link *link; + int err; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), + 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), + 0, "tc3_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), + 0, "tc4_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); + ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + + assert_mprog_count(target, 1); + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + + assert_mprog_count(target, 2); + + optq.prog_ids = prog_ids; + optq.link_ids = link_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, 
"seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_AFTER, + .relative_fd = bpf_program__fd(skel->progs.tc1), + ); + + link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc3 = link; + + lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3)); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_AFTER | BPF_F_LINK, + .relative_fd = bpf_link__fd(skel->links.tc2), + ); + + link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc4 = link; + + lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); + + assert_mprog_count(target, 4); + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid3, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], lid3, "link_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], pid2, "prog_ids[2]"); + ASSERT_EQ(optq.link_ids[2], lid2, "link_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], pid4, "prog_ids[3]"); + ASSERT_EQ(optq.link_ids[3], lid4, "link_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); +cleanup: + test_tc_link__destroy(skel); + assert_mprog_count(target, 0); +} + +void serial_test_tc_links_after(void) +{ + test_tc_links_after_target(BPF_TCX_INGRESS); + test_tc_links_after_target(BPF_TCX_EGRESS); +} + +static void test_tc_links_revision_target(int target) +{ + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_tcx_opts, optl); + __u32 prog_ids[3], link_ids[3]; + __u32 pid1, pid2, lid1, lid2; + struct test_tc_link *skel; + struct bpf_link *link; + int err; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), + 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + + assert_mprog_count(target, 0); + + optl.expected_revision = 1; + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + + assert_mprog_count(target, 1); + + optl.expected_revision = 1; + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 1); + + optl.expected_revision = 2; + + link = bpf_program__attach_tcx(skel->progs.tc2, 
loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + + assert_mprog_count(target, 2); + + optq.prog_ids = prog_ids; + optq.link_ids = link_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + ASSERT_EQ(optq.link_ids[2], 0, "prog_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +cleanup: + test_tc_link__destroy(skel); + assert_mprog_count(target, 0); +} + +void serial_test_tc_links_revision(void) +{ + test_tc_links_revision_target(BPF_TCX_INGRESS); + test_tc_links_revision_target(BPF_TCX_EGRESS); +} + +static void test_tc_chain_classic(int target, bool chain_tc_old) +{ + LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); + LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); + bool hook_created = false, tc_attached = false; + LIBBPF_OPTS(bpf_tcx_opts, optl); + __u32 pid1, pid2, pid3; + struct test_tc_link *skel; + struct bpf_link *link; + int err; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), + 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + if (chain_tc_old) { + tc_hook.attach_point = target == BPF_TCX_INGRESS ? + BPF_TC_INGRESS : BPF_TC_EGRESS; + err = bpf_tc_hook_create(&tc_hook); + if (err == 0) + hook_created = true; + err = err == -EEXIST ? 
0 : err; + if (!ASSERT_OK(err, "bpf_tc_hook_create")) + goto cleanup; + + tc_opts.prog_fd = bpf_program__fd(skel->progs.tc3); + err = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(err, "bpf_tc_attach")) + goto cleanup; + tc_attached = true; + } + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + assert_mprog_count(target, 2); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); + + err = bpf_link__detach(skel->links.tc2); + if (!ASSERT_OK(err, "prog_detach")) + goto cleanup; + + assert_mprog_count(target, 1); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); +cleanup: + if (tc_attached) { + tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; + err = bpf_tc_detach(&tc_hook, &tc_opts); + ASSERT_OK(err, "bpf_tc_detach"); + } + if (hook_created) { + tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; + bpf_tc_hook_destroy(&tc_hook); + } + assert_mprog_count(target, 1); + test_tc_link__destroy(skel); + assert_mprog_count(target, 0); +} + +void serial_test_tc_links_chain_classic(void) +{ + test_tc_chain_classic(BPF_TCX_INGRESS, false); + test_tc_chain_classic(BPF_TCX_EGRESS, false); + test_tc_chain_classic(BPF_TCX_INGRESS, true); + test_tc_chain_classic(BPF_TCX_EGRESS, true); +} + +static void test_tc_links_replace_target(int target) +{ + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_tcx_opts, optl); + __u32 pid1, pid2, pid3, lid1, lid2; + __u32 prog_ids[4], link_ids[4]; + struct test_tc_link *skel; + struct bpf_link *link; + int err; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), + 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), + 0, "tc3_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + optl.expected_revision = 1; + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE, + .relative_id = pid1, + .expected_revision = 2, + ); + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + + assert_mprog_count(target, 2); + + 
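The replace checks that follow distinguish attach-time BPF_F_REPLACE, which is rejected for slots owned by a link, from the supported path of updating the program behind an existing link. A short sketch of that supported path (illustrative, not from this patch; assumes an already attached tcx link and a loaded replacement program):

/* Sketch only: swap the program behind an existing tcx link in place.
 * The link id and its position in the chain stay the same, only the
 * program id changes.
 */
static int sketch_swap_link_prog(struct bpf_link *link, struct bpf_program *new_prog)
{
	return bpf_link__update_program(link, new_prog);
}
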
optq.prog_ids = prog_ids; + optq.link_ids = link_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_REPLACE, + .relative_fd = bpf_program__fd(skel->progs.tc2), + .expected_revision = 3, + ); + + link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 2); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_REPLACE | BPF_F_LINK, + .relative_fd = bpf_link__fd(skel->links.tc2), + .expected_revision = 3, + ); + + link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 2); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_REPLACE | BPF_F_LINK | BPF_F_AFTER, + .relative_id = lid2, + ); + + link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 2); + + err = bpf_link__update_program(skel->links.tc2, skel->progs.tc3); + if (!ASSERT_OK(err, "link_update")) + goto cleanup; + + assert_mprog_count(target, 2); + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 4, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid3, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); + + err = bpf_link__detach(skel->links.tc2); + if (!ASSERT_OK(err, "link_detach")) + goto cleanup; + + assert_mprog_count(target, 1); + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + 
ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + + err = bpf_link__update_program(skel->links.tc1, skel->progs.tc1); + if (!ASSERT_OK(err, "link_update_self")) + goto cleanup; + + assert_mprog_count(target, 1); + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); +cleanup: + test_tc_link__destroy(skel); + assert_mprog_count(target, 0); +} + +void serial_test_tc_links_replace(void) +{ + test_tc_links_replace_target(BPF_TCX_INGRESS); + test_tc_links_replace_target(BPF_TCX_EGRESS); +} + +static void test_tc_links_invalid_target(int target) +{ + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_tcx_opts, optl); + __u32 pid1, pid2, lid1; + struct test_tc_link *skel; + struct bpf_link *link; + int err; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), + 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + + assert_mprog_count(target, 0); + + optl.flags = BPF_F_BEFORE | BPF_F_AFTER; + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE | BPF_F_ID, + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_AFTER | BPF_F_ID, + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_ID, + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_LINK, + .relative_fd = bpf_program__fd(skel->progs.tc2), + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + 
bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_LINK, + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .relative_fd = bpf_program__fd(skel->progs.tc2), + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE | BPF_F_AFTER, + .relative_fd = bpf_program__fd(skel->progs.tc2), + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE, + .relative_fd = bpf_program__fd(skel->progs.tc1), + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_ID, + .relative_id = pid2, + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_ID, + .relative_id = 42, + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE, + .relative_fd = bpf_program__fd(skel->progs.tc1), + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE | BPF_F_LINK, + .relative_fd = bpf_program__fd(skel->progs.tc1), + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_AFTER, + .relative_fd = bpf_program__fd(skel->progs.tc1), + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl); + + link = bpf_program__attach_tcx(skel->progs.tc1, 0, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_AFTER | BPF_F_LINK, + .relative_fd = bpf_program__fd(skel->progs.tc1), + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optl); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; 
+ + skel->links.tc1 = link; + + lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_AFTER | BPF_F_LINK, + .relative_fd = bpf_program__fd(skel->progs.tc1), + ); + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID, + .relative_id = ~0, + ); + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID, + .relative_id = lid1, + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE | BPF_F_ID, + .relative_id = pid1, + ); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID, + .relative_id = lid1, + ); + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + assert_mprog_count(target, 2); +cleanup: + test_tc_link__destroy(skel); + assert_mprog_count(target, 0); +} + +void serial_test_tc_links_invalid(void) +{ + test_tc_links_invalid_target(BPF_TCX_INGRESS); + test_tc_links_invalid_target(BPF_TCX_EGRESS); +} + +static void test_tc_links_prepend_target(int target) +{ + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_tcx_opts, optl); + __u32 prog_ids[5], link_ids[5]; + __u32 pid1, pid2, pid3, pid4; + __u32 lid1, lid2, lid3, lid4; + struct test_tc_link *skel; + struct bpf_link *link; + int err; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), + 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), + 0, "tc3_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), + 0, "tc4_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); + ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE, + ); + + link = bpf_program__attach_tcx(skel->progs.tc2, 
loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + + assert_mprog_count(target, 2); + + optq.prog_ids = prog_ids; + optq.link_ids = link_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE, + ); + + link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc3 = link; + + lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3)); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE, + ); + + link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc4 = link; + + lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); + + assert_mprog_count(target, 4); + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid4, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid4, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid3, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], lid3, "link_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], pid2, "prog_ids[2]"); + ASSERT_EQ(optq.link_ids[2], lid2, "link_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], pid1, "prog_ids[3]"); + ASSERT_EQ(optq.link_ids[3], lid1, "link_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); +cleanup: + test_tc_link__destroy(skel); + assert_mprog_count(target, 0); +} + +void serial_test_tc_links_prepend(void) +{ + test_tc_links_prepend_target(BPF_TCX_INGRESS); + test_tc_links_prepend_target(BPF_TCX_EGRESS); +} + +static void test_tc_links_append_target(int target) +{ + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_tcx_opts, optl); + __u32 prog_ids[5], link_ids[5]; + __u32 pid1, pid2, pid3, pid4; + __u32 lid1, lid2, lid3, lid4; + struct test_tc_link *skel; + struct bpf_link *link; + int err; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), + 0, 
"tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), + 0, "tc3_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), + 0, "tc4_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); + ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_AFTER, + ); + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + + assert_mprog_count(target, 2); + + optq.prog_ids = prog_ids; + optq.link_ids = link_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_AFTER, + ); + + link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc3 = link; + + lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3)); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_AFTER, + ); + + link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc4 = link; + + lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); + + assert_mprog_count(target, 4); + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], pid3, "prog_ids[2]"); + ASSERT_EQ(optq.link_ids[2], lid3, "link_ids[2]"); + 
ASSERT_EQ(optq.prog_ids[3], pid4, "prog_ids[3]"); + ASSERT_EQ(optq.link_ids[3], lid4, "link_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); +cleanup: + test_tc_link__destroy(skel); + assert_mprog_count(target, 0); +} + +void serial_test_tc_links_append(void) +{ + test_tc_links_append_target(BPF_TCX_INGRESS); + test_tc_links_append_target(BPF_TCX_EGRESS); +} + +static void test_tc_links_dev_cleanup_target(int target) +{ + LIBBPF_OPTS(bpf_tcx_opts, optl); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 pid1, pid2, pid3, pid4; + struct test_tc_link *skel; + struct bpf_link *link; + int err, ifindex; + + ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth"); + ifindex = if_nametoindex("tcx_opts1"); + ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), + 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), + 0, "tc3_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), + 0, "tc4_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); + ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + link = bpf_program__attach_tcx(skel->progs.tc1, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + assert_mprog_count_ifindex(ifindex, target, 1); + + link = bpf_program__attach_tcx(skel->progs.tc2, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + assert_mprog_count_ifindex(ifindex, target, 2); + + link = bpf_program__attach_tcx(skel->progs.tc3, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc3 = link; + + assert_mprog_count_ifindex(ifindex, target, 3); + + link = bpf_program__attach_tcx(skel->progs.tc4, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc4 = link; + + assert_mprog_count_ifindex(ifindex, target, 4); + + ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); + ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); + ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); + + ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc1)), 0, "tc1_ifindex"); + ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc2)), 0, "tc2_ifindex"); + ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc3)), 0, "tc3_ifindex"); + ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc4)), 0, "tc4_ifindex"); + + test_tc_link__destroy(skel); + return; +cleanup: + 
test_tc_link__destroy(skel); + + ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); + ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); + ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); +} + +void serial_test_tc_links_dev_cleanup(void) +{ + test_tc_links_dev_cleanup_target(BPF_TCX_INGRESS); + test_tc_links_dev_cleanup_target(BPF_TCX_EGRESS); +} + +static void test_tc_chain_mixed(int target) +{ + LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); + LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); + LIBBPF_OPTS(bpf_tcx_opts, optl); + struct test_tc_link *skel; + struct bpf_link *link; + __u32 pid1, pid2, pid3; + int err; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), + 0, "tc4_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc5, target), + 0, "tc5_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc6, target), + 0, "tc6_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc5)); + pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc6)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + tc_hook.attach_point = target == BPF_TCX_INGRESS ? + BPF_TC_INGRESS : BPF_TC_EGRESS; + err = bpf_tc_hook_create(&tc_hook); + err = err == -EEXIST ? 0 : err; + if (!ASSERT_OK(err, "bpf_tc_hook_create")) + goto cleanup; + + tc_opts.prog_fd = bpf_program__fd(skel->progs.tc5); + err = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(err, "bpf_tc_attach")) + goto cleanup; + + link = bpf_program__attach_tcx(skel->progs.tc6, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc6 = link; + + assert_mprog_count(target, 1); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5"); + ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6"); + + err = bpf_link__update_program(skel->links.tc6, skel->progs.tc4); + if (!ASSERT_OK(err, "link_update")) + goto cleanup; + + assert_mprog_count(target, 1); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); + ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5"); + ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6"); + + err = bpf_link__detach(skel->links.tc6); + if (!ASSERT_OK(err, "prog_detach")) + goto cleanup; + + assert_mprog_count(target, 0); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5"); + ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6"); + +cleanup: + tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; + err = bpf_tc_detach(&tc_hook, &tc_opts); + ASSERT_OK(err, "bpf_tc_detach"); + + tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; + bpf_tc_hook_destroy(&tc_hook); + + test_tc_link__destroy(skel); +} + +void serial_test_tc_links_chain_mixed(void) +{ + test_tc_chain_mixed(BPF_TCX_INGRESS); + test_tc_chain_mixed(BPF_TCX_EGRESS); +} + +static void test_tc_links_ingress(int target, bool chain_tc_old, + bool tcx_teardown_first) +{ + 
LIBBPF_OPTS(bpf_tc_opts, tc_opts, + .handle = 1, + .priority = 1, + ); + LIBBPF_OPTS(bpf_tc_hook, tc_hook, + .ifindex = loopback, + .attach_point = BPF_TC_CUSTOM, + .parent = TC_H_INGRESS, + ); + bool hook_created = false, tc_attached = false; + LIBBPF_OPTS(bpf_tcx_opts, optl); + __u32 pid1, pid2, pid3; + struct test_tc_link *skel; + struct bpf_link *link; + int err; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), + 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + if (chain_tc_old) { + ASSERT_OK(system("tc qdisc add dev lo ingress"), "add_ingress"); + hook_created = true; + + tc_opts.prog_fd = bpf_program__fd(skel->progs.tc3); + err = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(err, "bpf_tc_attach")) + goto cleanup; + tc_attached = true; + } + + link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + assert_mprog_count(target, 2); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); + + err = bpf_link__detach(skel->links.tc2); + if (!ASSERT_OK(err, "prog_detach")) + goto cleanup; + + assert_mprog_count(target, 1); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); +cleanup: + if (tc_attached) { + tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; + err = bpf_tc_detach(&tc_hook, &tc_opts); + ASSERT_OK(err, "bpf_tc_detach"); + } + ASSERT_OK(system(ping_cmd), ping_cmd); + assert_mprog_count(target, 1); + if (hook_created && tcx_teardown_first) + ASSERT_OK(system("tc qdisc del dev lo ingress"), "del_ingress"); + ASSERT_OK(system(ping_cmd), ping_cmd); + test_tc_link__destroy(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + if (hook_created && !tcx_teardown_first) + ASSERT_OK(system("tc qdisc del dev lo ingress"), "del_ingress"); + ASSERT_OK(system(ping_cmd), ping_cmd); + assert_mprog_count(target, 0); +} + +void serial_test_tc_links_ingress(void) +{ + test_tc_links_ingress(BPF_TCX_INGRESS, true, true); + test_tc_links_ingress(BPF_TCX_INGRESS, true, false); + test_tc_links_ingress(BPF_TCX_INGRESS, false, false); +} + +static void test_tc_links_dev_mixed(int target) +{ + LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); + LIBBPF_OPTS(bpf_tc_hook, tc_hook); + LIBBPF_OPTS(bpf_tcx_opts, optl); + __u32 pid1, pid2, pid3, pid4; + struct test_tc_link *skel; + struct bpf_link *link; + int err, ifindex; + + ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name 
tcx_opts2"), "add veth"); + ifindex = if_nametoindex("tcx_opts1"); + ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), + 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), + 0, "tc3_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), + 0, "tc4_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); + ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + link = bpf_program__attach_tcx(skel->progs.tc1, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + assert_mprog_count_ifindex(ifindex, target, 1); + + link = bpf_program__attach_tcx(skel->progs.tc2, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + assert_mprog_count_ifindex(ifindex, target, 2); + + link = bpf_program__attach_tcx(skel->progs.tc3, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc3 = link; + + assert_mprog_count_ifindex(ifindex, target, 3); + + link = bpf_program__attach_tcx(skel->progs.tc4, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc4 = link; + + assert_mprog_count_ifindex(ifindex, target, 4); + + tc_hook.ifindex = ifindex; + tc_hook.attach_point = target == BPF_TCX_INGRESS ? + BPF_TC_INGRESS : BPF_TC_EGRESS; + + err = bpf_tc_hook_create(&tc_hook); + err = err == -EEXIST ? 
0 : err; + if (!ASSERT_OK(err, "bpf_tc_hook_create")) + goto cleanup; + + tc_opts.prog_fd = bpf_program__fd(skel->progs.tc5); + err = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(err, "bpf_tc_attach")) + goto cleanup; + + ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); + ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); + ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); + + ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc1)), 0, "tc1_ifindex"); + ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc2)), 0, "tc2_ifindex"); + ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc3)), 0, "tc3_ifindex"); + ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc4)), 0, "tc4_ifindex"); + + test_tc_link__destroy(skel); + return; +cleanup: + test_tc_link__destroy(skel); + + ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); + ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); + ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); +} + +void serial_test_tc_links_dev_mixed(void) +{ + test_tc_links_dev_mixed(BPF_TCX_INGRESS); + test_tc_links_dev_mixed(BPF_TCX_EGRESS); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c new file mode 100644 index 000000000000..ca506d2fcf58 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c @@ -0,0 +1,2689 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Isovalent */ +#include <uapi/linux/if_link.h> +#include <net/if.h> +#include <test_progs.h> + +#define loopback 1 +#define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +#include "test_tc_link.skel.h" +#include "tc_helpers.h" + +void serial_test_tc_opts_basic(void) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, id1, id2; + struct test_tc_link *skel; + __u32 prog_ids[2]; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + + assert_mprog_count(BPF_TCX_INGRESS, 0); + assert_mprog_count(BPF_TCX_EGRESS, 0); + + ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + + err = bpf_prog_attach_opts(fd1, loopback, BPF_TCX_INGRESS, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(BPF_TCX_INGRESS, 1); + assert_mprog_count(BPF_TCX_EGRESS, 0); + + optq.prog_ids = prog_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_in; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 2, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + + err = bpf_prog_attach_opts(fd2, loopback, BPF_TCX_EGRESS, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_in; + + assert_mprog_count(BPF_TCX_INGRESS, 1); + assert_mprog_count(BPF_TCX_EGRESS, 1); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + 
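Unlike the tc_links tests, the tc_opts tests use the link-less API: programs are attached and removed by fd via bpf_prog_attach_opts()/bpf_prog_detach_opts() rather than through a bpf_link. A minimal sketch of that round trip (illustrative, not from this patch; assumes a loaded program fd):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Sketch only: attach a program fd directly to the tcx egress hook and
 * remove it again, without creating a bpf_link.
 */
static int sketch_opts_attach_detach(int prog_fd, int ifindex)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	int err;

	err = bpf_prog_attach_opts(prog_fd, ifindex, BPF_TCX_EGRESS, &opta);
	if (err)
		return err;

	/* ... exercise traffic, query the chain, etc. ... */

	return bpf_prog_detach_opts(prog_fd, ifindex, BPF_TCX_EGRESS, &optd);
}
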
err = bpf_prog_query_opts(loopback, BPF_TCX_EGRESS, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_eg; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 2, "revision"); + ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + +cleanup_eg: + err = bpf_prog_detach_opts(fd2, loopback, BPF_TCX_EGRESS, &optd); + ASSERT_OK(err, "prog_detach_eg"); + + assert_mprog_count(BPF_TCX_INGRESS, 1); + assert_mprog_count(BPF_TCX_EGRESS, 0); + +cleanup_in: + err = bpf_prog_detach_opts(fd1, loopback, BPF_TCX_INGRESS, &optd); + ASSERT_OK(err, "prog_detach_in"); + + assert_mprog_count(BPF_TCX_INGRESS, 0); + assert_mprog_count(BPF_TCX_EGRESS, 0); + +cleanup: + test_tc_link__destroy(skel); +} + +static void test_tc_opts_before_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; + struct test_tc_link *skel; + __u32 prog_ids[5]; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + fd3 = bpf_program__fd(skel->progs.tc3); + fd4 = bpf_program__fd(skel->progs.tc4); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + id4 = id_from_prog_fd(fd4); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + ASSERT_NEQ(id3, id4, "prog_ids_3_4"); + ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(target, 1); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target; + + assert_mprog_count(target, 2); + + optq.prog_ids = prog_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target2; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE, + .relative_fd = fd2, + ); + + err = bpf_prog_attach_opts(fd3, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target2; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target3; + + ASSERT_EQ(optq.count, 3, "count"); + ASSERT_EQ(optq.revision, 4, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + + LIBBPF_OPTS_RESET(opta, 
+ .flags = BPF_F_BEFORE, + .relative_id = id1, + ); + + err = bpf_prog_attach_opts(fd4, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target3; + + assert_mprog_count(target, 4); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target4; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], id2, "prog_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); + +cleanup_target4: + err = bpf_prog_detach_opts(fd4, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 3); + +cleanup_target3: + err = bpf_prog_detach_opts(fd3, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 2); + +cleanup_target2: + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 1); + +cleanup_target: + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); + +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_before(void) +{ + test_tc_opts_before_target(BPF_TCX_INGRESS); + test_tc_opts_before_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_after_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; + struct test_tc_link *skel; + __u32 prog_ids[5]; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + fd3 = bpf_program__fd(skel->progs.tc3); + fd4 = bpf_program__fd(skel->progs.tc4); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + id4 = id_from_prog_fd(fd4); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + ASSERT_NEQ(id3, id4, "prog_ids_3_4"); + ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(target, 1); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target; + + assert_mprog_count(target, 2); + + optq.prog_ids = prog_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target2; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + 
ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_AFTER, + .relative_fd = fd1, + ); + + err = bpf_prog_attach_opts(fd3, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target2; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target3; + + ASSERT_EQ(optq.count, 3, "count"); + ASSERT_EQ(optq.revision, 4, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_AFTER, + .relative_id = id2, + ); + + err = bpf_prog_attach_opts(fd4, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target3; + + assert_mprog_count(target, 4); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target4; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); + +cleanup_target4: + err = bpf_prog_detach_opts(fd4, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 3); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target3; + + ASSERT_EQ(optq.count, 3, "count"); + ASSERT_EQ(optq.revision, 6, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + +cleanup_target3: + err = bpf_prog_detach_opts(fd3, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 2); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target2; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 7, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +cleanup_target2: + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 1); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 8, "revision"); + 
ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + +cleanup_target: + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); + +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_after(void) +{ + test_tc_opts_after_target(BPF_TCX_INGRESS); + test_tc_opts_after_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_revision_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, id1, id2; + struct test_tc_link *skel; + __u32 prog_ids[3]; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(opta, + .expected_revision = 1, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(opta, + .expected_revision = 1, + ); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, -ESTALE, "prog_attach")) + goto cleanup_target; + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(opta, + .expected_revision = 2, + ); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target; + + assert_mprog_count(target, 2); + + optq.prog_ids = prog_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target2; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + + LIBBPF_OPTS_RESET(optd, + .expected_revision = 2, + ); + + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_EQ(err, -ESTALE, "prog_detach"); + assert_mprog_count(target, 2); + +cleanup_target2: + LIBBPF_OPTS_RESET(optd, + .expected_revision = 3, + ); + + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 1); + +cleanup_target: + LIBBPF_OPTS_RESET(optd); + + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); + +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_revision(void) +{ + test_tc_opts_revision_target(BPF_TCX_INGRESS); + test_tc_opts_revision_target(BPF_TCX_EGRESS); +} + +static void test_tc_chain_classic(int target, bool chain_tc_old) +{ + LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); + LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + bool hook_created = false, tc_attached = false; + __u32 fd1, fd2, fd3, id1, id2, id3; + struct test_tc_link *skel; + int err; + + skel = test_tc_link__open_and_load(); + if 
(!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + fd3 = bpf_program__fd(skel->progs.tc3); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + if (chain_tc_old) { + tc_hook.attach_point = target == BPF_TCX_INGRESS ? + BPF_TC_INGRESS : BPF_TC_EGRESS; + err = bpf_tc_hook_create(&tc_hook); + if (err == 0) + hook_created = true; + err = err == -EEXIST ? 0 : err; + if (!ASSERT_OK(err, "bpf_tc_hook_create")) + goto cleanup; + + tc_opts.prog_fd = fd3; + err = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(err, "bpf_tc_attach")) + goto cleanup; + tc_attached = true; + } + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_detach; + + assert_mprog_count(target, 2); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); + + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + if (!ASSERT_OK(err, "prog_detach")) + goto cleanup_detach; + + assert_mprog_count(target, 1); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); + +cleanup_detach: + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + if (!ASSERT_OK(err, "prog_detach")) + goto cleanup; + + assert_mprog_count(target, 0); +cleanup: + if (tc_attached) { + tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; + err = bpf_tc_detach(&tc_hook, &tc_opts); + ASSERT_OK(err, "bpf_tc_detach"); + } + if (hook_created) { + tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; + bpf_tc_hook_destroy(&tc_hook); + } + test_tc_link__destroy(skel); + assert_mprog_count(target, 0); +} + +void serial_test_tc_opts_chain_classic(void) +{ + test_tc_chain_classic(BPF_TCX_INGRESS, false); + test_tc_chain_classic(BPF_TCX_EGRESS, false); + test_tc_chain_classic(BPF_TCX_INGRESS, true); + test_tc_chain_classic(BPF_TCX_EGRESS, true); +} + +static void test_tc_opts_replace_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, fd3, id1, id2, id3, detach_fd; + __u32 prog_ids[4], prog_flags[4]; + struct test_tc_link *skel; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + fd3 = bpf_program__fd(skel->progs.tc3); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(opta, + .expected_revision = 1, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE, + .relative_id = id1, + 
.expected_revision = 2, + ); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target; + + detach_fd = fd2; + + assert_mprog_count(target, 2); + + optq.prog_attach_flags = prog_flags; + optq.prog_ids = prog_ids; + + memset(prog_flags, 0, sizeof(prog_flags)); + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target2; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + + ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]"); + ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]"); + ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_REPLACE, + .replace_prog_fd = fd2, + .expected_revision = 3, + ); + + err = bpf_prog_attach_opts(fd3, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target2; + + detach_fd = fd3; + + assert_mprog_count(target, 2); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target2; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 4, "revision"); + ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_REPLACE | BPF_F_BEFORE, + .replace_prog_fd = fd3, + .relative_fd = fd1, + .expected_revision = 4, + ); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target2; + + detach_fd = fd2; + + assert_mprog_count(target, 2); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target2; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_REPLACE, + .replace_prog_fd = fd2, + ); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + ASSERT_EQ(err, -EEXIST, "prog_attach"); + assert_mprog_count(target, 2); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_REPLACE | BPF_F_AFTER, + .replace_prog_fd = fd2, + .relative_fd = fd1, + .expected_revision = 5, + ); + + err = bpf_prog_attach_opts(fd3, loopback, target, &opta); 
+ ASSERT_EQ(err, -ERANGE, "prog_attach"); + assert_mprog_count(target, 2); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE | BPF_F_AFTER | BPF_F_REPLACE, + .replace_prog_fd = fd2, + .relative_fd = fd1, + .expected_revision = 5, + ); + + err = bpf_prog_attach_opts(fd3, loopback, target, &opta); + ASSERT_EQ(err, -ERANGE, "prog_attach"); + assert_mprog_count(target, 2); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_BEFORE, + .relative_id = id1, + .expected_revision = 5, + ); + +cleanup_target2: + err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 1); + +cleanup_target: + LIBBPF_OPTS_RESET(optd); + + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); + +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_replace(void) +{ + test_tc_opts_replace_target(BPF_TCX_INGRESS); + test_tc_opts_replace_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_invalid_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + __u32 fd1, fd2, id1, id2; + struct test_tc_link *skel; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE | BPF_F_AFTER, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -ERANGE, "prog_attach"); + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE | BPF_F_ID, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -ENOENT, "prog_attach"); + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_AFTER | BPF_F_ID, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -ENOENT, "prog_attach"); + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(opta, + .relative_fd = fd2, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -EINVAL, "prog_attach"); + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE | BPF_F_AFTER, + .relative_fd = fd2, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -ENOENT, "prog_attach"); + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_ID, + .relative_id = id2, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -EINVAL, "prog_attach"); + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE, + .relative_fd = fd1, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -ENOENT, "prog_attach"); + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_AFTER, + .relative_fd = fd1, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -ENOENT, "prog_attach"); + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(opta); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(opta); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -EEXIST, "prog_attach"); + 
assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE, + .relative_fd = fd1, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -EEXIST, "prog_attach"); + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_AFTER, + .relative_fd = fd1, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -EEXIST, "prog_attach"); + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_REPLACE, + .relative_fd = fd1, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -EINVAL, "prog_attach_x1"); + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_REPLACE, + .replace_prog_fd = fd1, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + ASSERT_EQ(err, -EEXIST, "prog_attach"); + assert_mprog_count(target, 1); + + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_invalid(void) +{ + test_tc_opts_invalid_target(BPF_TCX_INGRESS); + test_tc_opts_invalid_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_prepend_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; + struct test_tc_link *skel; + __u32 prog_ids[5]; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + fd3 = bpf_program__fd(skel->progs.tc3); + fd4 = bpf_program__fd(skel->progs.tc4); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + id4 = id_from_prog_fd(fd4); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + ASSERT_NEQ(id3, id4, "prog_ids_3_4"); + ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE, + ); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target; + + assert_mprog_count(target, 2); + + optq.prog_ids = prog_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target2; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE, + ); + + err = bpf_prog_attach_opts(fd3, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target2; + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE, + ); + + err = bpf_prog_attach_opts(fd4, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target3; 
+ + assert_mprog_count(target, 4); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target4; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], id1, "prog_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); + +cleanup_target4: + err = bpf_prog_detach_opts(fd4, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 3); + +cleanup_target3: + err = bpf_prog_detach_opts(fd3, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 2); + +cleanup_target2: + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 1); + +cleanup_target: + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); + +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_prepend(void) +{ + test_tc_opts_prepend_target(BPF_TCX_INGRESS); + test_tc_opts_prepend_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_append_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; + struct test_tc_link *skel; + __u32 prog_ids[5]; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + fd3 = bpf_program__fd(skel->progs.tc3); + fd4 = bpf_program__fd(skel->progs.tc4); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + id4 = id_from_prog_fd(fd4); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + ASSERT_NEQ(id3, id4, "prog_ids_3_4"); + ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_AFTER, + ); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target; + + assert_mprog_count(target, 2); + + optq.prog_ids = prog_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target2; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + 
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_AFTER, + ); + + err = bpf_prog_attach_opts(fd3, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target2; + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_AFTER, + ); + + err = bpf_prog_attach_opts(fd4, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_target3; + + assert_mprog_count(target, 4); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_target4; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); + ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); + +cleanup_target4: + err = bpf_prog_detach_opts(fd4, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 3); + +cleanup_target3: + err = bpf_prog_detach_opts(fd3, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 2); + +cleanup_target2: + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 1); + +cleanup_target: + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); + +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_append(void) +{ + test_tc_opts_append_target(BPF_TCX_INGRESS); + test_tc_opts_append_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_dev_cleanup_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; + struct test_tc_link *skel; + int err, ifindex; + + ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth"); + ifindex = if_nametoindex("tcx_opts1"); + ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + fd3 = bpf_program__fd(skel->progs.tc3); + fd4 = bpf_program__fd(skel->progs.tc4); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + id4 = id_from_prog_fd(fd4); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + ASSERT_NEQ(id3, id4, "prog_ids_3_4"); + ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + + assert_mprog_count_ifindex(ifindex, target, 0); + + err = bpf_prog_attach_opts(fd1, ifindex, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count_ifindex(ifindex, target, 1); + + err = bpf_prog_attach_opts(fd2, ifindex, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup1; + + assert_mprog_count_ifindex(ifindex, target, 2); + + err = bpf_prog_attach_opts(fd3, ifindex, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup2; + + 
assert_mprog_count_ifindex(ifindex, target, 3); + + err = bpf_prog_attach_opts(fd4, ifindex, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup3; + + assert_mprog_count_ifindex(ifindex, target, 4); + + ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); + ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); + ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); + return; +cleanup3: + err = bpf_prog_detach_opts(fd3, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count_ifindex(ifindex, target, 2); +cleanup2: + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count_ifindex(ifindex, target, 1); +cleanup1: + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count_ifindex(ifindex, target, 0); +cleanup: + test_tc_link__destroy(skel); + + ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); + ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); + ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); +} + +void serial_test_tc_opts_dev_cleanup(void) +{ + test_tc_opts_dev_cleanup_target(BPF_TCX_INGRESS); + test_tc_opts_dev_cleanup_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_mixed_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_tcx_opts, optl); + __u32 pid1, pid2, pid3, pid4, lid2, lid4; + __u32 prog_flags[4], link_flags[4]; + __u32 prog_ids[4], link_ids[4]; + struct test_tc_link *skel; + struct bpf_link *link; + int err, detach_fd; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), + 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), + 0, "tc3_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), + 0, "tc4_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); + ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1), + loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + detach_fd = bpf_program__fd(skel->progs.tc1); + + assert_mprog_count(target, 1); + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup1; + skel->links.tc2 = link; + + lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + + assert_mprog_count(target, 2); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_REPLACE, + .replace_prog_fd = bpf_program__fd(skel->progs.tc1), + ); + + err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2), + loopback, target, &opta); + ASSERT_EQ(err, -EEXIST, "prog_attach"); + + assert_mprog_count(target, 2); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_REPLACE, + .replace_prog_fd = bpf_program__fd(skel->progs.tc2), + ); + + 
err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1), + loopback, target, &opta); + ASSERT_EQ(err, -EEXIST, "prog_attach"); + + assert_mprog_count(target, 2); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_REPLACE, + .replace_prog_fd = bpf_program__fd(skel->progs.tc2), + ); + + err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3), + loopback, target, &opta); + ASSERT_EQ(err, -EBUSY, "prog_attach"); + + assert_mprog_count(target, 2); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_REPLACE, + .replace_prog_fd = bpf_program__fd(skel->progs.tc1), + ); + + err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3), + loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup1; + + detach_fd = bpf_program__fd(skel->progs.tc3); + + assert_mprog_count(target, 2); + + link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup1; + skel->links.tc4 = link; + + lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); + + assert_mprog_count(target, 3); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_REPLACE, + .replace_prog_fd = bpf_program__fd(skel->progs.tc4), + ); + + err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2), + loopback, target, &opta); + ASSERT_EQ(err, -EEXIST, "prog_attach"); + + optq.prog_ids = prog_ids; + optq.prog_attach_flags = prog_flags; + optq.link_ids = link_ids; + optq.link_attach_flags = link_flags; + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(prog_flags, 0, sizeof(prog_flags)); + memset(link_ids, 0, sizeof(link_ids)); + memset(link_flags, 0, sizeof(link_flags)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup1; + + ASSERT_EQ(optq.count, 3, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid3, "prog_ids[0]"); + ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]"); + ASSERT_EQ(optq.link_ids[0], 0, "link_ids[0]"); + ASSERT_EQ(optq.link_attach_flags[0], 0, "link_flags[0]"); + ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); + ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]"); + ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); + ASSERT_EQ(optq.link_attach_flags[1], 0, "link_flags[1]"); + ASSERT_EQ(optq.prog_ids[2], pid4, "prog_ids[2]"); + ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]"); + ASSERT_EQ(optq.link_ids[2], lid4, "link_ids[2]"); + ASSERT_EQ(optq.link_attach_flags[2], 0, "link_flags[2]"); + ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + ASSERT_EQ(optq.prog_attach_flags[3], 0, "prog_flags[3]"); + ASSERT_EQ(optq.link_ids[3], 0, "link_ids[3]"); + ASSERT_EQ(optq.link_attach_flags[3], 0, "link_flags[3]"); + + ASSERT_OK(system(ping_cmd), ping_cmd); + +cleanup1: + err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 2); + +cleanup: + test_tc_link__destroy(skel); + assert_mprog_count(target, 0); +} + +void serial_test_tc_opts_mixed(void) +{ + test_tc_opts_mixed_target(BPF_TCX_INGRESS); + test_tc_opts_mixed_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_demixed_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_tcx_opts, optl); + struct test_tc_link *skel; + struct bpf_link *link; + __u32 pid1, pid2; + int err; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, 
target), + 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), + 0, "tc2_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + + assert_mprog_count(target, 0); + + err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1), + loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(target, 1); + + link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup1; + skel->links.tc2 = link; + + assert_mprog_count(target, 2); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_AFTER, + ); + + err = bpf_prog_detach_opts(0, loopback, target, &optd); + ASSERT_EQ(err, -EBUSY, "prog_detach"); + + assert_mprog_count(target, 2); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_BEFORE, + ); + + err = bpf_prog_detach_opts(0, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count(target, 1); + goto cleanup; + +cleanup1: + err = bpf_prog_detach_opts(bpf_program__fd(skel->progs.tc1), + loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 2); + +cleanup: + test_tc_link__destroy(skel); + assert_mprog_count(target, 0); +} + +void serial_test_tc_opts_demixed(void) +{ + test_tc_opts_demixed_target(BPF_TCX_INGRESS); + test_tc_opts_demixed_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_detach_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; + struct test_tc_link *skel; + __u32 prog_ids[5]; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + fd3 = bpf_program__fd(skel->progs.tc3); + fd4 = bpf_program__fd(skel->progs.tc4); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + id4 = id_from_prog_fd(fd4); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + ASSERT_NEQ(id3, id4, "prog_ids_3_4"); + ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(target, 1); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup1; + + assert_mprog_count(target, 2); + + err = bpf_prog_attach_opts(fd3, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup2; + + assert_mprog_count(target, 3); + + err = bpf_prog_attach_opts(fd4, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup3; + + assert_mprog_count(target, 4); + + optq.prog_ids = prog_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); + 
ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_BEFORE, + ); + + err = bpf_prog_detach_opts(0, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count(target, 3); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 3, "count"); + ASSERT_EQ(optq.revision, 6, "revision"); + ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_AFTER, + ); + + err = bpf_prog_detach_opts(0, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count(target, 2); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 7, "revision"); + ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + + LIBBPF_OPTS_RESET(optd); + + err = bpf_prog_detach_opts(fd3, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 1); + + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_BEFORE, + ); + + err = bpf_prog_detach_opts(0, loopback, target, &optd); + ASSERT_EQ(err, -ENOENT, "prog_detach"); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_AFTER, + ); + + err = bpf_prog_detach_opts(0, loopback, target, &optd); + ASSERT_EQ(err, -ENOENT, "prog_detach"); + goto cleanup; + +cleanup4: + err = bpf_prog_detach_opts(fd4, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 3); + +cleanup3: + err = bpf_prog_detach_opts(fd3, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 2); + +cleanup2: + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 1); + +cleanup1: + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); + +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_detach(void) +{ + test_tc_opts_detach_target(BPF_TCX_INGRESS); + test_tc_opts_detach_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_detach_before_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; + struct test_tc_link *skel; + __u32 prog_ids[5]; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + fd3 = bpf_program__fd(skel->progs.tc3); + fd4 = bpf_program__fd(skel->progs.tc4); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + id4 = id_from_prog_fd(fd4); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + ASSERT_NEQ(id3, id4, "prog_ids_3_4"); + ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + err = 
bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(target, 1); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup1; + + assert_mprog_count(target, 2); + + err = bpf_prog_attach_opts(fd3, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup2; + + assert_mprog_count(target, 3); + + err = bpf_prog_attach_opts(fd4, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup3; + + assert_mprog_count(target, 4); + + optq.prog_ids = prog_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_BEFORE, + .relative_fd = fd2, + ); + + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count(target, 3); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 3, "count"); + ASSERT_EQ(optq.revision, 6, "revision"); + ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_BEFORE, + .relative_fd = fd2, + ); + + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_EQ(err, -ENOENT, "prog_detach"); + assert_mprog_count(target, 3); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_BEFORE, + .relative_fd = fd4, + ); + + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_EQ(err, -ERANGE, "prog_detach"); + assert_mprog_count(target, 3); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_BEFORE, + .relative_fd = fd1, + ); + + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_EQ(err, -ENOENT, "prog_detach"); + assert_mprog_count(target, 3); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_BEFORE, + .relative_fd = fd3, + ); + + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count(target, 2); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 7, "revision"); + ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_BEFORE, + .relative_fd = fd4, + ); + + err = bpf_prog_detach_opts(0, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count(target, 1); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + 
ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 8, "revision"); + ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_BEFORE, + ); + + err = bpf_prog_detach_opts(0, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count(target, 0); + goto cleanup; + +cleanup4: + err = bpf_prog_detach_opts(fd4, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 3); + +cleanup3: + err = bpf_prog_detach_opts(fd3, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 2); + +cleanup2: + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 1); + +cleanup1: + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); + +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_detach_before(void) +{ + test_tc_opts_detach_before_target(BPF_TCX_INGRESS); + test_tc_opts_detach_before_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_detach_after_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; + struct test_tc_link *skel; + __u32 prog_ids[5]; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + fd3 = bpf_program__fd(skel->progs.tc3); + fd4 = bpf_program__fd(skel->progs.tc4); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + id4 = id_from_prog_fd(fd4); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + ASSERT_NEQ(id3, id4, "prog_ids_3_4"); + ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(target, 1); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup1; + + assert_mprog_count(target, 2); + + err = bpf_prog_attach_opts(fd3, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup2; + + assert_mprog_count(target, 3); + + err = bpf_prog_attach_opts(fd4, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup3; + + assert_mprog_count(target, 4); + + optq.prog_ids = prog_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_AFTER, + .relative_fd = fd1, + ); + + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count(target, 3); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto 
cleanup4; + + ASSERT_EQ(optq.count, 3, "count"); + ASSERT_EQ(optq.revision, 6, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_AFTER, + .relative_fd = fd1, + ); + + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_EQ(err, -ENOENT, "prog_detach"); + assert_mprog_count(target, 3); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_AFTER, + .relative_fd = fd4, + ); + + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_EQ(err, -ERANGE, "prog_detach"); + assert_mprog_count(target, 3); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_AFTER, + .relative_fd = fd3, + ); + + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_EQ(err, -ERANGE, "prog_detach"); + assert_mprog_count(target, 3); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_AFTER, + .relative_fd = fd1, + ); + + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_EQ(err, -ERANGE, "prog_detach"); + assert_mprog_count(target, 3); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_AFTER, + .relative_fd = fd1, + ); + + err = bpf_prog_detach_opts(fd3, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count(target, 2); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 7, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_AFTER, + .relative_fd = fd1, + ); + + err = bpf_prog_detach_opts(0, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count(target, 1); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 8, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + + LIBBPF_OPTS_RESET(optd, + .flags = BPF_F_AFTER, + ); + + err = bpf_prog_detach_opts(0, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + + assert_mprog_count(target, 0); + goto cleanup; + +cleanup4: + err = bpf_prog_detach_opts(fd4, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 3); + +cleanup3: + err = bpf_prog_detach_opts(fd3, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 2); + +cleanup2: + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 1); + +cleanup1: + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); + +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_detach_after(void) +{ + test_tc_opts_detach_after_target(BPF_TCX_INGRESS); + test_tc_opts_detach_after_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_delete_empty(int target, bool chain_tc_old) +{ + LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + int err; + + assert_mprog_count(target, 0); + 
if (chain_tc_old) { + tc_hook.attach_point = target == BPF_TCX_INGRESS ? + BPF_TC_INGRESS : BPF_TC_EGRESS; + err = bpf_tc_hook_create(&tc_hook); + ASSERT_OK(err, "bpf_tc_hook_create"); + assert_mprog_count(target, 0); + } + err = bpf_prog_detach_opts(0, loopback, target, &optd); + ASSERT_EQ(err, -ENOENT, "prog_detach"); + if (chain_tc_old) { + tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; + bpf_tc_hook_destroy(&tc_hook); + } + assert_mprog_count(target, 0); +} + +void serial_test_tc_opts_delete_empty(void) +{ + test_tc_opts_delete_empty(BPF_TCX_INGRESS, false); + test_tc_opts_delete_empty(BPF_TCX_EGRESS, false); + test_tc_opts_delete_empty(BPF_TCX_INGRESS, true); + test_tc_opts_delete_empty(BPF_TCX_EGRESS, true); +} + +static void test_tc_chain_mixed(int target) +{ + LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); + LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + __u32 fd1, fd2, fd3, id1, id2, id3; + struct test_tc_link *skel; + int err, detach_fd; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc4); + fd2 = bpf_program__fd(skel->progs.tc5); + fd3 = bpf_program__fd(skel->progs.tc6); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + + assert_mprog_count(target, 0); + + tc_hook.attach_point = target == BPF_TCX_INGRESS ? + BPF_TC_INGRESS : BPF_TC_EGRESS; + err = bpf_tc_hook_create(&tc_hook); + err = err == -EEXIST ? 0 : err; + if (!ASSERT_OK(err, "bpf_tc_hook_create")) + goto cleanup; + + tc_opts.prog_fd = fd2; + err = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(err, "bpf_tc_attach")) + goto cleanup_hook; + + err = bpf_prog_attach_opts(fd3, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_filter; + + detach_fd = fd3; + + assert_mprog_count(target, 1); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5"); + ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6"); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_REPLACE, + .replace_prog_fd = fd3, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_opts; + + detach_fd = fd1; + + assert_mprog_count(target, 1); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); + ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5"); + ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6"); + +cleanup_opts: + err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); + + tc_skel_reset_all_seen(skel); + ASSERT_OK(system(ping_cmd), ping_cmd); + + ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5"); + ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6"); + +cleanup_filter: + tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; + err = bpf_tc_detach(&tc_hook, &tc_opts); + ASSERT_OK(err, "bpf_tc_detach"); + +cleanup_hook: + tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; + bpf_tc_hook_destroy(&tc_hook); + +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_chain_mixed(void) +{ + 
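+	/* Exercise the legacy tc and tcx mix on both ingress and egress. */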
test_tc_chain_mixed(BPF_TCX_INGRESS); + test_tc_chain_mixed(BPF_TCX_EGRESS); +} + +static int generate_dummy_prog(void) +{ + const struct bpf_insn prog_insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn); + LIBBPF_OPTS(bpf_prog_load_opts, opts); + const size_t log_buf_sz = 256; + char *log_buf; + int fd = -1; + + log_buf = malloc(log_buf_sz); + if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc")) + return fd; + opts.log_buf = log_buf; + opts.log_size = log_buf_sz; + + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, "tcx_prog", "GPL", + prog_insns, prog_insn_cnt, &opts); + ASSERT_STREQ(log_buf, "", "log_0"); + ASSERT_GE(fd, 0, "prog_fd"); + free(log_buf); + return fd; +} + +static void test_tc_opts_max_target(int target, int flags, bool relative) +{ + int err, ifindex, i, prog_fd, last_fd = -1; + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + const int max_progs = 63; + + ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth"); + ifindex = if_nametoindex("tcx_opts1"); + ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); + + assert_mprog_count_ifindex(ifindex, target, 0); + + for (i = 0; i < max_progs; i++) { + prog_fd = generate_dummy_prog(); + if (!ASSERT_GE(prog_fd, 0, "dummy_prog")) + goto cleanup; + err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + assert_mprog_count_ifindex(ifindex, target, i + 1); + if (i == max_progs - 1 && relative) + last_fd = prog_fd; + else + close(prog_fd); + } + + prog_fd = generate_dummy_prog(); + if (!ASSERT_GE(prog_fd, 0, "dummy_prog")) + goto cleanup; + opta.flags = flags; + if (last_fd > 0) + opta.relative_fd = last_fd; + err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta); + ASSERT_EQ(err, -ERANGE, "prog_64_attach"); + assert_mprog_count_ifindex(ifindex, target, max_progs); + close(prog_fd); +cleanup: + if (last_fd > 0) + close(last_fd); + ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); + ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); + ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); +} + +void serial_test_tc_opts_max(void) +{ + test_tc_opts_max_target(BPF_TCX_INGRESS, 0, false); + test_tc_opts_max_target(BPF_TCX_EGRESS, 0, false); + + test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_BEFORE, false); + test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_BEFORE, true); + + test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_AFTER, true); + test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_AFTER, false); +} + +static void test_tc_opts_query_target(int target) +{ + const size_t attr_size = offsetofend(union bpf_attr, query); + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; + struct test_tc_link *skel; + union bpf_attr attr; + __u32 prog_ids[5]; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + fd3 = bpf_program__fd(skel->progs.tc3); + fd4 = bpf_program__fd(skel->progs.tc4); + + id1 = id_from_prog_fd(fd1); + id2 = id_from_prog_fd(fd2); + id3 = id_from_prog_fd(fd3); + id4 = id_from_prog_fd(fd4); + + assert_mprog_count(target, 0); + + LIBBPF_OPTS_RESET(opta, + .expected_revision = 1, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if 
(!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count(target, 1); + + LIBBPF_OPTS_RESET(opta, + .expected_revision = 2, + ); + + err = bpf_prog_attach_opts(fd2, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup1; + + assert_mprog_count(target, 2); + + LIBBPF_OPTS_RESET(opta, + .expected_revision = 3, + ); + + err = bpf_prog_attach_opts(fd3, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup2; + + assert_mprog_count(target, 3); + + LIBBPF_OPTS_RESET(opta, + .expected_revision = 4, + ); + + err = bpf_prog_attach_opts(fd4, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup3; + + assert_mprog_count(target, 4); + + /* Test 1: Double query via libbpf API */ + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids, NULL, "prog_ids"); + ASSERT_EQ(optq.link_ids, NULL, "link_ids"); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.prog_ids = prog_ids; + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(optq.count, 4, "count"); + ASSERT_EQ(optq.revision, 5, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); + ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); + ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + ASSERT_EQ(optq.link_ids, NULL, "link_ids"); + + /* Test 2: Double query via bpf_attr & bpf(2) directly */ + memset(&attr, 0, attr_size); + attr.query.target_ifindex = loopback; + attr.query.attach_type = target; + + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(attr.query.count, 4, "count"); + ASSERT_EQ(attr.query.revision, 5, "revision"); + ASSERT_EQ(attr.query.query_flags, 0, "query_flags"); + ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags"); + ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex"); + ASSERT_EQ(attr.query.attach_type, target, "attach_type"); + ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids"); + ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags"); + ASSERT_EQ(attr.query.link_ids, 0, "link_ids"); + ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags"); + + memset(prog_ids, 0, sizeof(prog_ids)); + attr.query.prog_ids = ptr_to_u64(prog_ids); + + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(attr.query.count, 4, "count"); + ASSERT_EQ(attr.query.revision, 5, "revision"); + ASSERT_EQ(attr.query.query_flags, 0, "query_flags"); + ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags"); + ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex"); + ASSERT_EQ(attr.query.attach_type, target, "attach_type"); + ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids"); + ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]"); + ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]"); + ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]"); + ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags"); + ASSERT_EQ(attr.query.link_ids, 0, "link_ids"); + ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags"); + +cleanup4: + err = bpf_prog_detach_opts(fd4, loopback, target, 
&optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 3); + +cleanup3: + err = bpf_prog_detach_opts(fd3, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 2); + +cleanup2: + err = bpf_prog_detach_opts(fd2, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 1); + +cleanup1: + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); + +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_query(void) +{ + test_tc_opts_query_target(BPF_TCX_INGRESS); + test_tc_opts_query_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_query_attach_target(int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + struct test_tc_link *skel; + __u32 prog_ids[2]; + __u32 fd1, id1; + int err; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + id1 = id_from_prog_fd(fd1); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 0, "count"); + ASSERT_EQ(optq.revision, 1, "revision"); + + LIBBPF_OPTS_RESET(opta, + .expected_revision = optq.revision, + ); + + err = bpf_prog_attach_opts(fd1, loopback, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.prog_ids = prog_ids; + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(loopback, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup1; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 2, "revision"); + ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + +cleanup1: + err = bpf_prog_detach_opts(fd1, loopback, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count(target, 0); +cleanup: + test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_query_attach(void) +{ + test_tc_opts_query_attach_target(BPF_TCX_INGRESS); + test_tc_opts_query_attach_target(BPF_TCX_EGRESS); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c index 13bcaeb028b8..56685fc03c7e 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c @@ -347,7 +347,7 @@ static void syncookie_estab(void) exp_active_estab_in.max_delack_ms = 22; exp_passive_hdr_stg.syncookie = true; - exp_active_hdr_stg.resend_syn = true, + exp_active_hdr_stg.resend_syn = true; prepare_out(); diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c new file mode 100644 index 000000000000..0cca4e8ae38e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023. 
Huawei Technologies Co., Ltd */ +#define _GNU_SOURCE +#include <sched.h> +#include <pthread.h> +#include <stdbool.h> +#include <bpf/btf.h> +#include <test_progs.h> + +#include "test_bpf_ma.skel.h" + +void test_test_bpf_ma(void) +{ + struct test_bpf_ma *skel; + struct btf *btf; + int i, err; + + skel = test_bpf_ma__open(); + if (!ASSERT_OK_PTR(skel, "open")) + return; + + btf = bpf_object__btf(skel->obj); + if (!ASSERT_OK_PTR(btf, "btf")) + goto out; + + for (i = 0; i < ARRAY_SIZE(skel->rodata->data_sizes); i++) { + char name[32]; + int id; + + snprintf(name, sizeof(name), "bin_data_%u", skel->rodata->data_sizes[i]); + id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT); + if (!ASSERT_GT(id, 0, "bin_data")) + goto out; + skel->rodata->data_btf_ids[i] = id; + } + + err = test_bpf_ma__load(skel); + if (!ASSERT_OK(err, "load")) + goto out; + + err = test_bpf_ma__attach(skel); + if (!ASSERT_OK(err, "attach")) + goto out; + + skel->bss->pid = getpid(); + usleep(1); + ASSERT_OK(skel->bss->err, "test error"); +out: + test_bpf_ma__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c b/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c new file mode 100644 index 000000000000..375677c19146 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.*/ + +#include <test_progs.h> +#include <network_helpers.h> +#include "test_ldsx_insn.skel.h" + +static void test_map_val_and_probed_memory(void) +{ + struct test_ldsx_insn *skel; + int err; + + skel = test_ldsx_insn__open(); + if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) + return; + + if (skel->rodata->skip) { + test__skip(); + goto out; + } + + bpf_program__set_autoload(skel->progs.rdonly_map_prog, true); + bpf_program__set_autoload(skel->progs.map_val_prog, true); + bpf_program__set_autoload(skel->progs.test_ptr_struct_arg, true); + + err = test_ldsx_insn__load(skel); + if (!ASSERT_OK(err, "test_ldsx_insn__load")) + goto out; + + err = test_ldsx_insn__attach(skel); + if (!ASSERT_OK(err, "test_ldsx_insn__attach")) + goto out; + + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); + + ASSERT_EQ(skel->bss->done1, 1, "done1"); + ASSERT_EQ(skel->bss->ret1, 1, "ret1"); + ASSERT_EQ(skel->bss->done2, 1, "done2"); + ASSERT_EQ(skel->bss->ret2, 1, "ret2"); + ASSERT_EQ(skel->bss->int_member, -1, "int_member"); + +out: + test_ldsx_insn__destroy(skel); +} + +static void test_ctx_member_sign_ext(void) +{ + struct test_ldsx_insn *skel; + int err, fd, cgroup_fd; + char buf[16] = {0}; + socklen_t optlen; + + cgroup_fd = test__join_cgroup("/ldsx_test"); + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /ldsx_test")) + return; + + skel = test_ldsx_insn__open(); + if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) + goto close_cgroup_fd; + + if (skel->rodata->skip) { + test__skip(); + goto destroy_skel; + } + + bpf_program__set_autoload(skel->progs._getsockopt, true); + + err = test_ldsx_insn__load(skel); + if (!ASSERT_OK(err, "test_ldsx_insn__load")) + goto destroy_skel; + + skel->links._getsockopt = + bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link")) + goto destroy_skel; + + fd = socket(AF_INET, SOCK_STREAM, 0); + if (!ASSERT_GE(fd, 0, "socket")) + goto destroy_skel; + + optlen = sizeof(buf); + (void)getsockopt(fd, SOL_IP, IP_TTL, buf, &optlen); + + ASSERT_EQ(skel->bss->set_optlen, -1, "optlen"); + 
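+	/* As with optlen above, retval is filled in by the _getsockopt program
+	 * and is expected to read back as -1 after the sign-extending ctx load.
+	 */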
ASSERT_EQ(skel->bss->set_retval, -1, "retval"); + + close(fd); +destroy_skel: + test_ldsx_insn__destroy(skel); +close_cgroup_fd: + close(cgroup_fd); +} + +static void test_ctx_member_narrow_sign_ext(void) +{ + struct test_ldsx_insn *skel; + struct __sk_buff skb = {}; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + ); + int err, prog_fd; + + skel = test_ldsx_insn__open(); + if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) + return; + + if (skel->rodata->skip) { + test__skip(); + goto out; + } + + bpf_program__set_autoload(skel->progs._tc, true); + + err = test_ldsx_insn__load(skel); + if (!ASSERT_OK(err, "test_ldsx_insn__load")) + goto out; + + prog_fd = bpf_program__fd(skel->progs._tc); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + + ASSERT_EQ(skel->bss->set_mark, -2, "set_mark"); + +out: + test_ldsx_insn__destroy(skel); +} + +void test_ldsx_insn(void) +{ + if (test__start_subtest("map_val and probed_memory")) + test_map_val_and_probed_memory(); + if (test__start_subtest("ctx_member_sign_ext")) + test_ctx_member_sign_ext(); + if (test__start_subtest("ctx_member_narrow_sign_ext")) + test_ctx_member_narrow_sign_ext(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c index 290c21dbe65a..ce2c61d62fc6 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer.c +++ b/tools/testing/selftests/bpf/prog_tests/timer.c @@ -2,6 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <test_progs.h> #include "timer.skel.h" +#include "timer_failure.skel.h" static int timer(struct timer *timer_skel) { @@ -49,10 +50,11 @@ void serial_test_timer(void) timer_skel = timer__open_and_load(); if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load")) - goto cleanup; + return; err = timer(timer_skel); ASSERT_OK(err, "timer"); -cleanup: timer__destroy(timer_skel); + + RUN_TESTS(timer_failure); } diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c index 1c75a32186d6..fe0fb0c9849a 100644 --- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c +++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c @@ -55,6 +55,25 @@ static void test_fentry(void) ASSERT_EQ(skel->bss->t6, 1, "t6 ret"); + ASSERT_EQ(skel->bss->t7_a, 16, "t7:a"); + ASSERT_EQ(skel->bss->t7_b, 17, "t7:b"); + ASSERT_EQ(skel->bss->t7_c, 18, "t7:c"); + ASSERT_EQ(skel->bss->t7_d, 19, "t7:d"); + ASSERT_EQ(skel->bss->t7_e, 20, "t7:e"); + ASSERT_EQ(skel->bss->t7_f_a, 21, "t7:f.a"); + ASSERT_EQ(skel->bss->t7_f_b, 22, "t7:f.b"); + ASSERT_EQ(skel->bss->t7_ret, 133, "t7 ret"); + + ASSERT_EQ(skel->bss->t8_a, 16, "t8:a"); + ASSERT_EQ(skel->bss->t8_b, 17, "t8:b"); + ASSERT_EQ(skel->bss->t8_c, 18, "t8:c"); + ASSERT_EQ(skel->bss->t8_d, 19, "t8:d"); + ASSERT_EQ(skel->bss->t8_e, 20, "t8:e"); + ASSERT_EQ(skel->bss->t8_f_a, 21, "t8:f.a"); + ASSERT_EQ(skel->bss->t8_f_b, 22, "t8:f.b"); + ASSERT_EQ(skel->bss->t8_g, 23, "t8:g"); + ASSERT_EQ(skel->bss->t8_ret, 156, "t8 ret"); + tracing_struct__detach(skel); destroy_skel: tracing_struct__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c index e91d0d1769f1..6cd7349d4a2b 100644 --- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c +++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c @@ -88,8 +88,8 @@ void serial_test_trampoline_count(void) if 
(!ASSERT_OK(err, "bpf_prog_test_run_opts")) goto cleanup; - ASSERT_EQ(opts.retval & 0xffff, 4, "bpf_modify_return_test.result"); - ASSERT_EQ(opts.retval >> 16, 1, "bpf_modify_return_test.side_effect"); + ASSERT_EQ(opts.retval & 0xffff, 33, "bpf_modify_return_test.result"); + ASSERT_EQ(opts.retval >> 16, 2, "bpf_modify_return_test.side_effect"); cleanup: for (; i >= 0; i--) { diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c index 8383a99f610f..0adf8d9475cb 100644 --- a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c +++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c @@ -171,7 +171,11 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s prog_insns, prog_insn_cnt, &load_opts), -EPERM, "prog_load_fails"); - for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++) + /* some map types require particular correct parameters which could be + * sanity-checked before enforcing -EPERM, so only validate that + * the simple ARRAY and HASH maps are failing with -EPERM + */ + for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_ARRAY; i++) ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL), -EPERM, "map_create_fails"); diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c new file mode 100644 index 000000000000..cd051d3901a9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -0,0 +1,415 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <unistd.h> +#include <test_progs.h> +#include "uprobe_multi.skel.h" +#include "uprobe_multi_bench.skel.h" +#include "uprobe_multi_usdt.skel.h" +#include "bpf/libbpf_internal.h" +#include "testing_helpers.h" + +static char test_data[] = "test_data"; + +noinline void uprobe_multi_func_1(void) +{ + asm volatile (""); +} + +noinline void uprobe_multi_func_2(void) +{ + asm volatile (""); +} + +noinline void uprobe_multi_func_3(void) +{ + asm volatile (""); +} + +struct child { + int go[2]; + int pid; +}; + +static void release_child(struct child *child) +{ + int child_status; + + if (!child) + return; + close(child->go[1]); + close(child->go[0]); + if (child->pid > 0) + waitpid(child->pid, &child_status, 0); +} + +static void kick_child(struct child *child) +{ + char c = 1; + + if (child) { + write(child->go[1], &c, 1); + release_child(child); + } + fflush(NULL); +} + +static struct child *spawn_child(void) +{ + static struct child child; + int err; + int c; + + /* pipe to notify child to execute the trigger functions */ + if (pipe(child.go)) + return NULL; + + child.pid = fork(); + if (child.pid < 0) { + release_child(&child); + errno = EINVAL; + return NULL; + } + + /* child */ + if (child.pid == 0) { + close(child.go[1]); + + /* wait for parent's kick */ + err = read(child.go[0], &c, 1); + if (err != 1) + exit(err); + + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + exit(errno); + } + + return &child; +} + +static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child) +{ + skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; + skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; + skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; + + skel->bss->user_ptr = test_data; + + /* + * Disable pid check in bpf program if we are pid filter test, + * because the probe should be executed only by child->pid + * passed at 
the probe attach. + */ + skel->bss->pid = child ? 0 : getpid(); + + if (child) + kick_child(child); + + /* trigger all probes */ + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + /* + * There are 2 entry and 2 exit probe called for each uprobe_multi_func_[123] + * function and each slepable probe (6) increments uprobe_multi_sleep_result. + */ + ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result"); + + ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 2, "uretprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result"); + + ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result"); + + if (child) + ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid"); +} + +static void test_skel_api(void) +{ + struct uprobe_multi *skel = NULL; + int err; + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) + goto cleanup; + + err = uprobe_multi__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi__attach")) + goto cleanup; + + uprobe_multi_test_run(skel, NULL); + +cleanup: + uprobe_multi__destroy(skel); +} + +static void +__test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts, + struct child *child) +{ + pid_t pid = child ? child->pid : -1; + struct uprobe_multi *skel = NULL; + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) + goto cleanup; + + opts->retprobe = false; + skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, pid, + binary, pattern, opts); + if (!ASSERT_OK_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + opts->retprobe = true; + skel->links.uretprobe = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, pid, + binary, pattern, opts); + if (!ASSERT_OK_PTR(skel->links.uretprobe, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + opts->retprobe = false; + skel->links.uprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uprobe_sleep, pid, + binary, pattern, opts); + if (!ASSERT_OK_PTR(skel->links.uprobe_sleep, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + opts->retprobe = true; + skel->links.uretprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uretprobe_sleep, + pid, binary, pattern, opts); + if (!ASSERT_OK_PTR(skel->links.uretprobe_sleep, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + opts->retprobe = false; + skel->links.uprobe_extra = bpf_program__attach_uprobe_multi(skel->progs.uprobe_extra, -1, + binary, pattern, opts); + if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + uprobe_multi_test_run(skel, child); + +cleanup: + uprobe_multi__destroy(skel); +} + +static void +test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts) +{ + struct child *child; + + /* no pid filter */ + __test_attach_api(binary, pattern, opts, NULL); + + /* pid filter */ + child = spawn_child(); + if (!ASSERT_OK_PTR(child, "spawn_child")) + return; + + __test_attach_api(binary, pattern, opts, child); +} + +static void test_attach_api_pattern(void) +{ + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); 
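+	/* Both the glob ('*') and single-character ('?') patterns below are
+	 * expected to match the three uprobe_multi_func_[123] symbols in
+	 * /proc/self/exe.
+	 */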
+ + test_attach_api("/proc/self/exe", "uprobe_multi_func_*", &opts); + test_attach_api("/proc/self/exe", "uprobe_multi_func_?", &opts); +} + +static void test_attach_api_syms(void) +{ + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + const char *syms[3] = { + "uprobe_multi_func_1", + "uprobe_multi_func_2", + "uprobe_multi_func_3", + }; + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + test_attach_api("/proc/self/exe", NULL, &opts); +} + +static void __test_link_api(struct child *child) +{ + int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1; + LIBBPF_OPTS(bpf_link_create_opts, opts); + const char *path = "/proc/self/exe"; + struct uprobe_multi *skel = NULL; + unsigned long *offsets = NULL; + const char *syms[3] = { + "uprobe_multi_func_1", + "uprobe_multi_func_2", + "uprobe_multi_func_3", + }; + int link_extra_fd = -1; + int err; + + err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets); + if (!ASSERT_OK(err, "elf_resolve_syms_offsets")) + return; + + opts.uprobe_multi.path = path; + opts.uprobe_multi.offsets = offsets; + opts.uprobe_multi.cnt = ARRAY_SIZE(syms); + opts.uprobe_multi.pid = child ? child->pid : 0; + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) + goto cleanup; + + opts.kprobe_multi.flags = 0; + prog_fd = bpf_program__fd(skel->progs.uprobe); + link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link1_fd, 0, "link1_fd")) + goto cleanup; + + opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN; + prog_fd = bpf_program__fd(skel->progs.uretprobe); + link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link2_fd, 0, "link2_fd")) + goto cleanup; + + opts.kprobe_multi.flags = 0; + prog_fd = bpf_program__fd(skel->progs.uprobe_sleep); + link3_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link3_fd, 0, "link3_fd")) + goto cleanup; + + opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN; + prog_fd = bpf_program__fd(skel->progs.uretprobe_sleep); + link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link4_fd, 0, "link4_fd")) + goto cleanup; + + opts.kprobe_multi.flags = 0; + opts.uprobe_multi.pid = 0; + prog_fd = bpf_program__fd(skel->progs.uprobe_extra); + link_extra_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link_extra_fd, 0, "link_extra_fd")) + goto cleanup; + + uprobe_multi_test_run(skel, child); + +cleanup: + if (link1_fd >= 0) + close(link1_fd); + if (link2_fd >= 0) + close(link2_fd); + if (link3_fd >= 0) + close(link3_fd); + if (link4_fd >= 0) + close(link4_fd); + if (link_extra_fd >= 0) + close(link_extra_fd); + + uprobe_multi__destroy(skel); + free(offsets); +} + +void test_link_api(void) +{ + struct child *child; + + /* no pid filter */ + __test_link_api(NULL); + + /* pid filter */ + child = spawn_child(); + if (!ASSERT_OK_PTR(child, "spawn_child")) + return; + + __test_link_api(child); +} + +static void test_bench_attach_uprobe(void) +{ + long attach_start_ns = 0, attach_end_ns = 0; + struct uprobe_multi_bench *skel = NULL; + long detach_start_ns, detach_end_ns; + double attach_delta, detach_delta; + int err; + + skel = uprobe_multi_bench__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi_bench__open_and_load")) + goto cleanup; + + attach_start_ns = get_time_ns(); + + err = uprobe_multi_bench__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi_bench__attach")) + goto cleanup; + + attach_end_ns = 
get_time_ns(); + + system("./uprobe_multi bench"); + + ASSERT_EQ(skel->bss->count, 50000, "uprobes_count"); + +cleanup: + detach_start_ns = get_time_ns(); + uprobe_multi_bench__destroy(skel); + detach_end_ns = get_time_ns(); + + attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0; + detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0; + + printf("%s: attached in %7.3lfs\n", __func__, attach_delta); + printf("%s: detached in %7.3lfs\n", __func__, detach_delta); +} + +static void test_bench_attach_usdt(void) +{ + long attach_start_ns = 0, attach_end_ns = 0; + struct uprobe_multi_usdt *skel = NULL; + long detach_start_ns, detach_end_ns; + double attach_delta, detach_delta; + + skel = uprobe_multi_usdt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open")) + goto cleanup; + + attach_start_ns = get_time_ns(); + + skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, -1, "./uprobe_multi", + "test", "usdt", NULL); + if (!ASSERT_OK_PTR(skel->links.usdt0, "bpf_program__attach_usdt")) + goto cleanup; + + attach_end_ns = get_time_ns(); + + system("./uprobe_multi usdt"); + + ASSERT_EQ(skel->bss->count, 50000, "usdt_count"); + +cleanup: + detach_start_ns = get_time_ns(); + uprobe_multi_usdt__destroy(skel); + detach_end_ns = get_time_ns(); + + attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0; + detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0; + + printf("%s: attached in %7.3lfs\n", __func__, attach_delta); + printf("%s: detached in %7.3lfs\n", __func__, detach_delta); +} + +void test_uprobe_multi_test(void) +{ + if (test__start_subtest("skel_api")) + test_skel_api(); + if (test__start_subtest("attach_api_pattern")) + test_attach_api_pattern(); + if (test__start_subtest("attach_api_syms")) + test_attach_api_syms(); + if (test__start_subtest("link_api")) + test_link_api(); + if (test__start_subtest("bench_uprobe")) + test_bench_attach_uprobe(); + if (test__start_subtest("bench_usdt")) + test_bench_attach_usdt(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 2497716ee379..e3e68c97b40c 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -11,6 +11,7 @@ #include "verifier_bounds_deduction_non_const.skel.h" #include "verifier_bounds_mix_sign_unsign.skel.h" #include "verifier_bpf_get_stack.skel.h" +#include "verifier_bswap.skel.h" #include "verifier_btf_ctx_access.skel.h" #include "verifier_cfg.skel.h" #include "verifier_cgroup_inv_retcode.skel.h" @@ -24,6 +25,7 @@ #include "verifier_direct_stack_access_wraparound.skel.h" #include "verifier_div0.skel.h" #include "verifier_div_overflow.skel.h" +#include "verifier_gotol.skel.h" #include "verifier_helper_access_var_len.skel.h" #include "verifier_helper_packet_access.skel.h" #include "verifier_helper_restricted.skel.h" @@ -31,6 +33,7 @@ #include "verifier_int_ptr.skel.h" #include "verifier_jeq_infer_not_null.skel.h" #include "verifier_ld_ind.skel.h" +#include "verifier_ldsx.skel.h" #include "verifier_leak_ptr.skel.h" #include "verifier_loops1.skel.h" #include "verifier_lwt.skel.h" @@ -40,6 +43,7 @@ #include "verifier_map_ret_val.skel.h" #include "verifier_masking.skel.h" #include "verifier_meta_access.skel.h" +#include "verifier_movsx.skel.h" #include "verifier_netfilter_ctx.skel.h" #include "verifier_netfilter_retcode.skel.h" #include "verifier_prevent_map_lookup.skel.h" @@ -50,12 +54,16 @@ #include "verifier_regalloc.skel.h" #include 
"verifier_ringbuf.skel.h" #include "verifier_runtime_jit.skel.h" +#include "verifier_scalar_ids.skel.h" +#include "verifier_sdiv.skel.h" #include "verifier_search_pruning.skel.h" #include "verifier_sock.skel.h" #include "verifier_spill_fill.skel.h" #include "verifier_spin_lock.skel.h" #include "verifier_stack_ptr.skel.h" +#include "verifier_subprog_precision.skel.h" #include "verifier_subreg.skel.h" +#include "verifier_typedef.skel.h" #include "verifier_uninit.skel.h" #include "verifier_unpriv.skel.h" #include "verifier_unpriv_perf.skel.h" @@ -110,6 +118,7 @@ void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); } void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); } +void test_verifier_bswap(void) { RUN(verifier_bswap); } void test_verifier_btf_ctx_access(void) { RUN(verifier_btf_ctx_access); } void test_verifier_cfg(void) { RUN(verifier_cfg); } void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); } @@ -123,6 +132,7 @@ void test_verifier_direct_packet_access(void) { RUN(verifier_direct_packet_acces void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); } void test_verifier_div0(void) { RUN(verifier_div0); } void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } +void test_verifier_gotol(void) { RUN(verifier_gotol); } void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); } void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); } void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); } @@ -130,6 +140,7 @@ void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); } void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } +void test_verifier_ldsx(void) { RUN(verifier_ldsx); } void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } void test_verifier_loops1(void) { RUN(verifier_loops1); } void test_verifier_lwt(void) { RUN(verifier_lwt); } @@ -139,6 +150,7 @@ void test_verifier_map_ptr_mixing(void) { RUN(verifier_map_ptr_mixing); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void test_verifier_masking(void) { RUN(verifier_masking); } void test_verifier_meta_access(void) { RUN(verifier_meta_access); } +void test_verifier_movsx(void) { RUN(verifier_movsx); } void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); } void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); } void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); } @@ -149,12 +161,16 @@ void test_verifier_ref_tracking(void) { RUN(verifier_ref_tracking); } void test_verifier_regalloc(void) { RUN(verifier_regalloc); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); } +void test_verifier_scalar_ids(void) { RUN(verifier_scalar_ids); } +void test_verifier_sdiv(void) { RUN(verifier_sdiv); } void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); } void test_verifier_sock(void) { RUN(verifier_sock); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_spin_lock(void) { 
RUN(verifier_spin_lock); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } +void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); } void test_verifier_subreg(void) { RUN(verifier_subreg); } +void test_verifier_typedef(void) { RUN(verifier_typedef); } void test_verifier_uninit(void) { RUN(verifier_uninit); } void test_verifier_unpriv(void) { RUN(verifier_unpriv); } void test_verifier_unpriv_perf(void) { RUN(verifier_unpriv_perf); } diff --git a/tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c b/tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c new file mode 100644 index 000000000000..2a5e207edad6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * Topology: + * --------- + * NS0 namespace | NS1 namespace + * | + * +--------------+ | +--------------+ + * | veth01 |----------| veth10 | + * | 172.16.1.100 | | | 172.16.1.200 | + * | bpf | | +--------------+ + * +--------------+ | + * server(UDP/TCP) | + * +-------------------+ | + * | vrf1 | | + * | +--------------+ | | +--------------+ + * | | veth02 |----------| veth20 | + * | | 172.16.2.100 | | | | 172.16.2.200 | + * | | bpf | | | +--------------+ + * | +--------------+ | | + * | server(UDP/TCP) | | + * +-------------------+ | + * + * Test flow + * ----------- + * The tests verifies that socket lookup via TC is VRF aware: + * 1) Creates two veth pairs between NS0 and NS1: + * a) veth01 <-> veth10 outside the VRF + * b) veth02 <-> veth20 in the VRF + * 2) Attaches to veth01 and veth02 a program that calls: + * a) bpf_skc_lookup_tcp() with TCP and tcp_skc is true + * b) bpf_sk_lookup_tcp() with TCP and tcp_skc is false + * c) bpf_sk_lookup_udp() with UDP + * The program stores the lookup result in bss->lookup_status. + * 3) Creates a socket TCP/UDP server in/outside the VRF. 
+ * 4) The test expects lookup_status to be: + * a) 0 from device in VRF to server outside VRF + * b) 0 from device outside VRF to server in VRF + * c) 1 from device in VRF to server in VRF + * d) 1 from device outside VRF to server outside VRF + */ + +#include <net/if.h> + +#include "test_progs.h" +#include "network_helpers.h" +#include "vrf_socket_lookup.skel.h" + +#define NS0 "vrf_socket_lookup_0" +#define NS1 "vrf_socket_lookup_1" + +#define IP4_ADDR_VETH01 "172.16.1.100" +#define IP4_ADDR_VETH10 "172.16.1.200" +#define IP4_ADDR_VETH02 "172.16.2.100" +#define IP4_ADDR_VETH20 "172.16.2.200" + +#define NON_VRF_PORT 5000 +#define IN_VRF_PORT 5001 + +#define TIMEOUT_MS 3000 + +static int make_socket(int sotype, const char *ip, int port, + struct sockaddr_storage *addr) +{ + int err, fd; + + err = make_sockaddr(AF_INET, ip, port, addr, NULL); + if (!ASSERT_OK(err, "make_address")) + return -1; + + fd = socket(AF_INET, sotype, 0); + if (!ASSERT_GE(fd, 0, "socket")) + return -1; + + if (!ASSERT_OK(settimeo(fd, TIMEOUT_MS), "settimeo")) + goto fail; + + return fd; +fail: + close(fd); + return -1; +} + +static int make_server(int sotype, const char *ip, int port, const char *ifname) +{ + int err, fd = -1; + + fd = start_server(AF_INET, sotype, ip, port, TIMEOUT_MS); + if (!ASSERT_GE(fd, 0, "start_server")) + return -1; + + if (ifname) { + err = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, + ifname, strlen(ifname) + 1); + if (!ASSERT_OK(err, "setsockopt(SO_BINDTODEVICE)")) + goto fail; + } + + return fd; +fail: + close(fd); + return -1; +} + +static int attach_progs(char *ifname, int tc_prog_fd, int xdp_prog_fd) +{ + LIBBPF_OPTS(bpf_tc_hook, hook, .attach_point = BPF_TC_INGRESS); + LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, + .prog_fd = tc_prog_fd); + int ret, ifindex; + + ifindex = if_nametoindex(ifname); + if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex")) + return -1; + hook.ifindex = ifindex; + + ret = bpf_tc_hook_create(&hook); + if (!ASSERT_OK(ret, "bpf_tc_hook_create")) + return ret; + + ret = bpf_tc_attach(&hook, &opts); + if (!ASSERT_OK(ret, "bpf_tc_attach")) { + bpf_tc_hook_destroy(&hook); + return ret; + } + ret = bpf_xdp_attach(ifindex, xdp_prog_fd, 0, NULL); + if (!ASSERT_OK(ret, "bpf_xdp_attach")) { + bpf_tc_hook_destroy(&hook); + return ret; + } + + return 0; +} + +static void cleanup(void) +{ + SYS_NOFAIL("test -f /var/run/netns/" NS0 " && ip netns delete " + NS0); + SYS_NOFAIL("test -f /var/run/netns/" NS1 " && ip netns delete " + NS1); +} + +static int setup(struct vrf_socket_lookup *skel) +{ + int tc_prog_fd, xdp_prog_fd, ret = 0; + struct nstoken *nstoken = NULL; + + SYS(fail, "ip netns add " NS0); + SYS(fail, "ip netns add " NS1); + + /* NS0 <-> NS1 [veth01 <-> veth10] */ + SYS(fail, "ip link add veth01 netns " NS0 " type veth peer name veth10" + " netns " NS1); + SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH01 "/24 dev veth01"); + SYS(fail, "ip -net " NS0 " link set dev veth01 up"); + SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH10 "/24 dev veth10"); + SYS(fail, "ip -net " NS1 " link set dev veth10 up"); + + /* NS0 <-> NS1 [veth02 <-> veth20] */ + SYS(fail, "ip link add veth02 netns " NS0 " type veth peer name veth20" + " netns " NS1); + SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH02 "/24 dev veth02"); + SYS(fail, "ip -net " NS0 " link set dev veth02 up"); + SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH20 "/24 dev veth20"); + SYS(fail, "ip -net " NS1 " link set dev veth20 up"); + + /* veth02 -> vrf1 */ + SYS(fail, "ip -net " NS0 " link 
add vrf1 type vrf table 11"); + SYS(fail, "ip -net " NS0 " route add vrf vrf1 unreachable default" + " metric 4278198272"); + SYS(fail, "ip -net " NS0 " link set vrf1 alias vrf"); + SYS(fail, "ip -net " NS0 " link set vrf1 up"); + SYS(fail, "ip -net " NS0 " link set veth02 master vrf1"); + + /* Attach TC and XDP progs to veth devices in NS0 */ + nstoken = open_netns(NS0); + if (!ASSERT_OK_PTR(nstoken, "setns " NS0)) + goto fail; + tc_prog_fd = bpf_program__fd(skel->progs.tc_socket_lookup); + if (!ASSERT_GE(tc_prog_fd, 0, "bpf_program__tc_fd")) + goto fail; + xdp_prog_fd = bpf_program__fd(skel->progs.xdp_socket_lookup); + if (!ASSERT_GE(xdp_prog_fd, 0, "bpf_program__xdp_fd")) + goto fail; + + if (attach_progs("veth01", tc_prog_fd, xdp_prog_fd)) + goto fail; + + if (attach_progs("veth02", tc_prog_fd, xdp_prog_fd)) + goto fail; + + goto close; +fail: + ret = -1; +close: + if (nstoken) + close_netns(nstoken); + return ret; +} + +static int test_lookup(struct vrf_socket_lookup *skel, int sotype, + const char *ip, int port, bool test_xdp, bool tcp_skc, + int lookup_status_exp) +{ + static const char msg[] = "Hello Server"; + struct sockaddr_storage addr = {}; + int fd, ret = 0; + + fd = make_socket(sotype, ip, port, &addr); + if (fd < 0) + return -1; + + skel->bss->test_xdp = test_xdp; + skel->bss->tcp_skc = tcp_skc; + skel->bss->lookup_status = -1; + + if (sotype == SOCK_STREAM) + connect(fd, (void *)&addr, sizeof(struct sockaddr_in)); + else + sendto(fd, msg, sizeof(msg), 0, (void *)&addr, + sizeof(struct sockaddr_in)); + + if (!ASSERT_EQ(skel->bss->lookup_status, lookup_status_exp, + "lookup_status")) + goto fail; + + goto close; + +fail: + ret = -1; +close: + close(fd); + return ret; +} + +static void _test_vrf_socket_lookup(struct vrf_socket_lookup *skel, int sotype, + bool test_xdp, bool tcp_skc) +{ + int in_vrf_server = -1, non_vrf_server = -1; + struct nstoken *nstoken = NULL; + + nstoken = open_netns(NS0); + if (!ASSERT_OK_PTR(nstoken, "setns " NS0)) + goto done; + + /* Open sockets in and outside VRF */ + non_vrf_server = make_server(sotype, "0.0.0.0", NON_VRF_PORT, NULL); + if (!ASSERT_GE(non_vrf_server, 0, "make_server__outside_vrf_fd")) + goto done; + + in_vrf_server = make_server(sotype, "0.0.0.0", IN_VRF_PORT, "veth02"); + if (!ASSERT_GE(in_vrf_server, 0, "make_server__in_vrf_fd")) + goto done; + + /* Perform test from NS1 */ + close_netns(nstoken); + nstoken = open_netns(NS1); + if (!ASSERT_OK_PTR(nstoken, "setns " NS1)) + goto done; + + if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH02, NON_VRF_PORT, + test_xdp, tcp_skc, 0), "in_to_out")) + goto done; + if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH02, IN_VRF_PORT, + test_xdp, tcp_skc, 1), "in_to_in")) + goto done; + if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH01, NON_VRF_PORT, + test_xdp, tcp_skc, 1), "out_to_out")) + goto done; + if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH01, IN_VRF_PORT, + test_xdp, tcp_skc, 0), "out_to_in")) + goto done; + +done: + if (non_vrf_server >= 0) + close(non_vrf_server); + if (in_vrf_server >= 0) + close(in_vrf_server); + if (nstoken) + close_netns(nstoken); +} + +void test_vrf_socket_lookup(void) +{ + struct vrf_socket_lookup *skel; + + cleanup(); + + skel = vrf_socket_lookup__open_and_load(); + if (!ASSERT_OK_PTR(skel, "vrf_socket_lookup__open_and_load")) + return; + + if (!ASSERT_OK(setup(skel), "setup")) + goto done; + + if (test__start_subtest("tc_socket_lookup_tcp")) + _test_vrf_socket_lookup(skel, SOCK_STREAM, false, false); + if 
(test__start_subtest("tc_socket_lookup_tcp_skc")) + _test_vrf_socket_lookup(skel, SOCK_STREAM, false, false); + if (test__start_subtest("tc_socket_lookup_udp")) + _test_vrf_socket_lookup(skel, SOCK_STREAM, false, false); + if (test__start_subtest("xdp_socket_lookup_tcp")) + _test_vrf_socket_lookup(skel, SOCK_STREAM, true, false); + if (test__start_subtest("xdp_socket_lookup_tcp_skc")) + _test_vrf_socket_lookup(skel, SOCK_STREAM, true, false); + if (test__start_subtest("xdp_socket_lookup_udp")) + _test_vrf_socket_lookup(skel, SOCK_STREAM, true, false); + +done: + vrf_socket_lookup__destroy(skel); + cleanup(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c index fa3cac5488f5..e6bcb6051402 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include "test_xdp_attach_fail.skel.h" #define IFINDEX_LO 1 #define XDP_FLAGS_REPLACE (1U << 4) @@ -85,10 +86,74 @@ out_1: bpf_object__close(obj1); } +#define ERRMSG_LEN 64 + +struct xdp_errmsg { + char msg[ERRMSG_LEN]; +}; + +static void on_xdp_errmsg(void *ctx, int cpu, void *data, __u32 size) +{ + struct xdp_errmsg *ctx_errmg = ctx, *tp_errmsg = data; + + memcpy(&ctx_errmg->msg, &tp_errmsg->msg, ERRMSG_LEN); +} + +static const char tgt_errmsg[] = "Invalid XDP flags for BPF link attachment"; + +static void test_xdp_attach_fail(const char *file) +{ + struct test_xdp_attach_fail *skel = NULL; + struct xdp_errmsg errmsg = {}; + struct perf_buffer *pb = NULL; + struct bpf_object *obj = NULL; + int err, fd_xdp; + + LIBBPF_OPTS(bpf_link_create_opts, opts); + + skel = test_xdp_attach_fail__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_xdp_attach_fail__open_and_load")) + goto out_close; + + err = test_xdp_attach_fail__attach(skel); + if (!ASSERT_EQ(err, 0, "test_xdp_attach_fail__attach")) + goto out_close; + + /* set up perf buffer */ + pb = perf_buffer__new(bpf_map__fd(skel->maps.xdp_errmsg_pb), 1, + on_xdp_errmsg, NULL, &errmsg, NULL); + if (!ASSERT_OK_PTR(pb, "perf_buffer__new")) + goto out_close; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &fd_xdp); + if (!ASSERT_EQ(err, 0, "bpf_prog_test_load")) + goto out_close; + + opts.flags = 0xFF; // invalid flags to fail to attach XDP prog + err = bpf_link_create(fd_xdp, IFINDEX_LO, BPF_XDP, &opts); + if (!ASSERT_EQ(err, -EINVAL, "bpf_link_create")) + goto out_close; + + /* read perf buffer */ + err = perf_buffer__poll(pb, 100); + if (!ASSERT_GT(err, -1, "perf_buffer__poll")) + goto out_close; + + ASSERT_STRNEQ((const char *) errmsg.msg, tgt_errmsg, + 42 /* strlen(tgt_errmsg) */, "check error message"); + +out_close: + perf_buffer__free(pb); + bpf_object__close(obj); + test_xdp_attach_fail__destroy(skel); +} + void serial_test_xdp_attach(void) { if (test__start_subtest("xdp_attach")) test_xdp_attach("./test_xdp.bpf.o"); if (test__start_subtest("xdp_attach_dynptr")) test_xdp_attach("./test_xdp_dynptr.bpf.o"); + if (test__start_subtest("xdp_attach_failed")) + test_xdp_attach_fail("./xdp_dummy.bpf.o"); } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c index d19f79048ff6..c3b45745cbcc 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c @@ -18,6 +18,7 @@ #include <linux/if_bonding.h> #include <linux/limits.h> #include <linux/udp.h> +#include 
<uapi/linux/netdev.h> #include "xdp_dummy.skel.h" #include "xdp_redirect_multi_kern.skel.h" @@ -492,6 +493,123 @@ out: system("ip link del bond_nest2"); } +static void test_xdp_bonding_features(struct skeletons *skeletons) +{ + LIBBPF_OPTS(bpf_xdp_query_opts, query_opts); + int bond_idx, veth1_idx, err; + struct bpf_link *link = NULL; + + if (!ASSERT_OK(system("ip link add bond type bond"), "add bond")) + goto out; + + bond_idx = if_nametoindex("bond"); + if (!ASSERT_GE(bond_idx, 0, "if_nametoindex bond")) + goto out; + + /* query default xdp-feature for bond device */ + err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts); + if (!ASSERT_OK(err, "bond bpf_xdp_query")) + goto out; + + if (!ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK, + "bond query_opts.feature_flags")) + goto out; + + if (!ASSERT_OK(system("ip link add veth0 type veth peer name veth1"), + "add veth{0,1} pair")) + goto out; + + if (!ASSERT_OK(system("ip link add veth2 type veth peer name veth3"), + "add veth{2,3} pair")) + goto out; + + if (!ASSERT_OK(system("ip link set veth0 master bond"), + "add veth0 to master bond")) + goto out; + + /* xdp-feature for bond device should be obtained from the single slave + * device (veth0) + */ + err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts); + if (!ASSERT_OK(err, "bond bpf_xdp_query")) + goto out; + + if (!ASSERT_EQ(query_opts.feature_flags, + NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG, + "bond query_opts.feature_flags")) + goto out; + + veth1_idx = if_nametoindex("veth1"); + if (!ASSERT_GE(veth1_idx, 0, "if_nametoindex veth1")) + goto out; + + link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, + veth1_idx); + if (!ASSERT_OK_PTR(link, "attach program to veth1")) + goto out; + + /* xdp-feature for veth0 are changed */ + err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts); + if (!ASSERT_OK(err, "bond bpf_xdp_query")) + goto out; + + if (!ASSERT_EQ(query_opts.feature_flags, + NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT | + NETDEV_XDP_ACT_NDO_XMIT_SG, + "bond query_opts.feature_flags")) + goto out; + + if (!ASSERT_OK(system("ip link set veth2 master bond"), + "add veth2 to master bond")) + goto out; + + err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts); + if (!ASSERT_OK(err, "bond bpf_xdp_query")) + goto out; + + /* xdp-feature for bond device should be set to the most restrict + * value obtained from attached slave devices (veth0 and veth2) + */ + if (!ASSERT_EQ(query_opts.feature_flags, + NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG, + "bond query_opts.feature_flags")) + goto out; + + if (!ASSERT_OK(system("ip link set veth2 nomaster"), + "del veth2 to master bond")) + goto out; + + err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts); + if (!ASSERT_OK(err, "bond bpf_xdp_query")) + goto out; + + if (!ASSERT_EQ(query_opts.feature_flags, + NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT | + NETDEV_XDP_ACT_NDO_XMIT_SG, + "bond query_opts.feature_flags")) + goto out; + + if (!ASSERT_OK(system("ip link set veth0 nomaster"), + "del veth0 to master bond")) + goto out; + + err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts); + if (!ASSERT_OK(err, "bond bpf_xdp_query")) + goto out; + + ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK, + "bond query_opts.feature_flags"); +out: + bpf_link__destroy(link); + system("ip link del 
veth0"); + system("ip link del veth2"); + system("ip link del bond"); +} + static int libbpf_debug_print(enum libbpf_print_level level, const char *format, va_list args) { @@ -546,6 +664,9 @@ void serial_test_xdp_bonding(void) if (test__start_subtest("xdp_bonding_nested")) test_xdp_bonding_nested(&skeletons); + if (test__start_subtest("xdp_bonding_features")) + test_xdp_bonding_features(&skeletons); + for (i = 0; i < ARRAY_SIZE(bond_test_cases); i++) { struct bond_test_case *test_case = &bond_test_cases[i]; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_dev_bound_only.c b/tools/testing/selftests/bpf/prog_tests/xdp_dev_bound_only.c new file mode 100644 index 000000000000..7dd18c6d06c6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_dev_bound_only.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <net/if.h> +#include <test_progs.h> +#include <network_helpers.h> + +#define LOCAL_NETNS "xdp_dev_bound_only_netns" + +static int load_dummy_prog(char *name, __u32 ifindex, __u32 flags) +{ + struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN() }; + LIBBPF_OPTS(bpf_prog_load_opts, opts); + + opts.prog_flags = flags; + opts.prog_ifindex = ifindex; + return bpf_prog_load(BPF_PROG_TYPE_XDP, name, "GPL", insns, ARRAY_SIZE(insns), &opts); +} + +/* A test case for bpf_offload_netdev->offload handling bug: + * - create a veth device (does not support offload); + * - create a device bound XDP program with BPF_F_XDP_DEV_BOUND_ONLY flag + * (such programs are not offloaded); + * - create a device bound XDP program without flags (such programs are offloaded). + * This might lead to 'BUG: kernel NULL pointer dereference'. + */ +void test_xdp_dev_bound_only_offdev(void) +{ + struct nstoken *tok = NULL; + __u32 ifindex; + int fd1 = -1; + int fd2 = -1; + + SYS(out, "ip netns add " LOCAL_NETNS); + tok = open_netns(LOCAL_NETNS); + if (!ASSERT_OK_PTR(tok, "open_netns")) + goto out; + SYS(out, "ip link add eth42 type veth"); + ifindex = if_nametoindex("eth42"); + if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex")) { + perror("if_nametoindex"); + goto out; + } + fd1 = load_dummy_prog("dummy1", ifindex, BPF_F_XDP_DEV_BOUND_ONLY); + if (!ASSERT_GE(fd1, 0, "load_dummy_prog #1")) { + perror("load_dummy_prog #1"); + goto out; + } + /* Program with ifindex is considered offloaded, however veth + * does not support offload => error should be reported. + */ + fd2 = load_dummy_prog("dummy2", ifindex, 0); + ASSERT_EQ(fd2, -EINVAL, "load_dummy_prog #2 (offloaded)"); + +out: + close(fd1); + close(fd2); + close_netns(tok); + /* eth42 was added inside netns, removing the netns will + * also remove eth42 veth pair. + */ + SYS_NOFAIL("ip netns del " LOCAL_NETNS); +} |