| field | value | date |
|---|---|---|
| author | Thomas Zimmermann <[email protected]> | 2023-10-11 09:50:59 +0200 |
| committer | Thomas Zimmermann <[email protected]> | 2023-10-11 09:50:59 +0200 |
| commit | 57390019b68b83f96eb98f490367b9df1f2d77cb (patch) | |
| tree | e6d4b6c75efdd2d7fb7d37f980688c491be3ff6a /tools/testing/selftests/bpf/prog_tests | |
| parent | e5f9d543419c78ac58f3b3557bc5a76b20ff600b (diff) | |
| parent | 389af786f92ecdff35883551d54bf4e507ffcccb (diff) | |
Merge drm/drm-next into drm-misc-next
Updating drm-misc-next to the state of Linux v6.6-rc2.
Signed-off-by: Thomas Zimmermann <[email protected]>
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
42 files changed, 7794 insertions(+), 103 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/assign_reuse.c b/tools/testing/selftests/bpf/prog_tests/assign_reuse.c new file mode 100644 index 000000000000..989ee4d9785b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/assign_reuse.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Isovalent */ +#include <uapi/linux/if_link.h> +#include <test_progs.h> + +#include <netinet/tcp.h> +#include <netinet/udp.h> + +#include "network_helpers.h" +#include "test_assign_reuse.skel.h" + +#define NS_TEST "assign_reuse" +#define LOOPBACK 1 +#define PORT 4443 + +static int attach_reuseport(int sock_fd, int prog_fd) +{ +	return setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, +			  &prog_fd, sizeof(prog_fd)); +} + +static __u64 cookie(int fd) +{ +	__u64 cookie = 0; +	socklen_t cookie_len = sizeof(cookie); +	int ret; + +	ret = getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len); +	ASSERT_OK(ret, "cookie"); +	ASSERT_GT(cookie, 0, "cookie_invalid"); + +	return cookie; +} + +static int echo_test_udp(int fd_sv) +{ +	struct sockaddr_storage addr = {}; +	socklen_t len = sizeof(addr); +	char buff[1] = {}; +	int fd_cl = -1, ret; + +	fd_cl = connect_to_fd(fd_sv, 100); +	ASSERT_GT(fd_cl, 0, "create_client"); +	ASSERT_EQ(getsockname(fd_cl, (void *)&addr, &len), 0, "getsockname"); + +	ASSERT_EQ(send(fd_cl, buff, sizeof(buff), 0), 1, "send_client"); + +	ret = recv(fd_sv, buff, sizeof(buff), 0); +	if (ret < 0) { +		close(fd_cl); +		return errno; +	} + +	ASSERT_EQ(ret, 1, "recv_server"); +	ASSERT_EQ(sendto(fd_sv, buff, sizeof(buff), 0, (void *)&addr, len), 1, "send_server"); +	ASSERT_EQ(recv(fd_cl, buff, sizeof(buff), 0), 1, "recv_client"); +	close(fd_cl); +	return 0; +} + +static int echo_test_tcp(int fd_sv) +{ +	char buff[1] = {}; +	int fd_cl = -1, fd_sv_cl = -1; + +	fd_cl = connect_to_fd(fd_sv, 100); +	if (fd_cl < 0) +		return errno; + +	fd_sv_cl = accept(fd_sv, NULL, NULL); +	ASSERT_GE(fd_sv_cl, 0, "accept_fd"); + +	ASSERT_EQ(send(fd_cl, buff, sizeof(buff), 0), 1, "send_client"); +	ASSERT_EQ(recv(fd_sv_cl, buff, sizeof(buff), 0), 1, "recv_server"); +	ASSERT_EQ(send(fd_sv_cl, buff, sizeof(buff), 0), 1, "send_server"); +	ASSERT_EQ(recv(fd_cl, buff, sizeof(buff), 0), 1, "recv_client"); +	close(fd_sv_cl); +	close(fd_cl); +	return 0; +} + +void run_assign_reuse(int family, int sotype, const char *ip, __u16 port) +{ +	DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, +		.ifindex = LOOPBACK, +		.attach_point = BPF_TC_INGRESS, +	); +	DECLARE_LIBBPF_OPTS(bpf_tc_opts, tc_opts, +		.handle = 1, +		.priority = 1, +	); +	bool hook_created = false, tc_attached = false; +	int ret, fd_tc, fd_accept, fd_drop, fd_map; +	int *fd_sv = NULL; +	__u64 fd_val; +	struct test_assign_reuse *skel; +	const int zero = 0; + +	skel = test_assign_reuse__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	skel->rodata->dest_port = port; + +	ret = test_assign_reuse__load(skel); +	if (!ASSERT_OK(ret, "skel_load")) +		goto cleanup; + +	ASSERT_EQ(skel->bss->sk_cookie_seen, 0, "cookie_init"); + +	fd_tc = bpf_program__fd(skel->progs.tc_main); +	fd_accept = bpf_program__fd(skel->progs.reuse_accept); +	fd_drop = bpf_program__fd(skel->progs.reuse_drop); +	fd_map = bpf_map__fd(skel->maps.sk_map); + +	fd_sv = start_reuseport_server(family, sotype, ip, port, 100, 1); +	if (!ASSERT_NEQ(fd_sv, NULL, "start_reuseport_server")) +		goto cleanup; + +	ret = attach_reuseport(*fd_sv, fd_drop); +	if (!ASSERT_OK(ret, "attach_reuseport")) +		goto cleanup; + +	fd_val = *fd_sv; +	ret = 
bpf_map_update_elem(fd_map, &zero, &fd_val, BPF_NOEXIST); +	if (!ASSERT_OK(ret, "bpf_sk_map")) +		goto cleanup; + +	ret = bpf_tc_hook_create(&tc_hook); +	if (ret == 0) +		hook_created = true; +	ret = ret == -EEXIST ? 0 : ret; +	if (!ASSERT_OK(ret, "bpf_tc_hook_create")) +		goto cleanup; + +	tc_opts.prog_fd = fd_tc; +	ret = bpf_tc_attach(&tc_hook, &tc_opts); +	if (!ASSERT_OK(ret, "bpf_tc_attach")) +		goto cleanup; +	tc_attached = true; + +	if (sotype == SOCK_STREAM) +		ASSERT_EQ(echo_test_tcp(*fd_sv), ECONNREFUSED, "drop_tcp"); +	else +		ASSERT_EQ(echo_test_udp(*fd_sv), EAGAIN, "drop_udp"); +	ASSERT_EQ(skel->bss->reuseport_executed, 1, "program executed once"); + +	skel->bss->sk_cookie_seen = 0; +	skel->bss->reuseport_executed = 0; +	ASSERT_OK(attach_reuseport(*fd_sv, fd_accept), "attach_reuseport(accept)"); + +	if (sotype == SOCK_STREAM) +		ASSERT_EQ(echo_test_tcp(*fd_sv), 0, "echo_tcp"); +	else +		ASSERT_EQ(echo_test_udp(*fd_sv), 0, "echo_udp"); + +	ASSERT_EQ(skel->bss->sk_cookie_seen, cookie(*fd_sv), +		  "cookie_mismatch"); +	ASSERT_EQ(skel->bss->reuseport_executed, 1, "program executed once"); +cleanup: +	if (tc_attached) { +		tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; +		ret = bpf_tc_detach(&tc_hook, &tc_opts); +		ASSERT_OK(ret, "bpf_tc_detach"); +	} +	if (hook_created) { +		tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; +		bpf_tc_hook_destroy(&tc_hook); +	} +	test_assign_reuse__destroy(skel); +	free_fds(fd_sv, 1); +} + +void test_assign_reuse(void) +{ +	struct nstoken *tok = NULL; + +	SYS(out, "ip netns add %s", NS_TEST); +	SYS(cleanup, "ip -net %s link set dev lo up", NS_TEST); + +	tok = open_netns(NS_TEST); +	if (!ASSERT_OK_PTR(tok, "netns token")) +		return; + +	if (test__start_subtest("tcpv4")) +		run_assign_reuse(AF_INET, SOCK_STREAM, "127.0.0.1", PORT); +	if (test__start_subtest("tcpv6")) +		run_assign_reuse(AF_INET6, SOCK_STREAM, "::1", PORT); +	if (test__start_subtest("udpv4")) +		run_assign_reuse(AF_INET, SOCK_DGRAM, "127.0.0.1", PORT); +	if (test__start_subtest("udpv6")) +		run_assign_reuse(AF_INET6, SOCK_DGRAM, "::1", PORT); + +cleanup: +	close_netns(tok); +	SYS_NOFAIL("ip netns delete %s", NS_TEST); +out: +	return; +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 26b2d1bffdfd..1454cebc262b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -11,6 +11,7 @@  #include <bpf/btf.h>  #include "test_bpf_cookie.skel.h"  #include "kprobe_multi.skel.h" +#include "uprobe_multi.skel.h"  /* uprobe attach point */  static noinline void trigger_func(void) @@ -239,6 +240,81 @@ cleanup:  	bpf_link__destroy(link1);  	kprobe_multi__destroy(skel);  } + +/* defined in prog_tests/uprobe_multi_test.c */ +void uprobe_multi_func_1(void); +void uprobe_multi_func_2(void); +void uprobe_multi_func_3(void); + +static void uprobe_multi_test_run(struct uprobe_multi *skel) +{ +	skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; +	skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; +	skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; + +	skel->bss->pid = getpid(); +	skel->bss->test_cookie = true; + +	uprobe_multi_func_1(); +	uprobe_multi_func_2(); +	uprobe_multi_func_3(); + +	ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 1, "uprobe_multi_func_1_result"); +	ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 1, "uprobe_multi_func_2_result"); +	
ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 1, "uprobe_multi_func_3_result"); + +	ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 1, "uretprobe_multi_func_1_result"); +	ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 1, "uretprobe_multi_func_2_result"); +	ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 1, "uretprobe_multi_func_3_result"); +} + +static void uprobe_multi_attach_api_subtest(void) +{ +	struct bpf_link *link1 = NULL, *link2 = NULL; +	struct uprobe_multi *skel = NULL; +	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); +	const char *syms[3] = { +		"uprobe_multi_func_1", +		"uprobe_multi_func_2", +		"uprobe_multi_func_3", +	}; +	__u64 cookies[3]; + +	cookies[0] = 3; /* uprobe_multi_func_1 */ +	cookies[1] = 1; /* uprobe_multi_func_2 */ +	cookies[2] = 2; /* uprobe_multi_func_3 */ + +	opts.syms = syms; +	opts.cnt = ARRAY_SIZE(syms); +	opts.cookies = &cookies[0]; + +	skel = uprobe_multi__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "uprobe_multi")) +		goto cleanup; + +	link1 = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1, +						 "/proc/self/exe", NULL, &opts); +	if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi")) +		goto cleanup; + +	cookies[0] = 2; /* uprobe_multi_func_1 */ +	cookies[1] = 3; /* uprobe_multi_func_2 */ +	cookies[2] = 1; /* uprobe_multi_func_3 */ + +	opts.retprobe = true; +	link2 = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, -1, +						      "/proc/self/exe", NULL, &opts); +	if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe")) +		goto cleanup; + +	uprobe_multi_test_run(skel); + +cleanup: +	bpf_link__destroy(link2); +	bpf_link__destroy(link1); +	uprobe_multi__destroy(skel); +} +  static void uprobe_subtest(struct test_bpf_cookie *skel)  {  	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); @@ -515,6 +591,8 @@ void test_bpf_cookie(void)  		kprobe_multi_attach_api_subtest();  	if (test__start_subtest("uprobe"))  		uprobe_subtest(skel); +	if (test__start_subtest("multi_uprobe_attach_api")) +		uprobe_multi_attach_api_subtest();  	if (test__start_subtest("tracepoint"))  		tp_subtest(skel);  	if (test__start_subtest("perf_event")) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c index c8ba4009e4ab..b30ff6b3b81a 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c @@ -123,12 +123,13 @@ static void test_bpf_nf_ct(int mode)  	ASSERT_EQ(skel->data->test_snat_addr, 0, "Test for source natting");  	ASSERT_EQ(skel->data->test_dnat_addr, 0, "Test for destination natting");  end: -	if (srv_client_fd != -1) -		close(srv_client_fd);  	if (client_fd != -1)  		close(client_fd); +	if (srv_client_fd != -1) +		close(srv_client_fd);  	if (srv_fd != -1)  		close(srv_fd); +  	snprintf(cmd, sizeof(cmd), iptables, "-D");  	system(cmd);  	test_bpf_nf__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c index 31f1e815f671..ee0458a5ce78 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c @@ -8,6 +8,7 @@  #include <linux/unistd.h>  #include <linux/mount.h>  #include <sys/syscall.h> +#include "bpf/libbpf_internal.h"  static inline int sys_fsopen(const char *fsname, unsigned flags)  { @@ -155,7 +156,7 @@ static void validate_pin(int map_fd, const char *map_name, int src_value,  	ASSERT_OK(err, "obj_pin");  	/* cleanup */ -	if (pin_opts.path_fd >= 0) +	if 
(path_kind == PATH_FD_REL && pin_opts.path_fd >= 0)  		close(pin_opts.path_fd);  	if (old_cwd[0])  		ASSERT_OK(chdir(old_cwd), "restore_cwd"); @@ -220,7 +221,7 @@ static void validate_get(int map_fd, const char *map_name, int src_value,  		goto cleanup;  	/* cleanup */ -	if (get_opts.path_fd >= 0) +	if (path_kind == PATH_FD_REL && get_opts.path_fd >= 0)  		close(get_opts.path_fd);  	if (old_cwd[0])  		ASSERT_OK(chdir(old_cwd), "restore_cwd"); diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_tcp_skb.c b/tools/testing/selftests/bpf/prog_tests/cgroup_tcp_skb.c new file mode 100644 index 000000000000..a1542faf7873 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_tcp_skb.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Facebook */ +#include <test_progs.h> +#include <linux/in6.h> +#include <sys/socket.h> +#include <sched.h> +#include <unistd.h> +#include "cgroup_helpers.h" +#include "testing_helpers.h" +#include "cgroup_tcp_skb.skel.h" +#include "cgroup_tcp_skb.h" +#include "network_helpers.h" + +#define CGROUP_TCP_SKB_PATH "/test_cgroup_tcp_skb" + +static int install_filters(int cgroup_fd, +			   struct bpf_link **egress_link, +			   struct bpf_link **ingress_link, +			   struct bpf_program *egress_prog, +			   struct bpf_program *ingress_prog, +			   struct cgroup_tcp_skb *skel) +{ +	/* Prepare filters */ +	skel->bss->g_sock_state = 0; +	skel->bss->g_unexpected = 0; +	*egress_link = +		bpf_program__attach_cgroup(egress_prog, +					   cgroup_fd); +	if (!ASSERT_OK_PTR(egress_link, "egress_link")) +		return -1; +	*ingress_link = +		bpf_program__attach_cgroup(ingress_prog, +					   cgroup_fd); +	if (!ASSERT_OK_PTR(ingress_link, "ingress_link")) +		return -1; + +	return 0; +} + +static void uninstall_filters(struct bpf_link **egress_link, +			      struct bpf_link **ingress_link) +{ +	bpf_link__destroy(*egress_link); +	*egress_link = NULL; +	bpf_link__destroy(*ingress_link); +	*ingress_link = NULL; +} + +static int create_client_sock_v6(void) +{ +	int fd; + +	fd = socket(AF_INET6, SOCK_STREAM, 0); +	if (fd < 0) { +		perror("socket"); +		return -1; +	} + +	return fd; +} + +/* Connect to the server in a cgroup from the outside of the cgroup. 
*/ +static int talk_to_cgroup(int *client_fd, int *listen_fd, int *service_fd, +			  struct cgroup_tcp_skb *skel) +{ +	int err, cp; +	char buf[5]; +	int port; + +	/* Create client & server socket */ +	err = join_root_cgroup(); +	if (!ASSERT_OK(err, "join_root_cgroup")) +		return -1; +	*client_fd = create_client_sock_v6(); +	if (!ASSERT_GE(*client_fd, 0, "client_fd")) +		return -1; +	err = join_cgroup(CGROUP_TCP_SKB_PATH); +	if (!ASSERT_OK(err, "join_cgroup")) +		return -1; +	*listen_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); +	if (!ASSERT_GE(*listen_fd, 0, "listen_fd")) +		return -1; +	port = get_socket_local_port(*listen_fd); +	if (!ASSERT_GE(port, 0, "get_socket_local_port")) +		return -1; +	skel->bss->g_sock_port = ntohs(port); + +	/* Connect client to server */ +	err = connect_fd_to_fd(*client_fd, *listen_fd, 0); +	if (!ASSERT_OK(err, "connect_fd_to_fd")) +		return -1; +	*service_fd = accept(*listen_fd, NULL, NULL); +	if (!ASSERT_GE(*service_fd, 0, "service_fd")) +		return -1; +	err = join_root_cgroup(); +	if (!ASSERT_OK(err, "join_root_cgroup")) +		return -1; +	cp = write(*client_fd, "hello", 5); +	if (!ASSERT_EQ(cp, 5, "write")) +		return -1; +	cp = read(*service_fd, buf, 5); +	if (!ASSERT_EQ(cp, 5, "read")) +		return -1; + +	return 0; +} + +/* Connect to the server out of a cgroup from inside the cgroup. */ +static int talk_to_outside(int *client_fd, int *listen_fd, int *service_fd, +			   struct cgroup_tcp_skb *skel) + +{ +	int err, cp; +	char buf[5]; +	int port; + +	/* Create client & server socket */ +	err = join_root_cgroup(); +	if (!ASSERT_OK(err, "join_root_cgroup")) +		return -1; +	*listen_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); +	if (!ASSERT_GE(*listen_fd, 0, "listen_fd")) +		return -1; +	err = join_cgroup(CGROUP_TCP_SKB_PATH); +	if (!ASSERT_OK(err, "join_cgroup")) +		return -1; +	*client_fd = create_client_sock_v6(); +	if (!ASSERT_GE(*client_fd, 0, "client_fd")) +		return -1; +	err = join_root_cgroup(); +	if (!ASSERT_OK(err, "join_root_cgroup")) +		return -1; +	port = get_socket_local_port(*listen_fd); +	if (!ASSERT_GE(port, 0, "get_socket_local_port")) +		return -1; +	skel->bss->g_sock_port = ntohs(port); + +	/* Connect client to server */ +	err = connect_fd_to_fd(*client_fd, *listen_fd, 0); +	if (!ASSERT_OK(err, "connect_fd_to_fd")) +		return -1; +	*service_fd = accept(*listen_fd, NULL, NULL); +	if (!ASSERT_GE(*service_fd, 0, "service_fd")) +		return -1; +	cp = write(*client_fd, "hello", 5); +	if (!ASSERT_EQ(cp, 5, "write")) +		return -1; +	cp = read(*service_fd, buf, 5); +	if (!ASSERT_EQ(cp, 5, "read")) +		return -1; + +	return 0; +} + +static int close_connection(int *closing_fd, int *peer_fd, int *listen_fd, +			    struct cgroup_tcp_skb *skel) +{ +	__u32 saved_packet_count = 0; +	int err; +	int i; + +	/* Wait for ACKs to be sent */ +	saved_packet_count = skel->bss->g_packet_count; +	usleep(100000);		/* 0.1s */ +	for (i = 0; +	     skel->bss->g_packet_count != saved_packet_count && i < 10; +	     i++) { +		saved_packet_count = skel->bss->g_packet_count; +		usleep(100000);	/* 0.1s */ +	} +	if (!ASSERT_EQ(skel->bss->g_packet_count, saved_packet_count, +		       "packet_count")) +		return -1; + +	skel->bss->g_packet_count = 0; +	saved_packet_count = 0; + +	/* Half shutdown to make sure the closing socket having a chance to +	 * receive a FIN from the peer. 
+	 */ +	err = shutdown(*closing_fd, SHUT_WR); +	if (!ASSERT_OK(err, "shutdown closing_fd")) +		return -1; + +	/* Wait for FIN and the ACK of the FIN to be observed */ +	for (i = 0; +	     skel->bss->g_packet_count < saved_packet_count + 2 && i < 10; +	     i++) +		usleep(100000);	/* 0.1s */ +	if (!ASSERT_GE(skel->bss->g_packet_count, saved_packet_count + 2, +		       "packet_count")) +		return -1; + +	saved_packet_count = skel->bss->g_packet_count; + +	/* Fully shutdown the connection */ +	err = close(*peer_fd); +	if (!ASSERT_OK(err, "close peer_fd")) +		return -1; +	*peer_fd = -1; + +	/* Wait for FIN and the ACK of the FIN to be observed */ +	for (i = 0; +	     skel->bss->g_packet_count < saved_packet_count + 2 && i < 10; +	     i++) +		usleep(100000);	/* 0.1s */ +	if (!ASSERT_GE(skel->bss->g_packet_count, saved_packet_count + 2, +		       "packet_count")) +		return -1; + +	err = close(*closing_fd); +	if (!ASSERT_OK(err, "close closing_fd")) +		return -1; +	*closing_fd = -1; + +	close(*listen_fd); +	*listen_fd = -1; + +	return 0; +} + +/* This test case includes four scenarios: + * 1. Connect to the server from outside the cgroup and close the connection + *    from outside the cgroup. + * 2. Connect to the server from outside the cgroup and close the connection + *    from inside the cgroup. + * 3. Connect to the server from inside the cgroup and close the connection + *    from outside the cgroup. + * 4. Connect to the server from inside the cgroup and close the connection + *    from inside the cgroup. + * + * The test case is to verify that cgroup_skb/{egress,ingress} filters + * receive expected packets including SYN, SYN/ACK, ACK, FIN, and FIN/ACK. + */ +void test_cgroup_tcp_skb(void) +{ +	struct bpf_link *ingress_link = NULL; +	struct bpf_link *egress_link = NULL; +	int client_fd = -1, listen_fd = -1; +	struct cgroup_tcp_skb *skel; +	int service_fd = -1; +	int cgroup_fd = -1; +	int err; + +	skel = cgroup_tcp_skb__open_and_load(); +	if (!ASSERT_OK(!skel, "skel_open_load")) +		return; + +	err = setup_cgroup_environment(); +	if (!ASSERT_OK(err, "setup_cgroup_environment")) +		goto cleanup; + +	cgroup_fd = create_and_get_cgroup(CGROUP_TCP_SKB_PATH); +	if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd")) +		goto cleanup; + +	/* Scenario 1 */ +	err = install_filters(cgroup_fd, &egress_link, &ingress_link, +			      skel->progs.server_egress, +			      skel->progs.server_ingress, +			      skel); +	if (!ASSERT_OK(err, "install_filters")) +		goto cleanup; + +	err = talk_to_cgroup(&client_fd, &listen_fd, &service_fd, skel); +	if (!ASSERT_OK(err, "talk_to_cgroup")) +		goto cleanup; + +	err = close_connection(&client_fd, &service_fd, &listen_fd, skel); +	if (!ASSERT_OK(err, "close_connection")) +		goto cleanup; + +	ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected"); +	ASSERT_EQ(skel->bss->g_sock_state, CLOSED, "g_sock_state"); + +	uninstall_filters(&egress_link, &ingress_link); + +	/* Scenario 2 */ +	err = install_filters(cgroup_fd, &egress_link, &ingress_link, +			      skel->progs.server_egress_srv, +			      skel->progs.server_ingress_srv, +			      skel); + +	err = talk_to_cgroup(&client_fd, &listen_fd, &service_fd, skel); +	if (!ASSERT_OK(err, "talk_to_cgroup")) +		goto cleanup; + +	err = close_connection(&service_fd, &client_fd, &listen_fd, skel); +	if (!ASSERT_OK(err, "close_connection")) +		goto cleanup; + +	ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected"); +	ASSERT_EQ(skel->bss->g_sock_state, TIME_WAIT, "g_sock_state"); + +	uninstall_filters(&egress_link, &ingress_link); + +	/* 
Scenario 3 */ +	err = install_filters(cgroup_fd, &egress_link, &ingress_link, +			      skel->progs.client_egress_srv, +			      skel->progs.client_ingress_srv, +			      skel); + +	err = talk_to_outside(&client_fd, &listen_fd, &service_fd, skel); +	if (!ASSERT_OK(err, "talk_to_outside")) +		goto cleanup; + +	err = close_connection(&service_fd, &client_fd, &listen_fd, skel); +	if (!ASSERT_OK(err, "close_connection")) +		goto cleanup; + +	ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected"); +	ASSERT_EQ(skel->bss->g_sock_state, CLOSED, "g_sock_state"); + +	uninstall_filters(&egress_link, &ingress_link); + +	/* Scenario 4 */ +	err = install_filters(cgroup_fd, &egress_link, &ingress_link, +			      skel->progs.client_egress, +			      skel->progs.client_ingress, +			      skel); + +	err = talk_to_outside(&client_fd, &listen_fd, &service_fd, skel); +	if (!ASSERT_OK(err, "talk_to_outside")) +		goto cleanup; + +	err = close_connection(&client_fd, &service_fd, &listen_fd, skel); +	if (!ASSERT_OK(err, "close_connection")) +		goto cleanup; + +	ASSERT_EQ(skel->bss->g_unexpected, 0, "g_unexpected"); +	ASSERT_EQ(skel->bss->g_sock_state, TIME_WAIT, "g_sock_state"); + +	uninstall_filters(&egress_link, &ingress_link); + +cleanup: +	close(client_fd); +	close(listen_fd); +	close(service_fd); +	close(cgroup_fd); +	bpf_link__destroy(egress_link); +	bpf_link__destroy(ingress_link); +	cleanup_cgroup_environment(); +	cgroup_tcp_skb__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/d_path.c b/tools/testing/selftests/bpf/prog_tests/d_path.c index 911345c526e6..ccc768592e66 100644 --- a/tools/testing/selftests/bpf/prog_tests/d_path.c +++ b/tools/testing/selftests/bpf/prog_tests/d_path.c @@ -12,6 +12,17 @@  #include "test_d_path_check_rdonly_mem.skel.h"  #include "test_d_path_check_types.skel.h" +/* sys_close_range is not around for long time, so let's + * make sure we can call it on systems with older glibc + */ +#ifndef __NR_close_range +#ifdef __alpha__ +#define __NR_close_range 546 +#else +#define __NR_close_range 436 +#endif +#endif +  static int duration;  static struct { @@ -90,7 +101,11 @@ static int trigger_fstat_events(pid_t pid)  	fstat(indicatorfd, &fileStat);  out_close: -	/* triggers filp_close */ +	/* sys_close no longer triggers filp_close, but we can +	 * call sys_close_range instead which still does +	 */ +#define close(fd) syscall(__NR_close_range, fd, fd, 0) +  	close(pipefd[0]);  	close(pipefd[1]);  	close(sockfd); @@ -98,6 +113,8 @@ out_close:  	close(devfd);  	close(localfd);  	close(indicatorfd); + +#undef close  	return ret;  } diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c index c0d1d61d5f66..aee1bc77a17f 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -2,8 +2,9 @@  /* Copyright (c) 2019 Facebook */  #include <test_progs.h>  #include "fentry_test.lskel.h" +#include "fentry_many_args.skel.h" -static int fentry_test(struct fentry_test_lskel *fentry_skel) +static int fentry_test_common(struct fentry_test_lskel *fentry_skel)  {  	int err, prog_fd, i;  	int link_fd; @@ -37,7 +38,7 @@ static int fentry_test(struct fentry_test_lskel *fentry_skel)  	return 0;  } -void test_fentry_test(void) +static void fentry_test(void)  {  	struct fentry_test_lskel *fentry_skel = NULL;  	int err; @@ -46,13 +47,47 @@ void test_fentry_test(void)  	if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load"))  		goto cleanup; -	err = 
fentry_test(fentry_skel); +	err = fentry_test_common(fentry_skel);  	if (!ASSERT_OK(err, "fentry_first_attach"))  		goto cleanup; -	err = fentry_test(fentry_skel); +	err = fentry_test_common(fentry_skel);  	ASSERT_OK(err, "fentry_second_attach");  cleanup:  	fentry_test_lskel__destroy(fentry_skel);  } + +static void fentry_many_args(void) +{ +	struct fentry_many_args *fentry_skel = NULL; +	int err; + +	fentry_skel = fentry_many_args__open_and_load(); +	if (!ASSERT_OK_PTR(fentry_skel, "fentry_many_args_skel_load")) +		goto cleanup; + +	err = fentry_many_args__attach(fentry_skel); +	if (!ASSERT_OK(err, "fentry_many_args_attach")) +		goto cleanup; + +	ASSERT_OK(trigger_module_test_read(1), "trigger_read"); + +	ASSERT_EQ(fentry_skel->bss->test1_result, 1, +		  "fentry_many_args_result1"); +	ASSERT_EQ(fentry_skel->bss->test2_result, 1, +		  "fentry_many_args_result2"); +	ASSERT_EQ(fentry_skel->bss->test3_result, 1, +		  "fentry_many_args_result3"); + +cleanup: +	fentry_many_args__destroy(fentry_skel); +} + +void test_fentry_test(void) +{ +	if (test__start_subtest("fentry")) +		fentry_test(); +	if (test__start_subtest("fentry_many_args")) +		fentry_many_args(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c index 101b7343036b..1c13007e37dd 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -2,8 +2,9 @@  /* Copyright (c) 2019 Facebook */  #include <test_progs.h>  #include "fexit_test.lskel.h" +#include "fexit_many_args.skel.h" -static int fexit_test(struct fexit_test_lskel *fexit_skel) +static int fexit_test_common(struct fexit_test_lskel *fexit_skel)  {  	int err, prog_fd, i;  	int link_fd; @@ -37,7 +38,7 @@ static int fexit_test(struct fexit_test_lskel *fexit_skel)  	return 0;  } -void test_fexit_test(void) +static void fexit_test(void)  {  	struct fexit_test_lskel *fexit_skel = NULL;  	int err; @@ -46,13 +47,47 @@ void test_fexit_test(void)  	if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load"))  		goto cleanup; -	err = fexit_test(fexit_skel); +	err = fexit_test_common(fexit_skel);  	if (!ASSERT_OK(err, "fexit_first_attach"))  		goto cleanup; -	err = fexit_test(fexit_skel); +	err = fexit_test_common(fexit_skel);  	ASSERT_OK(err, "fexit_second_attach");  cleanup:  	fexit_test_lskel__destroy(fexit_skel);  } + +static void fexit_many_args(void) +{ +	struct fexit_many_args *fexit_skel = NULL; +	int err; + +	fexit_skel = fexit_many_args__open_and_load(); +	if (!ASSERT_OK_PTR(fexit_skel, "fexit_many_args_skel_load")) +		goto cleanup; + +	err = fexit_many_args__attach(fexit_skel); +	if (!ASSERT_OK(err, "fexit_many_args_attach")) +		goto cleanup; + +	ASSERT_OK(trigger_module_test_read(1), "trigger_read"); + +	ASSERT_EQ(fexit_skel->bss->test1_result, 1, +		  "fexit_many_args_result1"); +	ASSERT_EQ(fexit_skel->bss->test2_result, 1, +		  "fexit_many_args_result2"); +	ASSERT_EQ(fexit_skel->bss->test3_result, 1, +		  "fexit_many_args_result3"); + +cleanup: +	fexit_many_args__destroy(fexit_skel); +} + +void test_fexit_test(void) +{ +	if (test__start_subtest("fexit")) +		fexit_test(); +	if (test__start_subtest("fexit_many_args")) +		fexit_many_args(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c new file mode 100644 index 000000000000..9d768e083714 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0 +/* 
Copyright (C) 2023 Yafang Shao <[email protected]> */ + +#include <string.h> +#include <linux/bpf.h> +#include <linux/limits.h> +#include <test_progs.h> +#include "trace_helpers.h" +#include "test_fill_link_info.skel.h" + +#define TP_CAT "sched" +#define TP_NAME "sched_switch" + +static const char *kmulti_syms[] = { +	"bpf_fentry_test2", +	"bpf_fentry_test1", +	"bpf_fentry_test3", +}; +#define KMULTI_CNT ARRAY_SIZE(kmulti_syms) +static __u64 kmulti_addrs[KMULTI_CNT]; + +#define KPROBE_FUNC "bpf_fentry_test1" +static __u64 kprobe_addr; + +#define UPROBE_FILE "/proc/self/exe" +static ssize_t uprobe_offset; +/* uprobe attach point */ +static noinline void uprobe_func(void) +{ +	asm volatile (""); +} + +static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr, +				 ssize_t offset, ssize_t entry_offset) +{ +	struct bpf_link_info info; +	__u32 len = sizeof(info); +	char buf[PATH_MAX]; +	int err; + +	memset(&info, 0, sizeof(info)); +	buf[0] = '\0'; + +again: +	err = bpf_link_get_info_by_fd(fd, &info, &len); +	if (!ASSERT_OK(err, "get_link_info")) +		return -1; + +	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type")) +		return -1; +	if (!ASSERT_EQ(info.perf_event.type, type, "perf_type_match")) +		return -1; + +	switch (info.perf_event.type) { +	case BPF_PERF_EVENT_KPROBE: +	case BPF_PERF_EVENT_KRETPROBE: +		ASSERT_EQ(info.perf_event.kprobe.offset, offset, "kprobe_offset"); + +		/* In case kernel.kptr_restrict is not permitted or MAX_SYMS is reached */ +		if (addr) +			ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset, +				  "kprobe_addr"); + +		if (!info.perf_event.kprobe.func_name) { +			ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len"); +			info.perf_event.kprobe.func_name = ptr_to_u64(&buf); +			info.perf_event.kprobe.name_len = sizeof(buf); +			goto again; +		} + +		err = strncmp(u64_to_ptr(info.perf_event.kprobe.func_name), KPROBE_FUNC, +			      strlen(KPROBE_FUNC)); +		ASSERT_EQ(err, 0, "cmp_kprobe_func_name"); +		break; +	case BPF_PERF_EVENT_TRACEPOINT: +		if (!info.perf_event.tracepoint.tp_name) { +			ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len"); +			info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf); +			info.perf_event.tracepoint.name_len = sizeof(buf); +			goto again; +		} + +		err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME, +			      strlen(TP_NAME)); +		ASSERT_EQ(err, 0, "cmp_tp_name"); +		break; +	case BPF_PERF_EVENT_UPROBE: +	case BPF_PERF_EVENT_URETPROBE: +		ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset"); + +		if (!info.perf_event.uprobe.file_name) { +			ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len"); +			info.perf_event.uprobe.file_name = ptr_to_u64(&buf); +			info.perf_event.uprobe.name_len = sizeof(buf); +			goto again; +		} + +		err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE, +			      strlen(UPROBE_FILE)); +			ASSERT_EQ(err, 0, "cmp_file_name"); +		break; +	default: +		err = -1; +		break; +	} +	return err; +} + +static void kprobe_fill_invalid_user_buffer(int fd) +{ +	struct bpf_link_info info; +	__u32 len = sizeof(info); +	int err; + +	memset(&info, 0, sizeof(info)); + +	info.perf_event.kprobe.func_name = 0x1; /* invalid address */ +	err = bpf_link_get_info_by_fd(fd, &info, &len); +	ASSERT_EQ(err, -EINVAL, "invalid_buff_and_len"); + +	info.perf_event.kprobe.name_len = 64; +	err = bpf_link_get_info_by_fd(fd, &info, &len); +	ASSERT_EQ(err, -EFAULT, "invalid_buff"); + +	info.perf_event.kprobe.func_name = 0; +	err = 
bpf_link_get_info_by_fd(fd, &info, &len); +	ASSERT_EQ(err, -EINVAL, "invalid_len"); + +	ASSERT_EQ(info.perf_event.kprobe.addr, 0, "func_addr"); +	ASSERT_EQ(info.perf_event.kprobe.offset, 0, "func_offset"); +	ASSERT_EQ(info.perf_event.type, 0, "type"); +} + +static void test_kprobe_fill_link_info(struct test_fill_link_info *skel, +				       enum bpf_perf_event_type type, +				       bool invalid) +{ +	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts, +		.attach_mode = PROBE_ATTACH_MODE_LINK, +		.retprobe = type == BPF_PERF_EVENT_KRETPROBE, +	); +	ssize_t entry_offset = 0; +	int link_fd, err; + +	skel->links.kprobe_run = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run, +								 KPROBE_FUNC, &opts); +	if (!ASSERT_OK_PTR(skel->links.kprobe_run, "attach_kprobe")) +		return; + +	link_fd = bpf_link__fd(skel->links.kprobe_run); +	if (!invalid) { +		/* See also arch_adjust_kprobe_addr(). */ +		if (skel->kconfig->CONFIG_X86_KERNEL_IBT) +			entry_offset = 4; +		err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset); +		ASSERT_OK(err, "verify_perf_link_info"); +	} else { +		kprobe_fill_invalid_user_buffer(link_fd); +	} +	bpf_link__detach(skel->links.kprobe_run); +} + +static void test_tp_fill_link_info(struct test_fill_link_info *skel) +{ +	int link_fd, err; + +	skel->links.tp_run = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME); +	if (!ASSERT_OK_PTR(skel->links.tp_run, "attach_tp")) +		return; + +	link_fd = bpf_link__fd(skel->links.tp_run); +	err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0); +	ASSERT_OK(err, "verify_perf_link_info"); +	bpf_link__detach(skel->links.tp_run); +} + +static void test_uprobe_fill_link_info(struct test_fill_link_info *skel, +				       enum bpf_perf_event_type type) +{ +	int link_fd, err; + +	skel->links.uprobe_run = bpf_program__attach_uprobe(skel->progs.uprobe_run, +							    type == BPF_PERF_EVENT_URETPROBE, +							    0, /* self pid */ +							    UPROBE_FILE, uprobe_offset); +	if (!ASSERT_OK_PTR(skel->links.uprobe_run, "attach_uprobe")) +		return; + +	link_fd = bpf_link__fd(skel->links.uprobe_run); +	err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, 0); +	ASSERT_OK(err, "verify_perf_link_info"); +	bpf_link__detach(skel->links.uprobe_run); +} + +static int verify_kmulti_link_info(int fd, bool retprobe) +{ +	struct bpf_link_info info; +	__u32 len = sizeof(info); +	__u64 addrs[KMULTI_CNT]; +	int flags, i, err; + +	memset(&info, 0, sizeof(info)); + +again: +	err = bpf_link_get_info_by_fd(fd, &info, &len); +	if (!ASSERT_OK(err, "get_link_info")) +		return -1; + +	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_KPROBE_MULTI, "kmulti_type")) +		return -1; + +	ASSERT_EQ(info.kprobe_multi.count, KMULTI_CNT, "func_cnt"); +	flags = info.kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN; +	if (!retprobe) +		ASSERT_EQ(flags, 0, "kmulti_flags"); +	else +		ASSERT_NEQ(flags, 0, "kretmulti_flags"); + +	if (!info.kprobe_multi.addrs) { +		info.kprobe_multi.addrs = ptr_to_u64(addrs); +		goto again; +	} +	for (i = 0; i < KMULTI_CNT; i++) +		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs"); +	return 0; +} + +static void verify_kmulti_invalid_user_buffer(int fd) +{ +	struct bpf_link_info info; +	__u32 len = sizeof(info); +	__u64 addrs[KMULTI_CNT]; +	int err, i; + +	memset(&info, 0, sizeof(info)); + +	info.kprobe_multi.count = KMULTI_CNT; +	err = bpf_link_get_info_by_fd(fd, &info, &len); +	ASSERT_EQ(err, -EINVAL, "no_addr"); + +	info.kprobe_multi.addrs = ptr_to_u64(addrs); +	info.kprobe_multi.count = 0; +	err = 
bpf_link_get_info_by_fd(fd, &info, &len); +	ASSERT_EQ(err, -EINVAL, "no_cnt"); + +	for (i = 0; i < KMULTI_CNT; i++) +		addrs[i] = 0; +	info.kprobe_multi.count = KMULTI_CNT - 1; +	err = bpf_link_get_info_by_fd(fd, &info, &len); +	ASSERT_EQ(err, -ENOSPC, "smaller_cnt"); +	for (i = 0; i < KMULTI_CNT - 1; i++) +		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs"); +	ASSERT_EQ(addrs[i], 0, "kmulti_addrs"); + +	for (i = 0; i < KMULTI_CNT; i++) +		addrs[i] = 0; +	info.kprobe_multi.count = KMULTI_CNT + 1; +	err = bpf_link_get_info_by_fd(fd, &info, &len); +	ASSERT_EQ(err, 0, "bigger_cnt"); +	for (i = 0; i < KMULTI_CNT; i++) +		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs"); + +	info.kprobe_multi.count = KMULTI_CNT; +	info.kprobe_multi.addrs = 0x1; /* invalid addr */ +	err = bpf_link_get_info_by_fd(fd, &info, &len); +	ASSERT_EQ(err, -EFAULT, "invalid_buff"); +} + +static int symbols_cmp_r(const void *a, const void *b) +{ +	const char **str_a = (const char **) a; +	const char **str_b = (const char **) b; + +	return strcmp(*str_a, *str_b); +} + +static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel, +					     bool retprobe, bool invalid) +{ +	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); +	int link_fd, err; + +	opts.syms = kmulti_syms; +	opts.cnt = KMULTI_CNT; +	opts.retprobe = retprobe; +	skel->links.kmulti_run = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, +								       NULL, &opts); +	if (!ASSERT_OK_PTR(skel->links.kmulti_run, "attach_kprobe_multi")) +		return; + +	link_fd = bpf_link__fd(skel->links.kmulti_run); +	if (!invalid) { +		err = verify_kmulti_link_info(link_fd, retprobe); +		ASSERT_OK(err, "verify_kmulti_link_info"); +	} else { +		verify_kmulti_invalid_user_buffer(link_fd); +	} +	bpf_link__detach(skel->links.kmulti_run); +} + +void test_fill_link_info(void) +{ +	struct test_fill_link_info *skel; +	int i; + +	skel = test_fill_link_info__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		return; + +	/* load kallsyms to compare the addr */ +	if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh")) +		goto cleanup; + +	kprobe_addr = ksym_get_addr(KPROBE_FUNC); +	if (test__start_subtest("kprobe_link_info")) +		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false); +	if (test__start_subtest("kretprobe_link_info")) +		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KRETPROBE, false); +	if (test__start_subtest("kprobe_invalid_ubuff")) +		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true); +	if (test__start_subtest("tracepoint_link_info")) +		test_tp_fill_link_info(skel); + +	uprobe_offset = get_uprobe_offset(&uprobe_func); +	if (test__start_subtest("uprobe_link_info")) +		test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_UPROBE); +	if (test__start_subtest("uretprobe_link_info")) +		test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_URETPROBE); + +	qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r); +	for (i = 0; i < KMULTI_CNT; i++) +		kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]); +	if (test__start_subtest("kprobe_multi_link_info")) +		test_kprobe_multi_fill_link_info(skel, false, false); +	if (test__start_subtest("kretprobe_multi_link_info")) +		test_kprobe_multi_fill_link_info(skel, true, false); +	if (test__start_subtest("kprobe_multi_invalid_ubuff")) +		test_kprobe_multi_fill_link_info(skel, true, true); + +cleanup: +	test_fill_link_info__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c 
index 28cf63963cb7..64a9c95d4acf 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c +++ b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c @@ -30,7 +30,9 @@ void test_get_func_args_test(void)  	prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);  	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "test_run"); -	ASSERT_EQ(topts.retval, 1234, "test_run"); + +	ASSERT_EQ(topts.retval >> 16, 1, "test_run"); +	ASSERT_EQ(topts.retval & 0xffff, 1234 + 29, "test_run");  	ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");  	ASSERT_EQ(skel->bss->test2_result, 1, "test2_result"); diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c index fede8ef58b5b..c40242dfa8fb 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c +++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c @@ -1,6 +1,11 @@  // SPDX-License-Identifier: GPL-2.0  #include <test_progs.h>  #include "get_func_ip_test.skel.h" +#include "get_func_ip_uprobe_test.skel.h" + +static noinline void uprobe_trigger(void) +{ +}  static void test_function_entry(void)  { @@ -20,6 +25,8 @@ static void test_function_entry(void)  	if (!ASSERT_OK(err, "get_func_ip_test__attach"))  		goto cleanup; +	skel->bss->uprobe_trigger = (unsigned long) uprobe_trigger; +  	prog_fd = bpf_program__fd(skel->progs.test1);  	err = bpf_prog_test_run_opts(prog_fd, &topts);  	ASSERT_OK(err, "test_run"); @@ -30,21 +37,31 @@ static void test_function_entry(void)  	ASSERT_OK(err, "test_run"); +	uprobe_trigger(); +  	ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");  	ASSERT_EQ(skel->bss->test2_result, 1, "test2_result");  	ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");  	ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");  	ASSERT_EQ(skel->bss->test5_result, 1, "test5_result"); +	ASSERT_EQ(skel->bss->test7_result, 1, "test7_result"); +	ASSERT_EQ(skel->bss->test8_result, 1, "test8_result");  cleanup:  	get_func_ip_test__destroy(skel);  } -/* test6 is x86_64 specific because of the instruction - * offset, disabling it for all other archs - */  #ifdef __x86_64__ -static void test_function_body(void) +extern void uprobe_trigger_body(void); +asm( +".globl uprobe_trigger_body\n" +".type uprobe_trigger_body, @function\n" +"uprobe_trigger_body:\n" +"	nop\n" +"	ret\n" +); + +static void test_function_body_kprobe(void)  {  	struct get_func_ip_test *skel = NULL;  	LIBBPF_OPTS(bpf_test_run_opts, topts); @@ -56,6 +73,9 @@ static void test_function_body(void)  	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))  		return; +	/* test6 is x86_64 specific and is disabled by default, +	 * enable it for body test. 
+	 */  	bpf_program__set_autoload(skel->progs.test6, true);  	err = get_func_ip_test__load(skel); @@ -79,6 +99,35 @@ cleanup:  	bpf_link__destroy(link6);  	get_func_ip_test__destroy(skel);  } + +static void test_function_body_uprobe(void) +{ +	struct get_func_ip_uprobe_test *skel = NULL; +	int err; + +	skel = get_func_ip_uprobe_test__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "get_func_ip_uprobe_test__open_and_load")) +		return; + +	err = get_func_ip_uprobe_test__attach(skel); +	if (!ASSERT_OK(err, "get_func_ip_test__attach")) +		goto cleanup; + +	skel->bss->uprobe_trigger_body = (unsigned long) uprobe_trigger_body; + +	uprobe_trigger_body(); + +	ASSERT_EQ(skel->bss->test1_result, 1, "test1_result"); + +cleanup: +	get_func_ip_uprobe_test__destroy(skel); +} + +static void test_function_body(void) +{ +	test_function_body_kprobe(); +	test_function_body_uprobe(); +}  #else  #define test_function_body()  #endif diff --git a/tools/testing/selftests/bpf/prog_tests/global_map_resize.c b/tools/testing/selftests/bpf/prog_tests/global_map_resize.c index fd41425d2e5c..56b5baef35c8 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_map_resize.c +++ b/tools/testing/selftests/bpf/prog_tests/global_map_resize.c @@ -22,7 +22,7 @@ static void global_map_resize_bss_subtest(void)  	struct test_global_map_resize *skel;  	struct bpf_map *map;  	const __u32 desired_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2; -	size_t array_len, actual_sz; +	size_t array_len, actual_sz, new_sz;  	skel = test_global_map_resize__open();  	if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open")) @@ -42,6 +42,10 @@ static void global_map_resize_bss_subtest(void)  	if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))  		goto teardown; +	new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus(); +	err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz); +	ASSERT_OK(err, "percpu_arr_resize"); +  	/* set the expected number of elements based on the resized array */  	array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->bss->array[0]);  	if (!ASSERT_GT(array_len, 1, "array_len")) @@ -84,11 +88,11 @@ teardown:  static void global_map_resize_data_subtest(void)  { -	int err;  	struct test_global_map_resize *skel;  	struct bpf_map *map;  	const __u32 desired_sz = sysconf(_SC_PAGE_SIZE) * 2; -	size_t array_len, actual_sz; +	size_t array_len, actual_sz, new_sz; +	int err;  	skel = test_global_map_resize__open();  	if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open")) @@ -108,6 +112,10 @@ static void global_map_resize_data_subtest(void)  	if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))  		goto teardown; +	new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus(); +	err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz); +	ASSERT_OK(err, "percpu_arr_resize"); +  	/* set the expected number of elements based on the resized array */  	array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->data_custom->my_array[0]);  	if (!ASSERT_GT(array_len, 1, "array_len")) diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c new file mode 100644 index 000000000000..57c814f5f6a7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <net/if.h> +#include <linux/netfilter.h> +#include <network_helpers.h> +#include 
"ip_check_defrag.skel.h" +#include "ip_check_defrag_frags.h" + +/* + * This selftest spins up a client and an echo server, each in their own + * network namespace. The client will send a fragmented message to the server. + * The prog attached to the server will shoot down any fragments. Thus, if + * the server is able to correctly echo back the message to the client, we will + * have verified that netfilter is reassembling packets for us. + * + * Topology: + * ========= + *           NS0         |         NS1 + *                       | + *         client        |       server + *       ----------      |     ---------- + *       |  veth0  | --------- |  veth1  | + *       ----------    peer    ---------- + *                       | + *                       |       with bpf + */ + +#define NS0		"defrag_ns0" +#define NS1		"defrag_ns1" +#define VETH0		"veth0" +#define VETH1		"veth1" +#define VETH0_ADDR	"172.16.1.100" +#define VETH0_ADDR6	"fc00::100" +/* The following constants must stay in sync with `generate_udp_fragments.py` */ +#define VETH1_ADDR	"172.16.1.200" +#define VETH1_ADDR6	"fc00::200" +#define CLIENT_PORT	48878 +#define SERVER_PORT	48879 +#define MAGIC_MESSAGE	"THIS IS THE ORIGINAL MESSAGE, PLEASE REASSEMBLE ME" + +static int setup_topology(bool ipv6) +{ +	bool up; +	int i; + +	SYS(fail, "ip netns add " NS0); +	SYS(fail, "ip netns add " NS1); +	SYS(fail, "ip link add " VETH0 " netns " NS0 " type veth peer name " VETH1 " netns " NS1); +	if (ipv6) { +		SYS(fail, "ip -6 -net " NS0 " addr add " VETH0_ADDR6 "/64 dev " VETH0 " nodad"); +		SYS(fail, "ip -6 -net " NS1 " addr add " VETH1_ADDR6 "/64 dev " VETH1 " nodad"); +	} else { +		SYS(fail, "ip -net " NS0 " addr add " VETH0_ADDR "/24 dev " VETH0); +		SYS(fail, "ip -net " NS1 " addr add " VETH1_ADDR "/24 dev " VETH1); +	} +	SYS(fail, "ip -net " NS0 " link set dev " VETH0 " up"); +	SYS(fail, "ip -net " NS1 " link set dev " VETH1 " up"); + +	/* Wait for up to 5s for links to come up */ +	for (i = 0; i < 5; ++i) { +		if (ipv6) +			up = !system("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6 " &>/dev/null"); +		else +			up = !system("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR " &>/dev/null"); + +		if (up) +			break; +	} + +	return 0; +fail: +	return -1; +} + +static void cleanup_topology(void) +{ +	SYS_NOFAIL("test -f /var/run/netns/" NS0 " && ip netns delete " NS0); +	SYS_NOFAIL("test -f /var/run/netns/" NS1 " && ip netns delete " NS1); +} + +static int attach(struct ip_check_defrag *skel, bool ipv6) +{ +	LIBBPF_OPTS(bpf_netfilter_opts, opts, +		    .pf = ipv6 ? 
NFPROTO_IPV6 : NFPROTO_IPV4, +		    .priority = 42, +		    .flags = BPF_F_NETFILTER_IP_DEFRAG); +	struct nstoken *nstoken; +	int err = -1; + +	nstoken = open_netns(NS1); + +	skel->links.defrag = bpf_program__attach_netfilter(skel->progs.defrag, &opts); +	if (!ASSERT_OK_PTR(skel->links.defrag, "program attach")) +		goto out; + +	err = 0; +out: +	close_netns(nstoken); +	return err; +} + +static int send_frags(int client) +{ +	struct sockaddr_storage saddr; +	struct sockaddr *saddr_p; +	socklen_t saddr_len; +	int err; + +	saddr_p = (struct sockaddr *)&saddr; +	err = make_sockaddr(AF_INET, VETH1_ADDR, SERVER_PORT, &saddr, &saddr_len); +	if (!ASSERT_OK(err, "make_sockaddr")) +		return -1; + +	err = sendto(client, frag_0, sizeof(frag_0), 0, saddr_p, saddr_len); +	if (!ASSERT_GE(err, 0, "sendto frag_0")) +		return -1; + +	err = sendto(client, frag_1, sizeof(frag_1), 0, saddr_p, saddr_len); +	if (!ASSERT_GE(err, 0, "sendto frag_1")) +		return -1; + +	err = sendto(client, frag_2, sizeof(frag_2), 0, saddr_p, saddr_len); +	if (!ASSERT_GE(err, 0, "sendto frag_2")) +		return -1; + +	return 0; +} + +static int send_frags6(int client) +{ +	struct sockaddr_storage saddr; +	struct sockaddr *saddr_p; +	socklen_t saddr_len; +	int err; + +	saddr_p = (struct sockaddr *)&saddr; +	/* Port needs to be set to 0 for raw ipv6 socket for some reason */ +	err = make_sockaddr(AF_INET6, VETH1_ADDR6, 0, &saddr, &saddr_len); +	if (!ASSERT_OK(err, "make_sockaddr")) +		return -1; + +	err = sendto(client, frag6_0, sizeof(frag6_0), 0, saddr_p, saddr_len); +	if (!ASSERT_GE(err, 0, "sendto frag6_0")) +		return -1; + +	err = sendto(client, frag6_1, sizeof(frag6_1), 0, saddr_p, saddr_len); +	if (!ASSERT_GE(err, 0, "sendto frag6_1")) +		return -1; + +	err = sendto(client, frag6_2, sizeof(frag6_2), 0, saddr_p, saddr_len); +	if (!ASSERT_GE(err, 0, "sendto frag6_2")) +		return -1; + +	return 0; +} + +void test_bpf_ip_check_defrag_ok(bool ipv6) +{ +	struct network_helper_opts rx_opts = { +		.timeout_ms = 1000, +		.noconnect = true, +	}; +	struct network_helper_opts tx_ops = { +		.timeout_ms = 1000, +		.type = SOCK_RAW, +		.proto = IPPROTO_RAW, +		.noconnect = true, +	}; +	struct sockaddr_storage caddr; +	struct ip_check_defrag *skel; +	struct nstoken *nstoken; +	int client_tx_fd = -1; +	int client_rx_fd = -1; +	socklen_t caddr_len; +	int srv_fd = -1; +	char buf[1024]; +	int len, err; + +	skel = ip_check_defrag__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		return; + +	if (!ASSERT_OK(setup_topology(ipv6), "setup_topology")) +		goto out; + +	if (!ASSERT_OK(attach(skel, ipv6), "attach")) +		goto out; + +	/* Start server in ns1 */ +	nstoken = open_netns(NS1); +	if (!ASSERT_OK_PTR(nstoken, "setns ns1")) +		goto out; +	srv_fd = start_server(ipv6 ? 
AF_INET6 : AF_INET, SOCK_DGRAM, NULL, SERVER_PORT, 0); +	close_netns(nstoken); +	if (!ASSERT_GE(srv_fd, 0, "start_server")) +		goto out; + +	/* Open tx raw socket in ns0 */ +	nstoken = open_netns(NS0); +	if (!ASSERT_OK_PTR(nstoken, "setns ns0")) +		goto out; +	client_tx_fd = connect_to_fd_opts(srv_fd, &tx_ops); +	close_netns(nstoken); +	if (!ASSERT_GE(client_tx_fd, 0, "connect_to_fd_opts")) +		goto out; + +	/* Open rx socket in ns0 */ +	nstoken = open_netns(NS0); +	if (!ASSERT_OK_PTR(nstoken, "setns ns0")) +		goto out; +	client_rx_fd = connect_to_fd_opts(srv_fd, &rx_opts); +	close_netns(nstoken); +	if (!ASSERT_GE(client_rx_fd, 0, "connect_to_fd_opts")) +		goto out; + +	/* Bind rx socket to a premeditated port */ +	memset(&caddr, 0, sizeof(caddr)); +	nstoken = open_netns(NS0); +	if (!ASSERT_OK_PTR(nstoken, "setns ns0")) +		goto out; +	if (ipv6) { +		struct sockaddr_in6 *c = (struct sockaddr_in6 *)&caddr; + +		c->sin6_family = AF_INET6; +		inet_pton(AF_INET6, VETH0_ADDR6, &c->sin6_addr); +		c->sin6_port = htons(CLIENT_PORT); +		err = bind(client_rx_fd, (struct sockaddr *)c, sizeof(*c)); +	} else { +		struct sockaddr_in *c = (struct sockaddr_in *)&caddr; + +		c->sin_family = AF_INET; +		inet_pton(AF_INET, VETH0_ADDR, &c->sin_addr); +		c->sin_port = htons(CLIENT_PORT); +		err = bind(client_rx_fd, (struct sockaddr *)c, sizeof(*c)); +	} +	close_netns(nstoken); +	if (!ASSERT_OK(err, "bind")) +		goto out; + +	/* Send message in fragments */ +	if (ipv6) { +		if (!ASSERT_OK(send_frags6(client_tx_fd), "send_frags6")) +			goto out; +	} else { +		if (!ASSERT_OK(send_frags(client_tx_fd), "send_frags")) +			goto out; +	} + +	if (!ASSERT_EQ(skel->bss->shootdowns, 0, "shootdowns")) +		goto out; + +	/* Receive reassembled msg on server and echo back to client */ +	caddr_len = sizeof(caddr); +	len = recvfrom(srv_fd, buf, sizeof(buf), 0, (struct sockaddr *)&caddr, &caddr_len); +	if (!ASSERT_GE(len, 0, "server recvfrom")) +		goto out; +	len = sendto(srv_fd, buf, len, 0, (struct sockaddr *)&caddr, caddr_len); +	if (!ASSERT_GE(len, 0, "server sendto")) +		goto out; + +	/* Expect reassembed message to be echoed back */ +	len = recvfrom(client_rx_fd, buf, sizeof(buf), 0, NULL, NULL); +	if (!ASSERT_EQ(len, sizeof(MAGIC_MESSAGE) - 1, "client short read")) +		goto out; + +out: +	if (client_rx_fd != -1) +		close(client_rx_fd); +	if (client_tx_fd != -1) +		close(client_tx_fd); +	if (srv_fd != -1) +		close(srv_fd); +	cleanup_topology(); +	ip_check_defrag__destroy(skel); +} + +void test_bpf_ip_check_defrag(void) +{ +	if (test__start_subtest("v4")) +		test_bpf_ip_check_defrag_ok(false); +	if (test__start_subtest("v6")) +		test_bpf_ip_check_defrag_ok(true); +} diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index a543742cd7bd..2eb71559713c 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -173,8 +173,8 @@ static void verify_fail(struct kfunc_test_params *param)  	case tc_test:  		topts.data_in = &pkt_v4;  		topts.data_size_in = sizeof(pkt_v4); -		break;  		topts.repeat = 1; +		break;  	}  	skel = kfunc_call_fail__open_opts(&opts); diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index 2173c4bb555e..179fe300534f 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -304,14 +304,6 @@ cleanup:  	
kprobe_multi__destroy(skel);  } -static inline __u64 get_time_ns(void) -{ -	struct timespec t; - -	clock_gettime(CLOCK_MONOTONIC, &t); -	return (__u64) t.tv_sec * 1000000000 + t.tv_nsec; -} -  static size_t symbol_hash(long key, void *ctx __maybe_unused)  {  	return str_hash((const char *) key); diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c index f63309fd0e28..18cf7b17463d 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_list.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -23,7 +23,7 @@ static struct {  	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \  	{ #test "_missing_lock_pop_back", \  	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, -	TEST(kptr, 32) +	TEST(kptr, 40)  	TEST(global, 16)  	TEST(map, 0)  	TEST(inner_map, 0) @@ -31,7 +31,7 @@ static struct {  #define TEST(test, op) \  	{ #test "_kptr_incorrect_lock_" #op, \  	  "held lock and object are not in the same allocation\n" \ -	  "bpf_spin_lock at off=32 must be held for bpf_list_head" }, \ +	  "bpf_spin_lock at off=40 must be held for bpf_list_head" }, \  	{ #test "_global_incorrect_lock_" #op, \  	  "held lock and object are not in the same allocation\n" \  	  "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \ @@ -84,23 +84,23 @@ static struct {  	{ "double_push_back", "arg#1 expected pointer to allocated object" },  	{ "no_node_value_type", "bpf_list_node not found at offset=0" },  	{ "incorrect_value_type", -	  "operation on bpf_list_head expects arg#1 bpf_list_node at offset=40 in struct foo, " +	  "operation on bpf_list_head expects arg#1 bpf_list_node at offset=48 in struct foo, "  	  "but arg is at offset=0 in struct bar" },  	{ "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, -	{ "incorrect_node_off1", "bpf_list_node not found at offset=41" }, -	{ "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=40 in struct foo" }, +	{ "incorrect_node_off1", "bpf_list_node not found at offset=49" }, +	{ "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=48 in struct foo" },  	{ "no_head_type", "bpf_list_head not found at offset=0" },  	{ "incorrect_head_var_off1", "R1 doesn't have constant offset" },  	{ "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, -	{ "incorrect_head_off1", "bpf_list_head not found at offset=17" }, +	{ "incorrect_head_off1", "bpf_list_head not found at offset=25" },  	{ "incorrect_head_off2", "bpf_list_head not found at offset=1" },  	{ "pop_front_off", -	  "15: (bf) r1 = r6                      ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) " -	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n" +	  "15: (bf) r1 = r6                      ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) " +	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"  	  "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },  	{ "pop_back_off", -	  "15: (bf) r1 = r6                      ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) " -	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n" +	  "15: (bf) r1 = r6                      ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) " +	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"  	  "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },  }; @@ -257,7 +257,7 
@@ static struct btf *init_btf(void)  	hid = btf__add_struct(btf, "bpf_list_head", 16);  	if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head"))  		goto end; -	nid = btf__add_struct(btf, "bpf_list_node", 16); +	nid = btf__add_struct(btf, "bpf_list_node", 24);  	if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node"))  		goto end;  	return btf; @@ -276,7 +276,7 @@ static void list_and_rb_node_same_struct(bool refcount_field)  	if (!ASSERT_OK_PTR(btf, "init_btf"))  		return; -	bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 24); +	bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 32);  	if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node"))  		return; @@ -286,17 +286,17 @@ static void list_and_rb_node_same_struct(bool refcount_field)  			return;  	} -	id = btf__add_struct(btf, "bar", refcount_field ? 44 : 40); +	id = btf__add_struct(btf, "bar", refcount_field ? 60 : 56);  	if (!ASSERT_GT(id, 0, "btf__add_struct bar"))  		return;  	err = btf__add_field(btf, "a", LIST_NODE, 0, 0);  	if (!ASSERT_OK(err, "btf__add_field bar::a"))  		return; -	err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 128, 0); +	err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 192, 0);  	if (!ASSERT_OK(err, "btf__add_field bar::c"))  		return;  	if (refcount_field) { -		err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 320, 0); +		err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 448, 0);  		if (!ASSERT_OK(err, "btf__add_field bar::ref"))  			return;  	} @@ -527,7 +527,7 @@ static void test_btf(void)  		btf = init_btf();  		if (!ASSERT_OK_PTR(btf, "init_btf"))  			break; -		id = btf__add_struct(btf, "foo", 36); +		id = btf__add_struct(btf, "foo", 44);  		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))  			break;  		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -536,7 +536,7 @@ static void test_btf(void)  		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);  		if (!ASSERT_OK(err, "btf__add_field foo::b"))  			break; -		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); +		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);  		if (!ASSERT_OK(err, "btf__add_field foo::c"))  			break;  		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0); @@ -553,7 +553,7 @@ static void test_btf(void)  		btf = init_btf();  		if (!ASSERT_OK_PTR(btf, "init_btf"))  			break; -		id = btf__add_struct(btf, "foo", 36); +		id = btf__add_struct(btf, "foo", 44);  		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))  			break;  		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -562,13 +562,13 @@ static void test_btf(void)  		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);  		if (!ASSERT_OK(err, "btf__add_field foo::b"))  			break; -		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); +		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);  		if (!ASSERT_OK(err, "btf__add_field foo::c"))  			break;  		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);  		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))  			break; -		id = btf__add_struct(btf, "bar", 36); +		id = btf__add_struct(btf, "bar", 44);  		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))  			break;  		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -577,7 +577,7 @@ static void test_btf(void)  		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);  		if (!ASSERT_OK(err, "btf__add_field bar::b"))  			break; -		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); +		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);  		if (!ASSERT_OK(err, "btf__add_field bar::c"))  			break;  		id = btf__add_decl_tag(btf, "contains:foo:b", 7, 
0); @@ -594,19 +594,19 @@ static void test_btf(void)  		btf = init_btf();  		if (!ASSERT_OK_PTR(btf, "init_btf"))  			break; -		id = btf__add_struct(btf, "foo", 20); +		id = btf__add_struct(btf, "foo", 28);  		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))  			break;  		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);  		if (!ASSERT_OK(err, "btf__add_field foo::a"))  			break; -		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); +		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);  		if (!ASSERT_OK(err, "btf__add_field foo::b"))  			break;  		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);  		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))  			break; -		id = btf__add_struct(btf, "bar", 16); +		id = btf__add_struct(btf, "bar", 24);  		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))  			break;  		err = btf__add_field(btf, "a", LIST_NODE, 0, 0); @@ -623,19 +623,19 @@ static void test_btf(void)  		btf = init_btf();  		if (!ASSERT_OK_PTR(btf, "init_btf"))  			break; -		id = btf__add_struct(btf, "foo", 20); +		id = btf__add_struct(btf, "foo", 28);  		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))  			break;  		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);  		if (!ASSERT_OK(err, "btf__add_field foo::a"))  			break; -		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); +		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);  		if (!ASSERT_OK(err, "btf__add_field foo::b"))  			break;  		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);  		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))  			break; -		id = btf__add_struct(btf, "bar", 36); +		id = btf__add_struct(btf, "bar", 44);  		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))  			break;  		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -644,13 +644,13 @@ static void test_btf(void)  		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);  		if (!ASSERT_OK(err, "btf__add_field bar::b"))  			break; -		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); +		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);  		if (!ASSERT_OK(err, "btf__add_field bar::c"))  			break;  		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);  		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))  			break; -		id = btf__add_struct(btf, "baz", 16); +		id = btf__add_struct(btf, "baz", 24);  		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))  			break;  		err = btf__add_field(btf, "a", LIST_NODE, 0, 0); @@ -667,7 +667,7 @@ static void test_btf(void)  		btf = init_btf();  		if (!ASSERT_OK_PTR(btf, "init_btf"))  			break; -		id = btf__add_struct(btf, "foo", 36); +		id = btf__add_struct(btf, "foo", 44);  		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))  			break;  		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -676,13 +676,13 @@ static void test_btf(void)  		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);  		if (!ASSERT_OK(err, "btf__add_field foo::b"))  			break; -		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); +		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);  		if (!ASSERT_OK(err, "btf__add_field foo::c"))  			break;  		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);  		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))  			break; -		id = btf__add_struct(btf, "bar", 36); +		id = btf__add_struct(btf, "bar", 44);  		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))  			break;  		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -691,13 +691,13 @@ static void test_btf(void)  		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);  		if (!ASSERT_OK(err, "btf__add_field bar:b"))  			break; -		err = btf__add_field(btf, "c", 
SPIN_LOCK, 256, 0); +		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);  		if (!ASSERT_OK(err, "btf__add_field bar:c"))  			break;  		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);  		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))  			break; -		id = btf__add_struct(btf, "baz", 16); +		id = btf__add_struct(btf, "baz", 24);  		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))  			break;  		err = btf__add_field(btf, "a", LIST_NODE, 0, 0); @@ -726,7 +726,7 @@ static void test_btf(void)  		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);  		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))  			break; -		id = btf__add_struct(btf, "bar", 36); +		id = btf__add_struct(btf, "bar", 44);  		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))  			break;  		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -735,13 +735,13 @@ static void test_btf(void)  		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);  		if (!ASSERT_OK(err, "btf__add_field bar::b"))  			break; -		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); +		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);  		if (!ASSERT_OK(err, "btf__add_field bar::c"))  			break;  		id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0);  		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag"))  			break; -		id = btf__add_struct(btf, "baz", 36); +		id = btf__add_struct(btf, "baz", 44);  		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))  			break;  		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); @@ -750,13 +750,13 @@ static void test_btf(void)  		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);  		if (!ASSERT_OK(err, "btf__add_field bar::b"))  			break; -		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); +		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);  		if (!ASSERT_OK(err, "btf__add_field bar::c"))  			break;  		id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0);  		if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a"))  			break; -		id = btf__add_struct(btf, "bam", 16); +		id = btf__add_struct(btf, "bam", 24);  		if (!ASSERT_EQ(id, 11, "btf__add_struct bam"))  			break;  		err = btf__add_field(btf, "a", LIST_NODE, 0, 0); diff --git a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c index 76f1da877f81..b25b870f87ba 100644 --- a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c +++ b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c @@ -5,6 +5,7 @@  #include <network_helpers.h>  #include "local_kptr_stash.skel.h" +#include "local_kptr_stash_fail.skel.h"  static void test_local_kptr_stash_simple(void)  {  	LIBBPF_OPTS(bpf_test_run_opts, opts, @@ -26,6 +27,27 @@ static void test_local_kptr_stash_simple(void)  	local_kptr_stash__destroy(skel);  } +static void test_local_kptr_stash_plain(void) +{ +	LIBBPF_OPTS(bpf_test_run_opts, opts, +		    .data_in = &pkt_v4, +		    .data_size_in = sizeof(pkt_v4), +		    .repeat = 1, +	); +	struct local_kptr_stash *skel; +	int ret; + +	skel = local_kptr_stash__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load")) +		return; + +	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_plain), &opts); +	ASSERT_OK(ret, "local_kptr_stash_add_plain run"); +	ASSERT_OK(opts.retval, "local_kptr_stash_add_plain retval"); + +	local_kptr_stash__destroy(skel); +} +  static void test_local_kptr_stash_unstash(void)  {  	LIBBPF_OPTS(bpf_test_run_opts, opts, @@ -51,10 +73,19 @@ static void test_local_kptr_stash_unstash(void)  	local_kptr_stash__destroy(skel);  } -void 
test_local_kptr_stash_success(void) +static void test_local_kptr_stash_fail(void) +{ +	RUN_TESTS(local_kptr_stash_fail); +} + +void test_local_kptr_stash(void)  {  	if (test__start_subtest("local_kptr_stash_simple"))  		test_local_kptr_stash_simple(); +	if (test__start_subtest("local_kptr_stash_plain")) +		test_local_kptr_stash_plain();  	if (test__start_subtest("local_kptr_stash_unstash"))  		test_local_kptr_stash_unstash(); +	if (test__start_subtest("local_kptr_stash_fail")) +		test_local_kptr_stash_fail();  } diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c index dba71d98a227..effd78b2a657 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_fixup.c +++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c @@ -124,7 +124,7 @@ static void missing_map(void)  	ASSERT_FALSE(bpf_map__autocreate(skel->maps.missing_map), "missing_map_autocreate");  	ASSERT_HAS_SUBSTR(log_buf, -			  "8: <invalid BPF map reference>\n" +			  ": <invalid BPF map reference>\n"  			  "BPF map 'missing_map' is referenced but wasn't created\n",  			  "log_buf"); diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h new file mode 100644 index 000000000000..61333f2a03f9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LWT_HELPERS_H +#define __LWT_HELPERS_H + +#include <time.h> +#include <net/if.h> +#include <linux/if_tun.h> +#include <linux/icmp.h> + +#include "test_progs.h" + +#define log_err(MSG, ...) \ +	fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ +		__FILE__, __LINE__, strerror(errno), ##__VA_ARGS__) + +#define RUN_TEST(name)                                                        \ +	({                                                                    \ +		if (test__start_subtest(#name))                               \ +			if (ASSERT_OK(netns_create(), "netns_create")) {      \ +				struct nstoken *token = open_netns(NETNS);    \ +				if (ASSERT_OK_PTR(token, "setns")) {          \ +					test_ ## name();                      \ +					close_netns(token);                   \ +				}                                             \ +				netns_delete();                               \ +			}                                                     \ +	}) + +#define NETNS "ns_lwt" + +static inline int netns_create(void) +{ +	return system("ip netns add " NETNS); +} + +static inline int netns_delete(void) +{ +	return system("ip netns del " NETNS ">/dev/null 2>&1"); +} + +static int open_tuntap(const char *dev_name, bool need_mac) +{ +	int err = 0; +	struct ifreq ifr; +	int fd = open("/dev/net/tun", O_RDWR); + +	if (!ASSERT_GT(fd, 0, "open(/dev/net/tun)")) +		return -1; + +	ifr.ifr_flags = IFF_NO_PI | (need_mac ? 
IFF_TAP : IFF_TUN); +	memcpy(ifr.ifr_name, dev_name, IFNAMSIZ); + +	err = ioctl(fd, TUNSETIFF, &ifr); +	if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) { +		close(fd); +		return -1; +	} + +	err = fcntl(fd, F_SETFL, O_NONBLOCK); +	if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) { +		close(fd); +		return -1; +	} + +	return fd; +} + +#define ICMP_PAYLOAD_SIZE     100 + +/* Match an ICMP packet with payload len ICMP_PAYLOAD_SIZE */ +static int __expect_icmp_ipv4(char *buf, ssize_t len) +{ +	struct iphdr *ip = (struct iphdr *)buf; +	struct icmphdr *icmp = (struct icmphdr *)(ip + 1); +	ssize_t min_header_len = sizeof(*ip) + sizeof(*icmp); + +	if (len < min_header_len) +		return -1; + +	if (ip->protocol != IPPROTO_ICMP) +		return -1; + +	if (icmp->type != ICMP_ECHO) +		return -1; + +	return len == ICMP_PAYLOAD_SIZE + min_header_len; +} + +typedef int (*filter_t) (char *, ssize_t); + +/* wait_for_packet - wait for a packet that matches the filter + * + * @fd: tun fd/packet socket to read packet + * @filter: filter function, returning 1 if matches + * @timeout: timeout to wait for the packet + * + * Returns 1 if a matching packet is read, 0 if timeout expired, -1 on error. + */ +static int wait_for_packet(int fd, filter_t filter, struct timeval *timeout) +{ +	char buf[4096]; +	int max_retry = 5; /* in case we read some spurious packets */ +	fd_set fds; + +	FD_ZERO(&fds); +	while (max_retry--) { +		/* Linux modifies timeout arg... So make a copy */ +		struct timeval copied_timeout = *timeout; +		ssize_t ret = -1; + +		FD_SET(fd, &fds); + +		ret = select(1 + fd, &fds, NULL, NULL, &copied_timeout); +		if (ret <= 0) { +			if (errno == EINTR) +				continue; +			else if (errno == EAGAIN || ret == 0) +				return 0; + +			log_err("select failed"); +			return -1; +		} + +		ret = read(fd, buf, sizeof(buf)); + +		if (ret <= 0) { +			log_err("read(dev): %ld", ret); +			return -1; +		} + +		if (filter && filter(buf, ret) > 0) +			return 1; +	} + +	return 0; +} + +#endif /* __LWT_HELPERS_H */ diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c new file mode 100644 index 000000000000..59b38569f310 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * Test suite of lwt_xmit BPF programs that redirect packets + *   The file tests focus not only if these programs work as expected normally, + *   but also if they can handle abnormal situations gracefully. + * + * WARNING + * ------- + *  This test suite may crash the kernel, thus should be run in a VM. + * + * Setup: + * --------- + *  All tests are performed in a single netns. Two lwt encap routes are setup for + *  each subtest: + * + *    ip route add 10.0.0.0/24 encap bpf xmit <obj> sec "<ingress_sec>" dev link_err + *    ip route add 20.0.0.0/24 encap bpf xmit <obj> sec "<egress_sec>" dev link_err + * + *  Here <obj> is statically defined to test_lwt_redirect.bpf.o, and each section + *  of this object holds a program entry to test. The BPF object is built from + *  progs/test_lwt_redirect.c. We didn't use generated BPF skeleton since the + *  attachment for lwt programs are not supported by libbpf yet. + * + *  For testing, ping commands are run in the test netns: + * + *    ping 10.0.0.<ifindex> -c 1 -w 1 -s 100 + *    ping 20.0.0.<ifindex> -c 1 -w 1 -s 100 + * + * Scenarios: + * -------------------------------- + *  1. Redirect to a running tap/tun device + *  2. 
Redirect to a down tap/tun device + *  3. Redirect to a vlan device with lower layer down + * + *  Case 1, ping packets should be received by packet socket on target device + *  when redirected to ingress, and by tun/tap fd when redirected to egress. + * + *  Case 2,3 are considered successful as long as they do not crash the kernel + *  as a regression. + * + *  Case 1,2 use tap device to test redirect to device that requires MAC + *  header, and tun device to test the case with no MAC header added. + */ +#include <sys/socket.h> +#include <net/if.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/if_tun.h> +#include <linux/icmp.h> +#include <arpa/inet.h> +#include <unistd.h> +#include <errno.h> +#include <stdbool.h> +#include <stdlib.h> + +#include "lwt_helpers.h" +#include "test_progs.h" +#include "network_helpers.h" + +#define BPF_OBJECT            "test_lwt_redirect.bpf.o" +#define INGRESS_SEC(need_mac) ((need_mac) ? "redir_ingress" : "redir_ingress_nomac") +#define EGRESS_SEC(need_mac)  ((need_mac) ? "redir_egress" : "redir_egress_nomac") +#define LOCAL_SRC             "10.0.0.1" +#define CIDR_TO_INGRESS       "10.0.0.0/24" +#define CIDR_TO_EGRESS        "20.0.0.0/24" + +/* ping to redirect toward given dev, with last byte of dest IP being the target + * device index. + * + * Note: ping command inside BPF-CI is busybox version, so it does not have certain + * function, such like -m option to set packet mark. + */ +static void ping_dev(const char *dev, bool is_ingress) +{ +	int link_index = if_nametoindex(dev); +	char ip[256]; + +	if (!ASSERT_GE(link_index, 0, "if_nametoindex")) +		return; + +	if (is_ingress) +		snprintf(ip, sizeof(ip), "10.0.0.%d", link_index); +	else +		snprintf(ip, sizeof(ip), "20.0.0.%d", link_index); + +	/* We won't get a reply. Don't fail here */ +	SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1", +		   ip, ICMP_PAYLOAD_SIZE); +} + +static int new_packet_sock(const char *ifname) +{ +	int err = 0; +	int ignore_outgoing = 1; +	int ifindex = -1; +	int s = -1; + +	s = socket(AF_PACKET, SOCK_RAW, 0); +	if (!ASSERT_GE(s, 0, "socket(AF_PACKET)")) +		return -1; + +	ifindex = if_nametoindex(ifname); +	if (!ASSERT_GE(ifindex, 0, "if_nametoindex")) { +		close(s); +		return -1; +	} + +	struct sockaddr_ll addr = { +		.sll_family = AF_PACKET, +		.sll_protocol = htons(ETH_P_IP), +		.sll_ifindex = ifindex, +	}; + +	err = bind(s, (struct sockaddr *)&addr, sizeof(addr)); +	if (!ASSERT_OK(err, "bind(AF_PACKET)")) { +		close(s); +		return -1; +	} + +	/* Use packet socket to capture only the ingress, so we can distinguish +	 * the case where a regression that actually redirects the packet to +	 * the egress. 
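+	 * PACKET_IGNORE_OUTGOING below tells the kernel not to deliver frames +	 * transmitted out of this interface to the packet socket, so only traffic +	 * on the device's ingress path is seen by wait_for_packet().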
+	 */ +	err = setsockopt(s, SOL_PACKET, PACKET_IGNORE_OUTGOING, +			 &ignore_outgoing, sizeof(ignore_outgoing)); +	if (!ASSERT_OK(err, "setsockopt(PACKET_IGNORE_OUTGOING)")) { +		close(s); +		return -1; +	} + +	err = fcntl(s, F_SETFL, O_NONBLOCK); +	if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) { +		close(s); +		return -1; +	} + +	return s; +} + +static int expect_icmp(char *buf, ssize_t len) +{ +	struct ethhdr *eth = (struct ethhdr *)buf; + +	if (len < (ssize_t)sizeof(*eth)) +		return -1; + +	if (eth->h_proto == htons(ETH_P_IP)) +		return __expect_icmp_ipv4((char *)(eth + 1), len - sizeof(*eth)); + +	return -1; +} + +static int expect_icmp_nomac(char *buf, ssize_t len) +{ +	return __expect_icmp_ipv4(buf, len); +} + +static void send_and_capture_test_packets(const char *test_name, int tap_fd, +					  const char *target_dev, bool need_mac) +{ +	int psock = -1; +	struct timeval timeo = { +		.tv_sec = 0, +		.tv_usec = 250000, +	}; +	int ret = -1; + +	filter_t filter = need_mac ? expect_icmp : expect_icmp_nomac; + +	ping_dev(target_dev, false); + +	ret = wait_for_packet(tap_fd, filter, &timeo); +	if (!ASSERT_EQ(ret, 1, "wait_for_epacket")) { +		log_err("%s egress test fails", test_name); +		goto out; +	} + +	psock = new_packet_sock(target_dev); +	ping_dev(target_dev, true); + +	ret = wait_for_packet(psock, filter, &timeo); +	if (!ASSERT_EQ(ret, 1, "wait_for_ipacket")) { +		log_err("%s ingress test fails", test_name); +		goto out; +	} + +out: +	if (psock >= 0) +		close(psock); +} + +static int setup_redirect_target(const char *target_dev, bool need_mac) +{ +	int target_index = -1; +	int tap_fd = -1; + +	tap_fd = open_tuntap(target_dev, need_mac); +	if (!ASSERT_GE(tap_fd, 0, "open_tuntap")) +		goto fail; + +	target_index = if_nametoindex(target_dev); +	if (!ASSERT_GE(target_index, 0, "if_nametoindex")) +		goto fail; + +	SYS(fail, "ip link add link_err type dummy"); +	SYS(fail, "ip link set lo up"); +	SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32"); +	SYS(fail, "ip link set link_err up"); +	SYS(fail, "ip link set %s up", target_dev); + +	SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec %s", +	    CIDR_TO_INGRESS, BPF_OBJECT, INGRESS_SEC(need_mac)); + +	SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec %s", +	    CIDR_TO_EGRESS, BPF_OBJECT, EGRESS_SEC(need_mac)); + +	return tap_fd; + +fail: +	if (tap_fd >= 0) +		close(tap_fd); +	return -1; +} + +static void test_lwt_redirect_normal(void) +{ +	const char *target_dev = "tap0"; +	int tap_fd = -1; +	bool need_mac = true; + +	tap_fd = setup_redirect_target(target_dev, need_mac); +	if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target")) +		return; + +	send_and_capture_test_packets(__func__, tap_fd, target_dev, need_mac); +	close(tap_fd); +} + +static void test_lwt_redirect_normal_nomac(void) +{ +	const char *target_dev = "tun0"; +	int tap_fd = -1; +	bool need_mac = false; + +	tap_fd = setup_redirect_target(target_dev, need_mac); +	if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target")) +		return; + +	send_and_capture_test_packets(__func__, tap_fd, target_dev, need_mac); +	close(tap_fd); +} + +/* This test aims to prevent regression of future. As long as the kernel does + * not panic, it is considered as success. 
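+ * Sending towards a device that was set administratively down exercises the + * error handling in the redirect path; no reply traffic is expected, so the + * ping results are deliberately not checked.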
+ */ +static void __test_lwt_redirect_dev_down(bool need_mac) +{ +	const char *target_dev = "tap0"; +	int tap_fd = -1; + +	tap_fd = setup_redirect_target(target_dev, need_mac); +	if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target")) +		return; + +	SYS(out, "ip link set %s down", target_dev); +	ping_dev(target_dev, true); +	ping_dev(target_dev, false); + +out: +	close(tap_fd); +} + +static void test_lwt_redirect_dev_down(void) +{ +	__test_lwt_redirect_dev_down(true); +} + +static void test_lwt_redirect_dev_down_nomac(void) +{ +	__test_lwt_redirect_dev_down(false); +} + +/* This test aims to prevent regression of future. As long as the kernel does + * not panic, it is considered as success. + */ +static void test_lwt_redirect_dev_carrier_down(void) +{ +	const char *lower_dev = "tap0"; +	const char *vlan_dev = "vlan100"; +	int tap_fd = -1; + +	tap_fd = setup_redirect_target(lower_dev, true); +	if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target")) +		return; + +	SYS(out, "ip link add vlan100 link %s type vlan id 100", lower_dev); +	SYS(out, "ip link set %s up", vlan_dev); +	SYS(out, "ip link set %s down", lower_dev); +	ping_dev(vlan_dev, true); +	ping_dev(vlan_dev, false); + +out: +	close(tap_fd); +} + +static void *test_lwt_redirect_run(void *arg) +{ +	netns_delete(); +	RUN_TEST(lwt_redirect_normal); +	RUN_TEST(lwt_redirect_normal_nomac); +	RUN_TEST(lwt_redirect_dev_down); +	RUN_TEST(lwt_redirect_dev_down_nomac); +	RUN_TEST(lwt_redirect_dev_carrier_down); +	return NULL; +} + +void test_lwt_redirect(void) +{ +	pthread_t test_thread; +	int err; + +	/* Run the tests in their own thread to isolate the namespace changes +	 * so they do not affect the environment of other tests. +	 * (specifically needed because of unshare(CLONE_NEWNS) in open_netns()) +	 */ +	err = pthread_create(&test_thread, NULL, &test_lwt_redirect_run, NULL); +	if (ASSERT_OK(err, "pthread_create")) +		ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c new file mode 100644 index 000000000000..f4bb2d5fcae0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * Test suite of lwt BPF programs that reroutes packets + *   The file tests focus not only if these programs work as expected normally, + *   but also if they can handle abnormal situations gracefully. This test + *   suite currently only covers lwt_xmit hook. lwt_in tests have not been + *   implemented. + * + * WARNING + * ------- + *  This test suite can crash the kernel, thus should be run in a VM. + * + * Setup: + * --------- + *  all tests are performed in a single netns. A lwt encap route is setup for + *  each subtest: + * + *    ip route add 10.0.0.0/24 encap bpf xmit <obj> sec "<section_N>" dev link_err + * + *  Here <obj> is statically defined to test_lwt_reroute.bpf.o, and it contains + *  a single test program entry. This program sets packet mark by last byte of + *  the IPv4 daddr. For example, a packet going to 1.2.3.4 will receive a skb + *  mark 4. A packet will only be marked once, and IP x.x.x.0 will be skipped + *  to avoid route loop. We didn't use generated BPF skeleton since the + *  attachment for lwt programs are not supported by libbpf yet. 
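+ * + *  The mark is what drives the reroute: together with the fwmark rule and + *  the table 100 default route below, a packet marked with the tun ifindex + *  is looked up again and ends up on the tun device, where the test reads + *  it back from the tun fd.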
+ * + *  The test program will bring up a tun device, and sets up the following + *  routes: + * + *    ip rule add pref 100 from all fwmark <tun_index> lookup 100 + *    ip route add table 100 default dev tun0 + * + *  For normal testing, a ping command is running in the test netns: + * + *    ping 10.0.0.<tun_index> -c 1 -w 1 -s 100 + * + *  For abnormal testing, fq is used as the qdisc of the tun device. Then a UDP + *  socket will try to overflow the fq queue and trigger qdisc drop error. + * + * Scenarios: + * -------------------------------- + *  1. Reroute to a running tun device + *  2. Reroute to a device where qdisc drop + * + *  For case 1, ping packets should be received by the tun device. + * + *  For case 2, force UDP packets to overflow fq limit. As long as kernel + *  is not crashed, it is considered successful. + */ +#include "lwt_helpers.h" +#include "network_helpers.h" +#include <linux/net_tstamp.h> + +#define BPF_OBJECT            "test_lwt_reroute.bpf.o" +#define LOCAL_SRC             "10.0.0.1" +#define TEST_CIDR             "10.0.0.0/24" +#define XMIT_HOOK             "xmit" +#define XMIT_SECTION          "lwt_xmit" +#define NSEC_PER_SEC          1000000000ULL + +/* send a ping to be rerouted to the target device */ +static void ping_once(const char *ip) +{ +	/* We won't get a reply. Don't fail here */ +	SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1", +		   ip, ICMP_PAYLOAD_SIZE); +} + +/* Send snd_target UDP packets to overflow the fq queue and trigger qdisc drop + * error. This is done via TX tstamp to force buffering delayed packets. + */ +static int overflow_fq(int snd_target, const char *target_ip) +{ +	struct sockaddr_in addr = { +		.sin_family = AF_INET, +		.sin_port = htons(1234), +	}; + +	char data_buf[8]; /* only #pkts matter, so use a random small buffer */ +	char control_buf[CMSG_SPACE(sizeof(uint64_t))]; +	struct iovec iov = { +		.iov_base = data_buf, +		.iov_len = sizeof(data_buf), +	}; +	int err = -1; +	int s = -1; +	struct sock_txtime txtime_on = { +		.clockid = CLOCK_MONOTONIC, +		.flags = 0, +	}; +	struct msghdr msg = { +		.msg_name = &addr, +		.msg_namelen = sizeof(addr), +		.msg_control = control_buf, +		.msg_controllen = sizeof(control_buf), +		.msg_iovlen = 1, +		.msg_iov = &iov, +	}; +	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); + +	memset(data_buf, 0, sizeof(data_buf)); + +	s = socket(AF_INET, SOCK_DGRAM, 0); +	if (!ASSERT_GE(s, 0, "socket")) +		goto out; + +	err = setsockopt(s, SOL_SOCKET, SO_TXTIME, &txtime_on, sizeof(txtime_on)); +	if (!ASSERT_OK(err, "setsockopt(SO_TXTIME)")) +		goto out; + +	err = inet_pton(AF_INET, target_ip, &addr.sin_addr); +	if (!ASSERT_EQ(err, 1, "inet_pton")) +		goto out; + +	while (snd_target > 0) { +		struct timespec now; + +		memset(control_buf, 0, sizeof(control_buf)); +		cmsg->cmsg_type = SCM_TXTIME; +		cmsg->cmsg_level = SOL_SOCKET; +		cmsg->cmsg_len = CMSG_LEN(sizeof(uint64_t)); + +		err = clock_gettime(CLOCK_MONOTONIC, &now); +		if (!ASSERT_OK(err, "clock_gettime(CLOCK_MONOTONIC)")) { +			err = -1; +			goto out; +		} + +		*(uint64_t *)CMSG_DATA(cmsg) = (now.tv_nsec + 1) * NSEC_PER_SEC + +					       now.tv_nsec; + +		/* we will intentionally send more than fq limit, so ignore +		 * the error here. 
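+		 * The SCM_TXTIME timestamp set above is meant to make fq hold each +		 * packet until its transmit time instead of sending it right away, +		 * so the queue fills and, once the qdisc limit is exceeded, fq +		 * starts dropping, which is the error path this helper exercises.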
+		 */ +		sendmsg(s, &msg, MSG_NOSIGNAL); +		snd_target--; +	} + +	/* no kernel crash so far is considered success */ +	err = 0; + +out: +	if (s >= 0) +		close(s); + +	return err; +} + +static int setup(const char *tun_dev) +{ +	int target_index = -1; +	int tap_fd = -1; + +	tap_fd = open_tuntap(tun_dev, false); +	if (!ASSERT_GE(tap_fd, 0, "open_tun")) +		return -1; + +	target_index = if_nametoindex(tun_dev); +	if (!ASSERT_GE(target_index, 0, "if_nametoindex")) +		return -1; + +	SYS(fail, "ip link add link_err type dummy"); +	SYS(fail, "ip link set lo up"); +	SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32"); +	SYS(fail, "ip link set link_err up"); +	SYS(fail, "ip link set %s up", tun_dev); + +	SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec lwt_xmit", +	    TEST_CIDR, BPF_OBJECT); + +	SYS(fail, "ip rule add pref 100 from all fwmark %d lookup 100", +	    target_index); +	SYS(fail, "ip route add t 100 default dev %s", tun_dev); + +	return tap_fd; + +fail: +	if (tap_fd >= 0) +		close(tap_fd); +	return -1; +} + +static void test_lwt_reroute_normal_xmit(void) +{ +	const char *tun_dev = "tun0"; +	int tun_fd = -1; +	int ifindex = -1; +	char ip[256]; +	struct timeval timeo = { +		.tv_sec = 0, +		.tv_usec = 250000, +	}; + +	tun_fd = setup(tun_dev); +	if (!ASSERT_GE(tun_fd, 0, "setup_reroute")) +		return; + +	ifindex = if_nametoindex(tun_dev); +	if (!ASSERT_GE(ifindex, 0, "if_nametoindex")) +		return; + +	snprintf(ip, 256, "10.0.0.%d", ifindex); + +	/* ping packets should be received by the tun device */ +	ping_once(ip); + +	if (!ASSERT_EQ(wait_for_packet(tun_fd, __expect_icmp_ipv4, &timeo), 1, +		       "wait_for_packet")) +		log_err("%s xmit", __func__); +} + +/* + * Test the failure case when the skb is dropped at the qdisc. This is a + * regression prevention at the xmit hook only. + */ +static void test_lwt_reroute_qdisc_dropped(void) +{ +	const char *tun_dev = "tun0"; +	int tun_fd = -1; +	int ifindex = -1; +	char ip[256]; + +	tun_fd = setup(tun_dev); +	if (!ASSERT_GE(tun_fd, 0, "setup_reroute")) +		goto fail; + +	SYS(fail, "tc qdisc replace dev %s root fq limit 5 flow_limit 5", tun_dev); + +	ifindex = if_nametoindex(tun_dev); +	if (!ASSERT_GE(ifindex, 0, "if_nametoindex")) +		return; + +	snprintf(ip, 256, "10.0.0.%d", ifindex); +	ASSERT_EQ(overflow_fq(10, ip), 0, "overflow_fq"); + +fail: +	if (tun_fd >= 0) +		close(tun_fd); +} + +static void *test_lwt_reroute_run(void *arg) +{ +	netns_delete(); +	RUN_TEST(lwt_reroute_normal_xmit); +	RUN_TEST(lwt_reroute_qdisc_dropped); +	return NULL; +} + +void test_lwt_reroute(void) +{ +	pthread_t test_thread; +	int err; + +	/* Run the tests in their own thread to isolate the namespace changes +	 * so they do not affect the environment of other tests. 
+	 * (specifically needed because of unshare(CLONE_NEWNS) in open_netns()) +	 */ +	err = pthread_create(&test_thread, NULL, &test_lwt_reroute_run, NULL); +	if (ASSERT_OK(err, "pthread_create")) +		ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools/testing/selftests/bpf/prog_tests/modify_return.c index 5d9955af6247..a70c99c2f8c8 100644 --- a/tools/testing/selftests/bpf/prog_tests/modify_return.c +++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c @@ -41,6 +41,10 @@ static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 want_ret)  	ASSERT_EQ(skel->bss->fexit_result, 1, "modify_return fexit_result");  	ASSERT_EQ(skel->bss->fmod_ret_result, 1, "modify_return fmod_ret_result"); +	ASSERT_EQ(skel->bss->fentry_result2, 1, "modify_return fentry_result2"); +	ASSERT_EQ(skel->bss->fexit_result2, 1, "modify_return fexit_result2"); +	ASSERT_EQ(skel->bss->fmod_ret_result2, 1, "modify_return fmod_ret_result2"); +  cleanup:  	modify_return__destroy(skel);  } @@ -49,9 +53,9 @@ cleanup:  void serial_test_modify_return(void)  {  	run_test(0 /* input_retval */, -		 1 /* want_side_effect */, -		 4 /* want_ret */); +		 2 /* want_side_effect */, +		 33 /* want_ret */);  	run_test(-EINVAL /* input_retval */,  		 0 /* want_side_effect */, -		 -EINVAL /* want_ret */); +		 -EINVAL * 2 /* want_ret */);  } diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c index cd0c42fff7c0..7c0be7cf550b 100644 --- a/tools/testing/selftests/bpf/prog_tests/mptcp.c +++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c @@ -2,17 +2,59 @@  /* Copyright (c) 2020, Tessares SA. */  /* Copyright (c) 2022, SUSE. */ +#include <linux/const.h> +#include <netinet/in.h>  #include <test_progs.h>  #include "cgroup_helpers.h"  #include "network_helpers.h"  #include "mptcp_sock.skel.h" +#include "mptcpify.skel.h"  #define NS_TEST "mptcp_ns" +#ifndef IPPROTO_MPTCP +#define IPPROTO_MPTCP 262 +#endif + +#ifndef SOL_MPTCP +#define SOL_MPTCP 284 +#endif +#ifndef MPTCP_INFO +#define MPTCP_INFO		1 +#endif +#ifndef MPTCP_INFO_FLAG_FALLBACK +#define MPTCP_INFO_FLAG_FALLBACK		_BITUL(0) +#endif +#ifndef MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED +#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED	_BITUL(1) +#endif +  #ifndef TCP_CA_NAME_MAX  #define TCP_CA_NAME_MAX	16  #endif +struct __mptcp_info { +	__u8	mptcpi_subflows; +	__u8	mptcpi_add_addr_signal; +	__u8	mptcpi_add_addr_accepted; +	__u8	mptcpi_subflows_max; +	__u8	mptcpi_add_addr_signal_max; +	__u8	mptcpi_add_addr_accepted_max; +	__u32	mptcpi_flags; +	__u32	mptcpi_token; +	__u64	mptcpi_write_seq; +	__u64	mptcpi_snd_una; +	__u64	mptcpi_rcv_nxt; +	__u8	mptcpi_local_addr_used; +	__u8	mptcpi_local_addr_max; +	__u8	mptcpi_csum_enabled; +	__u32	mptcpi_retransmits; +	__u64	mptcpi_bytes_retrans; +	__u64	mptcpi_bytes_sent; +	__u64	mptcpi_bytes_received; +	__u64	mptcpi_bytes_acked; +}; +  struct mptcp_storage {  	__u32 invoked;  	__u32 is_mptcp; @@ -22,6 +64,24 @@ struct mptcp_storage {  	char ca_name[TCP_CA_NAME_MAX];  }; +static struct nstoken *create_netns(void) +{ +	SYS(fail, "ip netns add %s", NS_TEST); +	SYS(fail, "ip -net %s link set dev lo up", NS_TEST); + +	return open_netns(NS_TEST); +fail: +	return NULL; +} + +static void cleanup_netns(struct nstoken *nstoken) +{ +	if (nstoken) +		close_netns(nstoken); + +	SYS_NOFAIL("ip netns del %s &> /dev/null", NS_TEST); +} +  static int verify_tsk(int map_fd, int client_fd)  {  	int err, cfd = client_fd; @@ -100,24 
+160,14 @@ static int run_test(int cgroup_fd, int server_fd, bool is_mptcp)  	sock_skel = mptcp_sock__open_and_load();  	if (!ASSERT_OK_PTR(sock_skel, "skel_open_load")) -		return -EIO; +		return libbpf_get_error(sock_skel);  	err = mptcp_sock__attach(sock_skel);  	if (!ASSERT_OK(err, "skel_attach"))  		goto out;  	prog_fd = bpf_program__fd(sock_skel->progs._sockops); -	if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd")) { -		err = -EIO; -		goto out; -	} -  	map_fd = bpf_map__fd(sock_skel->maps.socket_storage_map); -	if (!ASSERT_GE(map_fd, 0, "bpf_map__fd")) { -		err = -EIO; -		goto out; -	} -  	err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);  	if (!ASSERT_OK(err, "bpf_prog_attach"))  		goto out; @@ -147,11 +197,8 @@ static void test_base(void)  	if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup"))  		return; -	SYS(fail, "ip netns add %s", NS_TEST); -	SYS(fail, "ip -net %s link set dev lo up", NS_TEST); - -	nstoken = open_netns(NS_TEST); -	if (!ASSERT_OK_PTR(nstoken, "open_netns")) +	nstoken = create_netns(); +	if (!ASSERT_OK_PTR(nstoken, "create_netns"))  		goto fail;  	/* without MPTCP */ @@ -174,11 +221,104 @@ with_mptcp:  	close(server_fd);  fail: -	if (nstoken) -		close_netns(nstoken); +	cleanup_netns(nstoken); +	close(cgroup_fd); +} -	SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null"); +static void send_byte(int fd) +{ +	char b = 0x55; + +	ASSERT_EQ(write(fd, &b, sizeof(b)), 1, "send single byte"); +} + +static int verify_mptcpify(int server_fd, int client_fd) +{ +	struct __mptcp_info info; +	socklen_t optlen; +	int protocol; +	int err = 0; + +	optlen = sizeof(protocol); +	if (!ASSERT_OK(getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen), +		       "getsockopt(SOL_PROTOCOL)")) +		return -1; + +	if (!ASSERT_EQ(protocol, IPPROTO_MPTCP, "protocol isn't MPTCP")) +		err++; +	optlen = sizeof(info); +	if (!ASSERT_OK(getsockopt(client_fd, SOL_MPTCP, MPTCP_INFO, &info, &optlen), +		       "getsockopt(MPTCP_INFO)")) +		return -1; + +	if (!ASSERT_GE(info.mptcpi_flags, 0, "unexpected mptcpi_flags")) +		err++; +	if (!ASSERT_FALSE(info.mptcpi_flags & MPTCP_INFO_FLAG_FALLBACK, +			  "MPTCP fallback")) +		err++; +	if (!ASSERT_TRUE(info.mptcpi_flags & MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED, +			 "no remote key received")) +		err++; + +	return err; +} + +static int run_mptcpify(int cgroup_fd) +{ +	int server_fd, client_fd, err = 0; +	struct mptcpify *mptcpify_skel; + +	mptcpify_skel = mptcpify__open_and_load(); +	if (!ASSERT_OK_PTR(mptcpify_skel, "skel_open_load")) +		return libbpf_get_error(mptcpify_skel); + +	err = mptcpify__attach(mptcpify_skel); +	if (!ASSERT_OK(err, "skel_attach")) +		goto out; + +	/* without MPTCP */ +	server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); +	if (!ASSERT_GE(server_fd, 0, "start_server")) { +		err = -EIO; +		goto out; +	} + +	client_fd = connect_to_fd(server_fd, 0); +	if (!ASSERT_GE(client_fd, 0, "connect to fd")) { +		err = -EIO; +		goto close_server; +	} + +	send_byte(client_fd); + +	err = verify_mptcpify(server_fd, client_fd); + +	close(client_fd); +close_server: +	close(server_fd); +out: +	mptcpify__destroy(mptcpify_skel); +	return err; +} + +static void test_mptcpify(void) +{ +	struct nstoken *nstoken = NULL; +	int cgroup_fd; + +	cgroup_fd = test__join_cgroup("/mptcpify"); +	if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup")) +		return; + +	nstoken = create_netns(); +	if (!ASSERT_OK_PTR(nstoken, "create_netns")) +		goto fail; + +	ASSERT_OK(run_mptcpify(cgroup_fd), "run_mptcpify"); + +fail: +	cleanup_netns(nstoken);  	
close(cgroup_fd);  } @@ -186,4 +326,6 @@ void test_mptcp(void)  {  	if (test__start_subtest("base"))  		test_base(); +	if (test__start_subtest("mptcpify")) +		test_mptcpify();  } diff --git a/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c b/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c new file mode 100644 index 000000000000..4297a2a4cb11 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <netinet/in.h> +#include <linux/netfilter.h> + +#include "test_progs.h" +#include "test_netfilter_link_attach.skel.h" + +struct nf_link_test { +	__u32 pf; +	__u32 hooknum; +	__s32 priority; +	__u32 flags; + +	bool expect_success; +	const char * const name; +}; + +static const struct nf_link_test nf_hook_link_tests[] = { +	{ .name = "allzero", }, +	{ .pf = NFPROTO_NUMPROTO, .name = "invalid-pf", }, +	{ .pf = NFPROTO_IPV4, .hooknum = 42, .name = "invalid-hooknum", }, +	{ .pf = NFPROTO_IPV4, .priority = INT_MIN, .name = "invalid-priority-min", }, +	{ .pf = NFPROTO_IPV4, .priority = INT_MAX, .name = "invalid-priority-max", }, +	{ .pf = NFPROTO_IPV4, .flags = UINT_MAX, .name = "invalid-flags", }, + +	{ .pf = NFPROTO_INET, .priority = 1, .name = "invalid-inet-not-supported", }, + +	{ .pf = NFPROTO_IPV4, .priority = -10000, .expect_success = true, .name = "attach ipv4", }, +	{ .pf = NFPROTO_IPV6, .priority =  10001, .expect_success = true, .name = "attach ipv6", }, +}; + +void test_netfilter_link_attach(void) +{ +	struct test_netfilter_link_attach *skel; +	struct bpf_program *prog; +	LIBBPF_OPTS(bpf_netfilter_opts, opts); +	int i; + +	skel = test_netfilter_link_attach__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "test_netfilter_link_attach__open_and_load")) +		goto out; + +	prog = skel->progs.nf_link_attach_test; +	if (!ASSERT_OK_PTR(prog, "attach program")) +		goto out; + +	for (i = 0; i < ARRAY_SIZE(nf_hook_link_tests); i++) { +		struct bpf_link *link; + +		if (!test__start_subtest(nf_hook_link_tests[i].name)) +			continue; + +#define X(opts, m, i)	opts.m = nf_hook_link_tests[(i)].m +		X(opts, pf, i); +		X(opts, hooknum, i); +		X(opts, priority, i); +		X(opts, flags, i); +#undef X +		link = bpf_program__attach_netfilter(prog, &opts); +		if (nf_hook_link_tests[i].expect_success) { +			struct bpf_link *link2; + +			if (!ASSERT_OK_PTR(link, "program attach successful")) +				continue; + +			link2 = bpf_program__attach_netfilter(prog, &opts); +			ASSERT_ERR_PTR(link2, "attach program with same pf/hook/priority"); + +			if (!ASSERT_OK(bpf_link__destroy(link), "link destroy")) +				break; + +			link2 = bpf_program__attach_netfilter(prog, &opts); +			if (!ASSERT_OK_PTR(link2, "program reattach successful")) +				continue; +			if (!ASSERT_OK(bpf_link__destroy(link2), "link destroy")) +				break; +		} else { +			ASSERT_ERR_PTR(link, "program load failure"); +		} +	} + +out: +	test_netfilter_link_attach__destroy(skel); +} + diff --git a/tools/testing/selftests/bpf/prog_tests/ptr_untrusted.c b/tools/testing/selftests/bpf/prog_tests/ptr_untrusted.c new file mode 100644 index 000000000000..8d077d150c56 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ptr_untrusted.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023 Yafang Shao <[email protected]> */ + +#include <string.h> +#include <linux/bpf.h> +#include <test_progs.h> +#include "test_ptr_untrusted.skel.h" + +#define TP_NAME "sched_switch" + +void serial_test_ptr_untrusted(void) +{ +	struct 
test_ptr_untrusted *skel; +	int err; + +	skel = test_ptr_untrusted__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	/* First, attach lsm prog */ +	skel->links.lsm_run = bpf_program__attach_lsm(skel->progs.lsm_run); +	if (!ASSERT_OK_PTR(skel->links.lsm_run, "lsm_attach")) +		goto cleanup; + +	/* Second, attach raw_tp prog. The lsm prog will be triggered. */ +	skel->links.raw_tp_run = bpf_program__attach_raw_tracepoint(skel->progs.raw_tp_run, +								    TP_NAME); +	if (!ASSERT_OK_PTR(skel->links.raw_tp_run, "raw_tp_attach")) +		goto cleanup; + +	err = strncmp(skel->bss->tp_name, TP_NAME, strlen(TP_NAME)); +	ASSERT_EQ(err, 0, "cmp_tp_name"); + +cleanup: +	test_ptr_untrusted__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c index 595cbf92bff5..d6bd5e16e637 100644 --- a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c +++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c @@ -9,8 +9,38 @@  void test_refcounted_kptr(void)  { +	RUN_TESTS(refcounted_kptr);  }  void test_refcounted_kptr_fail(void)  { +	RUN_TESTS(refcounted_kptr_fail); +} + +void test_refcounted_kptr_wrong_owner(void) +{ +	LIBBPF_OPTS(bpf_test_run_opts, opts, +		    .data_in = &pkt_v4, +		    .data_size_in = sizeof(pkt_v4), +		    .repeat = 1, +	); +	struct refcounted_kptr *skel; +	int ret; + +	skel = refcounted_kptr__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load")) +		return; + +	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a1), &opts); +	ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a1"); +	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a1 retval"); + +	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_b), &opts); +	ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_b"); +	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_b retval"); + +	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a2), &opts); +	ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a2"); +	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval"); +	refcounted_kptr__destroy(skel);  } diff --git a/tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c b/tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c new file mode 100644 index 000000000000..f35852d245e3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Facebook */ +#include <test_progs.h> +#include <bpf/libbpf.h> +#include <sys/types.h> +#include <sys/socket.h> +#include "sk_storage_omem_uncharge.skel.h" + +void test_sk_storage_omem_uncharge(void) +{ +	struct sk_storage_omem_uncharge *skel; +	int sk_fd = -1, map_fd, err, value; +	socklen_t optlen; + +	skel = sk_storage_omem_uncharge__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel open_and_load")) +		return; +	map_fd = bpf_map__fd(skel->maps.sk_storage); + +	/* A standalone socket not binding to addr:port, +	 * so nentns is not needed. 
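+	 * The socket's cookie is exported via skel->bss->cookie below, and the +	 * final checks only look at cookie_found and omem, so a plain AF_INET6 +	 * TCP socket created here is all that is needed.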
+	 */ +	sk_fd = socket(AF_INET6, SOCK_STREAM, 0); +	if (!ASSERT_GE(sk_fd, 0, "socket")) +		goto done; + +	optlen = sizeof(skel->bss->cookie); +	err = getsockopt(sk_fd, SOL_SOCKET, SO_COOKIE, &skel->bss->cookie, &optlen); +	if (!ASSERT_OK(err, "getsockopt(SO_COOKIE)")) +		goto done; + +	value = 0; +	err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0); +	if (!ASSERT_OK(err, "bpf_map_update_elem(value=0)")) +		goto done; + +	value = 0xdeadbeef; +	err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0); +	if (!ASSERT_OK(err, "bpf_map_update_elem(value=0xdeadbeef)")) +		goto done; + +	err = sk_storage_omem_uncharge__attach(skel); +	if (!ASSERT_OK(err, "attach")) +		goto done; + +	close(sk_fd); +	sk_fd = -1; + +	ASSERT_EQ(skel->bss->cookie_found, 2, "cookie_found"); +	ASSERT_EQ(skel->bss->omem, 0, "omem"); + +done: +	sk_storage_omem_uncharge__destroy(skel); +	if (sk_fd != -1) +		close(sk_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h index d12665490a90..36d829a65aa4 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h @@ -179,6 +179,32 @@  		__ret;                                                         \  	}) +static inline int poll_connect(int fd, unsigned int timeout_sec) +{ +	struct timeval timeout = { .tv_sec = timeout_sec }; +	fd_set wfds; +	int r, eval; +	socklen_t esize = sizeof(eval); + +	FD_ZERO(&wfds); +	FD_SET(fd, &wfds); + +	r = select(fd + 1, NULL, &wfds, NULL, &timeout); +	if (r == 0) +		errno = ETIME; +	if (r != 1) +		return -1; + +	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &eval, &esize) < 0) +		return -1; +	if (eval != 0) { +		errno = eval; +		return -1; +	} + +	return 0; +} +  static inline int poll_read(int fd, unsigned int timeout_sec)  {  	struct timeval timeout = { .tv_sec = timeout_sec }; diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index b4f6f3a50ae5..8df8cbb447f1 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -869,6 +869,77 @@ static void test_msg_redir_to_listening(struct test_sockmap_listen *skel,  	xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT);  } +static void redir_partial(int family, int sotype, int sock_map, int parser_map) +{ +	int s, c0, c1, p0, p1; +	int err, n, key, value; +	char buf[] = "abc"; + +	key = 0; +	value = sizeof(buf) - 1; +	err = xbpf_map_update_elem(parser_map, &key, &value, 0); +	if (err) +		return; + +	s = socket_loopback(family, sotype | SOCK_NONBLOCK); +	if (s < 0) +		goto clean_parser_map; + +	err = create_socket_pairs(s, family, sotype, &c0, &c1, &p0, &p1); +	if (err) +		goto close_srv; + +	err = add_to_sockmap(sock_map, p0, p1); +	if (err) +		goto close; + +	n = xsend(c1, buf, sizeof(buf), 0); +	if (n < sizeof(buf)) +		FAIL("incomplete write"); + +	n = xrecv_nonblock(c0, buf, sizeof(buf), 0); +	if (n != sizeof(buf) - 1) +		FAIL("expect %zu, received %d", sizeof(buf) - 1, n); + +close: +	xclose(c0); +	xclose(p0); +	xclose(c1); +	xclose(p1); +close_srv: +	xclose(s); + +clean_parser_map: +	key = 0; +	value = 0; +	xbpf_map_update_elem(parser_map, &key, &value, 0); +} + +static void test_skb_redir_partial(struct test_sockmap_listen *skel, +				   struct bpf_map *inner_map, int family, +				   int sotype) +{ +	int verdict = bpf_program__fd(skel->progs.prog_stream_verdict); +	int parser = 
bpf_program__fd(skel->progs.prog_stream_parser); +	int parser_map = bpf_map__fd(skel->maps.parser_map); +	int sock_map = bpf_map__fd(inner_map); +	int err; + +	err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0); +	if (err) +		return; + +	err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0); +	if (err) +		goto detach; + +	redir_partial(family, sotype, sock_map, parser_map); + +	xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT); +detach: +	xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER); +} +  static void test_reuseport_select_listening(int family, int sotype,  					    int sock_map, int verd_map,  					    int reuseport_prog) @@ -1243,6 +1314,7 @@ static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map,  	} tests[] = {  		TEST(test_skb_redir_to_connected),  		TEST(test_skb_redir_to_listening), +		TEST(test_skb_redir_partial),  		TEST(test_msg_redir_to_connected),  		TEST(test_msg_redir_to_listening),  	}; @@ -1380,11 +1452,18 @@ static int vsock_socketpair_connectible(int sotype, int *v0, int *v1)  	if (p < 0)  		goto close_cli; +	if (poll_connect(c, IO_TIMEOUT_SEC) < 0) { +		FAIL_ERRNO("poll_connect"); +		goto close_acc; +	} +  	*v0 = p;  	*v1 = c;  	return 0; +close_acc: +	close(p);  close_cli:  	close(c);  close_srv: @@ -1432,7 +1511,7 @@ static void vsock_unix_redir_connectible(int sock_mapfd, int verd_mapfd,  	if (n < 1)  		goto out; -	n = recv(mode == REDIR_INGRESS ? u0 : u1, &b, sizeof(b), MSG_DONTWAIT); +	n = xrecv_nonblock(mode == REDIR_INGRESS ? u0 : u1, &b, sizeof(b), 0);  	if (n < 0)  		FAIL("%s: recv() err, errno=%d", log_prefix, errno);  	if (n == 0) diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c index d9270bd3d920..f29c08d93beb 100644 --- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c @@ -1,4 +1,5 @@  // SPDX-License-Identifier: GPL-2.0 +#include <regex.h>  #include <test_progs.h>  #include <network_helpers.h> @@ -19,12 +20,16 @@ static struct {  	  "; R1_w=map_value(off=0,ks=4,vs=4,imm=0)\n2: (85) call bpf_this_cpu_ptr#154\n"  	  "R1 type=map_value expected=percpu_ptr_" },  	{ "lock_id_mapval_preserve", -	  "8: (bf) r1 = r0                       ; R0_w=map_value(id=1,off=0,ks=4,vs=8,imm=0) " -	  "R1_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)\n9: (85) call bpf_this_cpu_ptr#154\n" +	  "[0-9]\\+: (bf) r1 = r0                       ;" +	  " R0_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)" +	  " R1_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)\n" +	  "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"  	  "R1 type=map_value expected=percpu_ptr_" },  	{ "lock_id_innermapval_preserve", -	  "13: (bf) r1 = r0                      ; R0=map_value(id=2,off=0,ks=4,vs=8,imm=0) " -	  "R1_w=map_value(id=2,off=0,ks=4,vs=8,imm=0)\n14: (85) call bpf_this_cpu_ptr#154\n" +	  "[0-9]\\+: (bf) r1 = r0                      ;" +	  " R0=map_value(id=2,off=0,ks=4,vs=8,imm=0)" +	  " R1_w=map_value(id=2,off=0,ks=4,vs=8,imm=0)\n" +	  "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n"  	  "R1 type=map_value expected=percpu_ptr_" },  	{ "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" },  	{ "lock_id_mismatch_kptr_global", "bpf_spin_unlock of different lock" }, @@ -45,6 +50,24 @@ static struct {  	{ "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" },  }; +static int match_regex(const char *pattern, const char *string) +{ +	int err, rc; +	regex_t re; + +	err = regcomp(&re, 
pattern, REG_NOSUB); +	if (err) { +		char errbuf[512]; + +		regerror(err, &re, errbuf, sizeof(errbuf)); +		PRINT_FAIL("Can't compile regex: %s\n", errbuf); +		return -1; +	} +	rc = regexec(&re, string, 0, NULL, 0); +	regfree(&re); +	return rc == 0 ? 1 : 0; +} +  static void test_spin_lock_fail_prog(const char *prog_name, const char *err_msg)  {  	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf, @@ -74,7 +97,11 @@ static void test_spin_lock_fail_prog(const char *prog_name, const char *err_msg)  		goto end;  	} -	if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) { +	ret = match_regex(err_msg, log_buf); +	if (!ASSERT_GE(ret, 0, "match_regex")) +		goto end; + +	if (!ASSERT_TRUE(ret, "no match for expected error message")) {  		fprintf(stderr, "Expected: %s\n", err_msg);  		fprintf(stderr, "Verifier: %s\n", log_buf);  	} diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c index 740d5f644b40..d4579f735398 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c +++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c @@ -79,6 +79,8 @@ static const char * const success_tests[] = {  	"test_task_from_pid_current",  	"test_task_from_pid_invalid",  	"task_kfunc_acquire_trusted_walked", +	"test_task_kfunc_flavor_relo", +	"test_task_kfunc_flavor_relo_not_found",  };  void test_task_kfunc(void) diff --git a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c index e873766276d1..48b55539331e 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c @@ -3,6 +3,7 @@  #include <test_progs.h>  #include <linux/pkt_cls.h> +#include "cap_helpers.h"  #include "test_tc_bpf.skel.h"  #define LO_IFINDEX 1 @@ -327,7 +328,7 @@ static int test_tc_bpf_api(struct bpf_tc_hook *hook, int fd)  	return 0;  } -void test_tc_bpf(void) +void tc_bpf_root(void)  {  	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX,  			    .attach_point = BPF_TC_INGRESS); @@ -393,3 +394,36 @@ end:  	}  	test_tc_bpf__destroy(skel);  } + +void tc_bpf_non_root(void) +{ +	struct test_tc_bpf *skel = NULL; +	__u64 caps = 0; +	int ret; + +	/* In case CAP_BPF and CAP_PERFMON is not set */ +	ret = cap_enable_effective(1ULL << CAP_BPF | 1ULL << CAP_NET_ADMIN, &caps); +	if (!ASSERT_OK(ret, "set_cap_bpf_cap_net_admin")) +		return; +	ret = cap_disable_effective(1ULL << CAP_SYS_ADMIN | 1ULL << CAP_PERFMON, NULL); +	if (!ASSERT_OK(ret, "disable_cap_sys_admin")) +		goto restore_cap; + +	skel = test_tc_bpf__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "test_tc_bpf__open_and_load")) +		goto restore_cap; + +	test_tc_bpf__destroy(skel); + +restore_cap: +	if (caps) +		cap_enable_effective(caps, NULL); +} + +void test_tc_bpf(void) +{ +	if (test__start_subtest("tc_bpf_root")) +		tc_bpf_root(); +	if (test__start_subtest("tc_bpf_non_root")) +		tc_bpf_non_root(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h new file mode 100644 index 000000000000..6c93215be8a3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023 Isovalent */ +#ifndef TC_HELPERS +#define TC_HELPERS +#include <test_progs.h> + +static inline __u32 id_from_prog_fd(int fd) +{ +	struct bpf_prog_info prog_info = {}; +	__u32 prog_info_len = sizeof(prog_info); +	int err; + +	err = bpf_obj_get_info_by_fd(fd, 
&prog_info, &prog_info_len); +	if (!ASSERT_OK(err, "id_from_prog_fd")) +		return 0; + +	ASSERT_NEQ(prog_info.id, 0, "prog_info.id"); +	return prog_info.id; +} + +static inline __u32 id_from_link_fd(int fd) +{ +	struct bpf_link_info link_info = {}; +	__u32 link_info_len = sizeof(link_info); +	int err; + +	err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len); +	if (!ASSERT_OK(err, "id_from_link_fd")) +		return 0; + +	ASSERT_NEQ(link_info.id, 0, "link_info.id"); +	return link_info.id; +} + +static inline __u32 ifindex_from_link_fd(int fd) +{ +	struct bpf_link_info link_info = {}; +	__u32 link_info_len = sizeof(link_info); +	int err; + +	err = bpf_link_get_info_by_fd(fd, &link_info, &link_info_len); +	if (!ASSERT_OK(err, "id_from_link_fd")) +		return 0; + +	return link_info.tcx.ifindex; +} + +static inline void __assert_mprog_count(int target, int expected, bool miniq, int ifindex) +{ +	__u32 count = 0, attach_flags = 0; +	int err; + +	err = bpf_prog_query(ifindex, target, 0, &attach_flags, +			     NULL, &count); +	ASSERT_EQ(count, expected, "count"); +	if (!expected && !miniq) +		ASSERT_EQ(err, -ENOENT, "prog_query"); +	else +		ASSERT_EQ(err, 0, "prog_query"); +} + +static inline void assert_mprog_count(int target, int expected) +{ +	__assert_mprog_count(target, expected, false, loopback); +} + +static inline void assert_mprog_count_ifindex(int ifindex, int target, int expected) +{ +	__assert_mprog_count(target, expected, false, ifindex); +} + +#endif /* TC_HELPERS */ diff --git a/tools/testing/selftests/bpf/prog_tests/tc_links.c b/tools/testing/selftests/bpf/prog_tests/tc_links.c new file mode 100644 index 000000000000..74fc1fe9ee26 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tc_links.c @@ -0,0 +1,1919 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Isovalent */ +#include <uapi/linux/if_link.h> +#include <uapi/linux/pkt_sched.h> +#include <net/if.h> +#include <test_progs.h> + +#define loopback 1 +#define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +#include "test_tc_link.skel.h" +#include "tc_helpers.h" + +void serial_test_tc_links_basic(void) +{ +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	__u32 prog_ids[2], link_ids[2]; +	__u32 pid1, pid2, lid1, lid2; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + +	assert_mprog_count(BPF_TCX_INGRESS, 0); +	assert_mprog_count(BPF_TCX_EGRESS, 0); + +	ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc1 = link; + +	lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + +	assert_mprog_count(BPF_TCX_INGRESS, 1); +	assert_mprog_count(BPF_TCX_EGRESS, 0); + +	optq.prog_ids = prog_ids; +	optq.link_ids = link_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 1, "count"); +	ASSERT_EQ(optq.revision, 2, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); +	
ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc2 = link; + +	lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); +	ASSERT_NEQ(lid1, lid2, "link_ids_1_2"); + +	assert_mprog_count(BPF_TCX_INGRESS, 1); +	assert_mprog_count(BPF_TCX_EGRESS, 1); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, BPF_TCX_EGRESS, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 1, "count"); +	ASSERT_EQ(optq.revision, 2, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +cleanup: +	test_tc_link__destroy(skel); + +	assert_mprog_count(BPF_TCX_INGRESS, 0); +	assert_mprog_count(BPF_TCX_EGRESS, 0); +} + +static void test_tc_links_before_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	__u32 prog_ids[5], link_ids[5]; +	__u32 pid1, pid2, pid3, pid4; +	__u32 lid1, lid2, lid3, lid4; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err; + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), +		  0, "tc3_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), +		  0, "tc4_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); +	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); +	pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); +	ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); +	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc1 = link; + +	lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + +	assert_mprog_count(target, 1); + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc2 = link; + +	lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + +	assert_mprog_count(target, 2); + +	optq.prog_ids = prog_ids; +	optq.link_ids = link_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	
ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 3, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); +	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + +	skel->bss->seen_tc1 = false; +	skel->bss->seen_tc2 = false; + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE, +		.relative_fd = bpf_program__fd(skel->progs.tc2), +	); + +	link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc3 = link; + +	lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3)); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE | BPF_F_LINK, +		.relative_id = lid1, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc4 = link; + +	lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); + +	assert_mprog_count(target, 4); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 4, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid4, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid4, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], pid3, "prog_ids[2]"); +	ASSERT_EQ(optq.link_ids[2], lid3, "link_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], pid2, "prog_ids[3]"); +	ASSERT_EQ(optq.link_ids[3], lid2, "link_ids[3]"); +	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); +	ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); +cleanup: +	test_tc_link__destroy(skel); +	assert_mprog_count(target, 0); +} + +void serial_test_tc_links_before(void) +{ +	test_tc_links_before_target(BPF_TCX_INGRESS); +	test_tc_links_before_target(BPF_TCX_EGRESS); +} + +static void test_tc_links_after_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	__u32 prog_ids[5], link_ids[5]; +	__u32 pid1, pid2, pid3, pid4; +	__u32 lid1, lid2, lid3, lid4; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err; + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), +		  0, "tc3_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), +		  0, "tc4_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, 
"skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); +	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); +	pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); +	ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); +	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc1 = link; + +	lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + +	assert_mprog_count(target, 1); + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc2 = link; + +	lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + +	assert_mprog_count(target, 2); + +	optq.prog_ids = prog_ids; +	optq.link_ids = link_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 3, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); +	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + +	skel->bss->seen_tc1 = false; +	skel->bss->seen_tc2 = false; + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_AFTER, +		.relative_fd = bpf_program__fd(skel->progs.tc1), +	); + +	link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc3 = link; + +	lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3)); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_AFTER | BPF_F_LINK, +		.relative_fd = bpf_link__fd(skel->links.tc2), +	); + +	link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc4 = link; + +	lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); + +	assert_mprog_count(target, 4); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 4, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], pid3, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], lid3, "link_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], pid2, "prog_ids[2]"); +	ASSERT_EQ(optq.link_ids[2], lid2, "link_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], pid4, "prog_ids[3]"); +	ASSERT_EQ(optq.link_ids[3], lid4, "link_ids[3]"); +	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); +	ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, 
"seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); +cleanup: +	test_tc_link__destroy(skel); +	assert_mprog_count(target, 0); +} + +void serial_test_tc_links_after(void) +{ +	test_tc_links_after_target(BPF_TCX_INGRESS); +	test_tc_links_after_target(BPF_TCX_EGRESS); +} + +static void test_tc_links_revision_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	__u32 prog_ids[3], link_ids[3]; +	__u32 pid1, pid2, lid1, lid2; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err; + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + +	assert_mprog_count(target, 0); + +	optl.expected_revision = 1; + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc1 = link; + +	lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + +	assert_mprog_count(target, 1); + +	optl.expected_revision = 1; + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 1); + +	optl.expected_revision = 2; + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc2 = link; + +	lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + +	assert_mprog_count(target, 2); + +	optq.prog_ids = prog_ids; +	optq.link_ids = link_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 3, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); +	ASSERT_EQ(optq.link_ids[2], 0, "prog_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +cleanup: +	test_tc_link__destroy(skel); +	assert_mprog_count(target, 0); +} + +void serial_test_tc_links_revision(void) +{ +	test_tc_links_revision_target(BPF_TCX_INGRESS); +	test_tc_links_revision_target(BPF_TCX_EGRESS); +} + +static void test_tc_chain_classic(int target, bool chain_tc_old) +{ +	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); +	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); +	bool hook_created = false, tc_attached = false; +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	__u32 pid1, pid2, pid3; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err; + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +	
	goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); +	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); +	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	if (chain_tc_old) { +		tc_hook.attach_point = target == BPF_TCX_INGRESS ? +				       BPF_TC_INGRESS : BPF_TC_EGRESS; +		err = bpf_tc_hook_create(&tc_hook); +		if (err == 0) +			hook_created = true; +		err = err == -EEXIST ? 0 : err; +		if (!ASSERT_OK(err, "bpf_tc_hook_create")) +			goto cleanup; + +		tc_opts.prog_fd = bpf_program__fd(skel->progs.tc3); +		err = bpf_tc_attach(&tc_hook, &tc_opts); +		if (!ASSERT_OK(err, "bpf_tc_attach")) +			goto cleanup; +		tc_attached = true; +	} + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc1 = link; + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc2 = link; + +	assert_mprog_count(target, 2); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); + +	skel->bss->seen_tc1 = false; +	skel->bss->seen_tc2 = false; +	skel->bss->seen_tc3 = false; + +	err = bpf_link__detach(skel->links.tc2); +	if (!ASSERT_OK(err, "prog_detach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); +cleanup: +	if (tc_attached) { +		tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; +		err = bpf_tc_detach(&tc_hook, &tc_opts); +		ASSERT_OK(err, "bpf_tc_detach"); +	} +	if (hook_created) { +		tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; +		bpf_tc_hook_destroy(&tc_hook); +	} +	assert_mprog_count(target, 1); +	test_tc_link__destroy(skel); +	assert_mprog_count(target, 0); +} + +void serial_test_tc_links_chain_classic(void) +{ +	test_tc_chain_classic(BPF_TCX_INGRESS, false); +	test_tc_chain_classic(BPF_TCX_EGRESS, false); +	test_tc_chain_classic(BPF_TCX_INGRESS, true); +	test_tc_chain_classic(BPF_TCX_EGRESS, true); +} + +static void test_tc_links_replace_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	__u32 pid1, pid2, pid3, lid1, lid2; +	__u32 prog_ids[4], link_ids[4]; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err; + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), +		  0, "tc3_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = 
id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); +	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); +	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	optl.expected_revision = 1; + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc1 = link; + +	lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE, +		.relative_id = pid1, +		.expected_revision = 2, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc2 = link; + +	lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + +	assert_mprog_count(target, 2); + +	optq.prog_ids = prog_ids; +	optq.link_ids = link_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 3, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); +	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + +	skel->bss->seen_tc1 = false; +	skel->bss->seen_tc2 = false; +	skel->bss->seen_tc3 = false; + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_REPLACE, +		.relative_fd = bpf_program__fd(skel->progs.tc2), +		.expected_revision = 3, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 2); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_REPLACE | BPF_F_LINK, +		.relative_fd = bpf_link__fd(skel->links.tc2), +		.expected_revision = 3, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 2); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_REPLACE | BPF_F_LINK | BPF_F_AFTER, +		.relative_id = lid2, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 2); + +	err = bpf_link__update_program(skel->links.tc2, skel->progs.tc3); +	if (!ASSERT_OK(err, "link_update")) +		goto cleanup; + +	assert_mprog_count(target, 2); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 4, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid3, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], 
pid1, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); +	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); + +	skel->bss->seen_tc1 = false; +	skel->bss->seen_tc2 = false; +	skel->bss->seen_tc3 = false; + +	err = bpf_link__detach(skel->links.tc2); +	if (!ASSERT_OK(err, "link_detach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 1, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + +	skel->bss->seen_tc1 = false; +	skel->bss->seen_tc2 = false; +	skel->bss->seen_tc3 = false; + +	err = bpf_link__update_program(skel->links.tc1, skel->progs.tc1); +	if (!ASSERT_OK(err, "link_update_self")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 1, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); +cleanup: +	test_tc_link__destroy(skel); +	assert_mprog_count(target, 0); +} + +void serial_test_tc_links_replace(void) +{ +	test_tc_links_replace_target(BPF_TCX_INGRESS); +	test_tc_links_replace_target(BPF_TCX_EGRESS); +} + +static void test_tc_links_invalid_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	__u32 pid1, pid2, lid1; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err; + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + +	assert_mprog_count(target, 0); + +	optl.flags = BPF_F_BEFORE | BPF_F_AFTER; + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto 
cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE | BPF_F_ID, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_AFTER | BPF_F_ID, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_ID, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_LINK, +		.relative_fd = bpf_program__fd(skel->progs.tc2), +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_LINK, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.relative_fd = bpf_program__fd(skel->progs.tc2), +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE | BPF_F_AFTER, +		.relative_fd = bpf_program__fd(skel->progs.tc2), +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE, +		.relative_fd = bpf_program__fd(skel->progs.tc1), +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_ID, +		.relative_id = pid2, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_ID, +		.relative_id = 42, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE, +		.relative_fd = bpf_program__fd(skel->progs.tc1), +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE | BPF_F_LINK, +		.relative_fd = bpf_program__fd(skel->progs.tc1), +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, 
"link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_AFTER, +		.relative_fd = bpf_program__fd(skel->progs.tc1), +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl); + +	link = bpf_program__attach_tcx(skel->progs.tc1, 0, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_AFTER | BPF_F_LINK, +		.relative_fd = bpf_program__fd(skel->progs.tc1), +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optl); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc1 = link; + +	lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_AFTER | BPF_F_LINK, +		.relative_fd = bpf_program__fd(skel->progs.tc1), +	); + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID, +		.relative_id = ~0, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID, +		.relative_id = lid1, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE | BPF_F_ID, +		.relative_id = pid1, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { +		bpf_link__destroy(link); +		goto cleanup; +	} +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE | BPF_F_LINK | BPF_F_ID, +		.relative_id = lid1, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc2 = link; + +	assert_mprog_count(target, 2); +cleanup: +	test_tc_link__destroy(skel); +	assert_mprog_count(target, 0); +} + +void serial_test_tc_links_invalid(void) +{ +	test_tc_links_invalid_target(BPF_TCX_INGRESS); +	test_tc_links_invalid_target(BPF_TCX_EGRESS); +} + +static void test_tc_links_prepend_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	__u32 prog_ids[5], link_ids[5]; +	__u32 pid1, pid2, pid3, pid4; +	__u32 lid1, lid2, lid3, lid4; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err; + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		
  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), +		  0, "tc3_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), +		  0, "tc4_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); +	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); +	pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); +	ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); +	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc1 = link; + +	lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc2 = link; + +	lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + +	assert_mprog_count(target, 2); + +	optq.prog_ids = prog_ids; +	optq.link_ids = link_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 3, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); +	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + +	skel->bss->seen_tc1 = false; +	skel->bss->seen_tc2 = false; + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc3 = link; + +	lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3)); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_BEFORE, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc4 = link; + +	lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); + +	assert_mprog_count(target, 4); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 4, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid4, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid4, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], pid3, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], lid3, "link_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], pid2, "prog_ids[2]"); +	
ASSERT_EQ(optq.link_ids[2], lid2, "link_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], pid1, "prog_ids[3]"); +	ASSERT_EQ(optq.link_ids[3], lid1, "link_ids[3]"); +	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); +	ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); +cleanup: +	test_tc_link__destroy(skel); +	assert_mprog_count(target, 0); +} + +void serial_test_tc_links_prepend(void) +{ +	test_tc_links_prepend_target(BPF_TCX_INGRESS); +	test_tc_links_prepend_target(BPF_TCX_EGRESS); +} + +static void test_tc_links_append_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	__u32 prog_ids[5], link_ids[5]; +	__u32 pid1, pid2, pid3, pid4; +	__u32 lid1, lid2, lid3, lid4; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err; + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), +		  0, "tc3_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), +		  0, "tc4_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); +	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); +	pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); +	ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); +	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc1 = link; + +	lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_AFTER, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc2 = link; + +	lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + +	assert_mprog_count(target, 2); + +	optq.prog_ids = prog_ids; +	optq.link_ids = link_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 3, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); +	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + +	skel->bss->seen_tc1 = 
false; +	skel->bss->seen_tc2 = false; + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_AFTER, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc3, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc3 = link; + +	lid3 = id_from_link_fd(bpf_link__fd(skel->links.tc3)); + +	LIBBPF_OPTS_RESET(optl, +		.flags = BPF_F_AFTER, +	); + +	link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc4 = link; + +	lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); + +	assert_mprog_count(target, 4); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(link_ids, 0, sizeof(link_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup; + +	ASSERT_EQ(optq.count, 4, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); +	ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); +	ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], pid3, "prog_ids[2]"); +	ASSERT_EQ(optq.link_ids[2], lid3, "link_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], pid4, "prog_ids[3]"); +	ASSERT_EQ(optq.link_ids[3], lid4, "link_ids[3]"); +	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); +	ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); +cleanup: +	test_tc_link__destroy(skel); +	assert_mprog_count(target, 0); +} + +void serial_test_tc_links_append(void) +{ +	test_tc_links_append_target(BPF_TCX_INGRESS); +	test_tc_links_append_target(BPF_TCX_EGRESS); +} + +static void test_tc_links_dev_cleanup_target(int target) +{ +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	__u32 pid1, pid2, pid3, pid4; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err, ifindex; + +	ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth"); +	ifindex = if_nametoindex("tcx_opts1"); +	ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), +		  0, "tc3_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), +		  0, "tc4_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); +	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); +	pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); +	ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); +	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	link = bpf_program__attach_tcx(skel->progs.tc1, ifindex, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc1 = link; + +	assert_mprog_count_ifindex(ifindex, 
target, 1); + +	link = bpf_program__attach_tcx(skel->progs.tc2, ifindex, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc2 = link; + +	assert_mprog_count_ifindex(ifindex, target, 2); + +	link = bpf_program__attach_tcx(skel->progs.tc3, ifindex, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc3 = link; + +	assert_mprog_count_ifindex(ifindex, target, 3); + +	link = bpf_program__attach_tcx(skel->progs.tc4, ifindex, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc4 = link; + +	assert_mprog_count_ifindex(ifindex, target, 4); + +	ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); +	ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); +	ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); + +	ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc1)), 0, "tc1_ifindex"); +	ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc2)), 0, "tc2_ifindex"); +	ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc3)), 0, "tc3_ifindex"); +	ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc4)), 0, "tc4_ifindex"); + +	test_tc_link__destroy(skel); +	return; +cleanup: +	test_tc_link__destroy(skel); + +	ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); +	ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); +	ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); +} + +void serial_test_tc_links_dev_cleanup(void) +{ +	test_tc_links_dev_cleanup_target(BPF_TCX_INGRESS); +	test_tc_links_dev_cleanup_target(BPF_TCX_EGRESS); +} + +static void test_tc_chain_mixed(int target) +{ +	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); +	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	struct test_tc_link *skel; +	struct bpf_link *link; +	__u32 pid1, pid2, pid3; +	int err; + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), +		  0, "tc4_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc5, target), +		  0, "tc5_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc6, target), +		  0, "tc6_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc5)); +	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc6)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); +	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	tc_hook.attach_point = target == BPF_TCX_INGRESS ? +			       BPF_TC_INGRESS : BPF_TC_EGRESS; +	err = bpf_tc_hook_create(&tc_hook); +	err = err == -EEXIST ? 
0 : err; +	if (!ASSERT_OK(err, "bpf_tc_hook_create")) +		goto cleanup; + +	tc_opts.prog_fd = bpf_program__fd(skel->progs.tc5); +	err = bpf_tc_attach(&tc_hook, &tc_opts); +	if (!ASSERT_OK(err, "bpf_tc_attach")) +		goto cleanup; + +	link = bpf_program__attach_tcx(skel->progs.tc6, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc6 = link; + +	assert_mprog_count(target, 1); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); +	ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5"); +	ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6"); + +	skel->bss->seen_tc4 = false; +	skel->bss->seen_tc5 = false; +	skel->bss->seen_tc6 = false; + +	err = bpf_link__update_program(skel->links.tc6, skel->progs.tc4); +	if (!ASSERT_OK(err, "link_update")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); +	ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5"); +	ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6"); + +	skel->bss->seen_tc4 = false; +	skel->bss->seen_tc5 = false; +	skel->bss->seen_tc6 = false; + +	err = bpf_link__detach(skel->links.tc6); +	if (!ASSERT_OK(err, "prog_detach")) +		goto cleanup; + +	__assert_mprog_count(target, 0, true, loopback); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); +	ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5"); +	ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6"); + +cleanup: +	tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; +	err = bpf_tc_detach(&tc_hook, &tc_opts); +	ASSERT_OK(err, "bpf_tc_detach"); + +	tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; +	bpf_tc_hook_destroy(&tc_hook); + +	test_tc_link__destroy(skel); +} + +void serial_test_tc_links_chain_mixed(void) +{ +	test_tc_chain_mixed(BPF_TCX_INGRESS); +	test_tc_chain_mixed(BPF_TCX_EGRESS); +} + +static void test_tc_links_ingress(int target, bool chain_tc_old, +				  bool tcx_teardown_first) +{ +	LIBBPF_OPTS(bpf_tc_opts, tc_opts, +		.handle		= 1, +		.priority	= 1, +	); +	LIBBPF_OPTS(bpf_tc_hook, tc_hook, +		.ifindex	= loopback, +		.attach_point	= BPF_TC_CUSTOM, +		.parent		= TC_H_INGRESS, +	); +	bool hook_created = false, tc_attached = false; +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	__u32 pid1, pid2, pid3; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err; + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); +	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); +	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	if (chain_tc_old) { +		ASSERT_OK(system("tc qdisc add dev lo ingress"), "add_ingress"); +		hook_created = true; + +		tc_opts.prog_fd = bpf_program__fd(skel->progs.tc3); +		err = bpf_tc_attach(&tc_hook, &tc_opts); +		if (!ASSERT_OK(err, "bpf_tc_attach")) +			goto cleanup; +		tc_attached = true; +	} + +	link = bpf_program__attach_tcx(skel->progs.tc1, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	
skel->links.tc1 = link; + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc2 = link; + +	assert_mprog_count(target, 2); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); + +	skel->bss->seen_tc1 = false; +	skel->bss->seen_tc2 = false; +	skel->bss->seen_tc3 = false; + +	err = bpf_link__detach(skel->links.tc2); +	if (!ASSERT_OK(err, "prog_detach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); +cleanup: +	if (tc_attached) { +		tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; +		err = bpf_tc_detach(&tc_hook, &tc_opts); +		ASSERT_OK(err, "bpf_tc_detach"); +	} +	ASSERT_OK(system(ping_cmd), ping_cmd); +	assert_mprog_count(target, 1); +	if (hook_created && tcx_teardown_first) +		ASSERT_OK(system("tc qdisc del dev lo ingress"), "del_ingress"); +	ASSERT_OK(system(ping_cmd), ping_cmd); +	test_tc_link__destroy(skel); +	ASSERT_OK(system(ping_cmd), ping_cmd); +	if (hook_created && !tcx_teardown_first) +		ASSERT_OK(system("tc qdisc del dev lo ingress"), "del_ingress"); +	ASSERT_OK(system(ping_cmd), ping_cmd); +	assert_mprog_count(target, 0); +} + +void serial_test_tc_links_ingress(void) +{ +	test_tc_links_ingress(BPF_TCX_INGRESS, true, true); +	test_tc_links_ingress(BPF_TCX_INGRESS, true, false); +	test_tc_links_ingress(BPF_TCX_INGRESS, false, false); +} + +static void test_tc_links_dev_mixed(int target) +{ +	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); +	LIBBPF_OPTS(bpf_tc_hook, tc_hook); +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	__u32 pid1, pid2, pid3, pid4; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err, ifindex; + +	ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth"); +	ifindex = if_nametoindex("tcx_opts1"); +	ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), +		  0, "tc3_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), +		  0, "tc4_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); +	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); +	pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); +	ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); +	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	link = bpf_program__attach_tcx(skel->progs.tc1, ifindex, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc1 = link; + +	assert_mprog_count_ifindex(ifindex, target, 1); + +	link = bpf_program__attach_tcx(skel->progs.tc2, ifindex, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto 
cleanup; + +	skel->links.tc2 = link; + +	assert_mprog_count_ifindex(ifindex, target, 2); + +	link = bpf_program__attach_tcx(skel->progs.tc3, ifindex, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc3 = link; + +	assert_mprog_count_ifindex(ifindex, target, 3); + +	link = bpf_program__attach_tcx(skel->progs.tc4, ifindex, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup; + +	skel->links.tc4 = link; + +	assert_mprog_count_ifindex(ifindex, target, 4); + +	tc_hook.ifindex = ifindex; +	tc_hook.attach_point = target == BPF_TCX_INGRESS ? +			       BPF_TC_INGRESS : BPF_TC_EGRESS; + +	err = bpf_tc_hook_create(&tc_hook); +	err = err == -EEXIST ? 0 : err; +	if (!ASSERT_OK(err, "bpf_tc_hook_create")) +		goto cleanup; + +	tc_opts.prog_fd = bpf_program__fd(skel->progs.tc5); +	err = bpf_tc_attach(&tc_hook, &tc_opts); +	if (!ASSERT_OK(err, "bpf_tc_attach")) +		goto cleanup; + +	ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); +	ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); +	ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); + +	ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc1)), 0, "tc1_ifindex"); +	ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc2)), 0, "tc2_ifindex"); +	ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc3)), 0, "tc3_ifindex"); +	ASSERT_EQ(ifindex_from_link_fd(bpf_link__fd(skel->links.tc4)), 0, "tc4_ifindex"); + +	test_tc_link__destroy(skel); +	return; +cleanup: +	test_tc_link__destroy(skel); + +	ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); +	ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); +	ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); +} + +void serial_test_tc_links_dev_mixed(void) +{ +	test_tc_links_dev_mixed(BPF_TCX_INGRESS); +	test_tc_links_dev_mixed(BPF_TCX_EGRESS); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c new file mode 100644 index 000000000000..7a2ecd4eca5d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c @@ -0,0 +1,2380 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Isovalent */ +#include <uapi/linux/if_link.h> +#include <net/if.h> +#include <test_progs.h> + +#define loopback 1 +#define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +#include "test_tc_link.skel.h" +#include "tc_helpers.h" + +void serial_test_tc_opts_basic(void) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	__u32 fd1, fd2, id1, id2; +	struct test_tc_link *skel; +	__u32 prog_ids[2]; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + +	assert_mprog_count(BPF_TCX_INGRESS, 0); +	assert_mprog_count(BPF_TCX_EGRESS, 0); + +	ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + +	err = bpf_prog_attach_opts(fd1, loopback, BPF_TCX_INGRESS, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count(BPF_TCX_INGRESS, 1); +	assert_mprog_count(BPF_TCX_EGRESS, 0); + +	optq.prog_ids = prog_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &optq); +	if 
(!ASSERT_OK(err, "prog_query")) +		goto cleanup_in; + +	ASSERT_EQ(optq.count, 1, "count"); +	ASSERT_EQ(optq.revision, 2, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + +	err = bpf_prog_attach_opts(fd2, loopback, BPF_TCX_EGRESS, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_in; + +	assert_mprog_count(BPF_TCX_INGRESS, 1); +	assert_mprog_count(BPF_TCX_EGRESS, 1); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, BPF_TCX_EGRESS, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_eg; + +	ASSERT_EQ(optq.count, 1, "count"); +	ASSERT_EQ(optq.revision, 2, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + +cleanup_eg: +	err = bpf_prog_detach_opts(fd2, loopback, BPF_TCX_EGRESS, &optd); +	ASSERT_OK(err, "prog_detach_eg"); + +	assert_mprog_count(BPF_TCX_INGRESS, 1); +	assert_mprog_count(BPF_TCX_EGRESS, 0); + +cleanup_in: +	err = bpf_prog_detach_opts(fd1, loopback, BPF_TCX_INGRESS, &optd); +	ASSERT_OK(err, "prog_detach_in"); + +	assert_mprog_count(BPF_TCX_INGRESS, 0); +	assert_mprog_count(BPF_TCX_EGRESS, 0); + +cleanup: +	test_tc_link__destroy(skel); +} + +static void test_tc_opts_before_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; +	struct test_tc_link *skel; +	__u32 prog_ids[5]; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); +	fd3 = bpf_program__fd(skel->progs.tc3); +	fd4 = bpf_program__fd(skel->progs.tc4); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); +	id3 = id_from_prog_fd(fd3); +	id4 = id_from_prog_fd(fd4); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); +	ASSERT_NEQ(id3, id4, "prog_ids_3_4"); +	ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target; + +	assert_mprog_count(target, 2); + +	optq.prog_ids = prog_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target2; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 3, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_BEFORE, +		.relative_fd = fd2, +	); + +	err = 
bpf_prog_attach_opts(fd3, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target2; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target3; + +	ASSERT_EQ(optq.count, 3, "count"); +	ASSERT_EQ(optq.revision, 4, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_BEFORE, +		.relative_id = id1, +	); + +	err = bpf_prog_attach_opts(fd4, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target3; + +	assert_mprog_count(target, 4); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target4; + +	ASSERT_EQ(optq.count, 4, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], id2, "prog_ids[3]"); +	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); + +cleanup_target4: +	err = bpf_prog_detach_opts(fd4, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 3); + +cleanup_target3: +	err = bpf_prog_detach_opts(fd3, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 2); + +cleanup_target2: +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 1); + +cleanup_target: +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 0); + +cleanup: +	test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_before(void) +{ +	test_tc_opts_before_target(BPF_TCX_INGRESS); +	test_tc_opts_before_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_after_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; +	struct test_tc_link *skel; +	__u32 prog_ids[5]; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); +	fd3 = bpf_program__fd(skel->progs.tc3); +	fd4 = bpf_program__fd(skel->progs.tc4); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); +	id3 = id_from_prog_fd(fd3); +	id4 = id_from_prog_fd(fd4); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); +	ASSERT_NEQ(id3, id4, "prog_ids_3_4"); +	ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target; + +	assert_mprog_count(target, 
2); + +	optq.prog_ids = prog_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target2; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 3, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_AFTER, +		.relative_fd = fd1, +	); + +	err = bpf_prog_attach_opts(fd3, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target2; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target3; + +	ASSERT_EQ(optq.count, 3, "count"); +	ASSERT_EQ(optq.revision, 4, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_AFTER, +		.relative_id = id2, +	); + +	err = bpf_prog_attach_opts(fd4, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target3; + +	assert_mprog_count(target, 4); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target4; + +	ASSERT_EQ(optq.count, 4, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); +	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); + +cleanup_target4: +	err = bpf_prog_detach_opts(fd4, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 3); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target3; + +	ASSERT_EQ(optq.count, 3, "count"); +	ASSERT_EQ(optq.revision, 6, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + +cleanup_target3: +	err = bpf_prog_detach_opts(fd3, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 2); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target2; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 7, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	
ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +cleanup_target2: +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 1); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target; + +	ASSERT_EQ(optq.count, 1, "count"); +	ASSERT_EQ(optq.revision, 8, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + +cleanup_target: +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 0); + +cleanup: +	test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_after(void) +{ +	test_tc_opts_after_target(BPF_TCX_INGRESS); +	test_tc_opts_after_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_revision_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	__u32 fd1, fd2, id1, id2; +	struct test_tc_link *skel; +	__u32 prog_ids[3]; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(opta, +		.expected_revision = 1, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(opta, +		.expected_revision = 1, +	); + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	if (!ASSERT_EQ(err, -ESTALE, "prog_attach")) +		goto cleanup_target; + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(opta, +		.expected_revision = 2, +	); + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target; + +	assert_mprog_count(target, 2); + +	optq.prog_ids = prog_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target2; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 3, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + +	LIBBPF_OPTS_RESET(optd, +		.expected_revision = 2, +	); + +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_EQ(err, -ESTALE, "prog_detach"); +	assert_mprog_count(target, 2); + +cleanup_target2: +	LIBBPF_OPTS_RESET(optd, +		.expected_revision = 3, +	); + +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 1); + +cleanup_target: +	LIBBPF_OPTS_RESET(optd); + +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 0); + +cleanup: +	test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_revision(void) +{ +	test_tc_opts_revision_target(BPF_TCX_INGRESS); +	
test_tc_opts_revision_target(BPF_TCX_EGRESS); +} + +static void test_tc_chain_classic(int target, bool chain_tc_old) +{ +	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); +	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	bool hook_created = false, tc_attached = false; +	__u32 fd1, fd2, fd3, id1, id2, id3; +	struct test_tc_link *skel; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); +	fd3 = bpf_program__fd(skel->progs.tc3); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); +	id3 = id_from_prog_fd(fd3); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); +	ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	if (chain_tc_old) { +		tc_hook.attach_point = target == BPF_TCX_INGRESS ? +				       BPF_TC_INGRESS : BPF_TC_EGRESS; +		err = bpf_tc_hook_create(&tc_hook); +		if (err == 0) +			hook_created = true; +		err = err == -EEXIST ? 0 : err; +		if (!ASSERT_OK(err, "bpf_tc_hook_create")) +			goto cleanup; + +		tc_opts.prog_fd = fd3; +		err = bpf_tc_attach(&tc_hook, &tc_opts); +		if (!ASSERT_OK(err, "bpf_tc_attach")) +			goto cleanup; +		tc_attached = true; +	} + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_detach; + +	assert_mprog_count(target, 2); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); + +	skel->bss->seen_tc1 = false; +	skel->bss->seen_tc2 = false; +	skel->bss->seen_tc3 = false; + +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	if (!ASSERT_OK(err, "prog_detach")) +		goto cleanup_detach; + +	assert_mprog_count(target, 1); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); + +cleanup_detach: +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	if (!ASSERT_OK(err, "prog_detach")) +		goto cleanup; + +	__assert_mprog_count(target, 0, chain_tc_old, loopback); +cleanup: +	if (tc_attached) { +		tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; +		err = bpf_tc_detach(&tc_hook, &tc_opts); +		ASSERT_OK(err, "bpf_tc_detach"); +	} +	if (hook_created) { +		tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; +		bpf_tc_hook_destroy(&tc_hook); +	} +	test_tc_link__destroy(skel); +	assert_mprog_count(target, 0); +} + +void serial_test_tc_opts_chain_classic(void) +{ +	test_tc_chain_classic(BPF_TCX_INGRESS, false); +	test_tc_chain_classic(BPF_TCX_EGRESS, false); +	test_tc_chain_classic(BPF_TCX_INGRESS, true); +	test_tc_chain_classic(BPF_TCX_EGRESS, true); +} + +static void test_tc_opts_replace_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	__u32 fd1, fd2, fd3, id1, id2, id3, detach_fd; +	__u32 prog_ids[4], prog_flags[4]; +	struct test_tc_link *skel; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = 
bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); +	fd3 = bpf_program__fd(skel->progs.tc3); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); +	id3 = id_from_prog_fd(fd3); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); +	ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(opta, +		.expected_revision = 1, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_BEFORE, +		.relative_id = id1, +		.expected_revision = 2, +	); + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target; + +	detach_fd = fd2; + +	assert_mprog_count(target, 2); + +	optq.prog_attach_flags = prog_flags; +	optq.prog_ids = prog_ids; + +	memset(prog_flags, 0, sizeof(prog_flags)); +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target2; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 3, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +	ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]"); +	ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]"); +	ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + +	skel->bss->seen_tc1 = false; +	skel->bss->seen_tc2 = false; +	skel->bss->seen_tc3 = false; + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_REPLACE, +		.replace_prog_fd = fd2, +		.expected_revision = 3, +	); + +	err = bpf_prog_attach_opts(fd3, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target2; + +	detach_fd = fd3; + +	assert_mprog_count(target, 2); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target2; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 4, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); + +	skel->bss->seen_tc1 = false; +	skel->bss->seen_tc2 = false; +	skel->bss->seen_tc3 = false; + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_REPLACE | BPF_F_BEFORE, +		.replace_prog_fd = fd3, +		.relative_fd = fd1, +		.expected_revision = 4, +	); + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target2; + +	detach_fd = fd2; + +	assert_mprog_count(target, 2); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target2; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); +	
ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_REPLACE, +		.replace_prog_fd = fd2, +	); + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	ASSERT_EQ(err, -EEXIST, "prog_attach"); +	assert_mprog_count(target, 2); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_REPLACE | BPF_F_AFTER, +		.replace_prog_fd = fd2, +		.relative_fd = fd1, +		.expected_revision = 5, +	); + +	err = bpf_prog_attach_opts(fd3, loopback, target, &opta); +	ASSERT_EQ(err, -ERANGE, "prog_attach"); +	assert_mprog_count(target, 2); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_BEFORE | BPF_F_AFTER | BPF_F_REPLACE, +		.replace_prog_fd = fd2, +		.relative_fd = fd1, +		.expected_revision = 5, +	); + +	err = bpf_prog_attach_opts(fd3, loopback, target, &opta); +	ASSERT_EQ(err, -ERANGE, "prog_attach"); +	assert_mprog_count(target, 2); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_BEFORE, +		.relative_id = id1, +		.expected_revision = 5, +	); + +cleanup_target2: +	err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 1); + +cleanup_target: +	LIBBPF_OPTS_RESET(optd); + +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 0); + +cleanup: +	test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_replace(void) +{ +	test_tc_opts_replace_target(BPF_TCX_INGRESS); +	test_tc_opts_replace_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_invalid_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	__u32 fd1, fd2, id1, id2; +	struct test_tc_link *skel; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_BEFORE | BPF_F_AFTER, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -ERANGE, "prog_attach"); +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_BEFORE | BPF_F_ID, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -ENOENT, "prog_attach"); +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_AFTER | BPF_F_ID, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -ENOENT, "prog_attach"); +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(opta, +		.relative_fd = fd2, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -EINVAL, "prog_attach"); +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_BEFORE | BPF_F_AFTER, +		.relative_fd = fd2, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -ENOENT, "prog_attach"); +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_ID, +		.relative_id = id2, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -EINVAL, "prog_attach"); +	assert_mprog_count(target, 0); + +	
LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_BEFORE, +		.relative_fd = fd1, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -ENOENT, "prog_attach"); +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_AFTER, +		.relative_fd = fd1, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -ENOENT, "prog_attach"); +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(opta); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(opta); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -EEXIST, "prog_attach"); +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_BEFORE, +		.relative_fd = fd1, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -EEXIST, "prog_attach"); +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_AFTER, +		.relative_fd = fd1, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -EEXIST, "prog_attach"); +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_REPLACE, +		.relative_fd = fd1, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -EINVAL, "prog_attach_x1"); +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_REPLACE, +		.replace_prog_fd = fd1, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	ASSERT_EQ(err, -EEXIST, "prog_attach"); +	assert_mprog_count(target, 1); + +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 0); +cleanup: +	test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_invalid(void) +{ +	test_tc_opts_invalid_target(BPF_TCX_INGRESS); +	test_tc_opts_invalid_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_prepend_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; +	struct test_tc_link *skel; +	__u32 prog_ids[5]; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); +	fd3 = bpf_program__fd(skel->progs.tc3); +	fd4 = bpf_program__fd(skel->progs.tc4); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); +	id3 = id_from_prog_fd(fd3); +	id4 = id_from_prog_fd(fd4); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); +	ASSERT_NEQ(id3, id4, "prog_ids_3_4"); +	ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_BEFORE, +	); + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target; + +	assert_mprog_count(target, 2); + +	optq.prog_ids = prog_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target2; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 3, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id2, 
"prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_BEFORE, +	); + +	err = bpf_prog_attach_opts(fd3, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target2; + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_BEFORE, +	); + +	err = bpf_prog_attach_opts(fd4, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target3; + +	assert_mprog_count(target, 4); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target4; + +	ASSERT_EQ(optq.count, 4, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], id1, "prog_ids[3]"); +	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); + +cleanup_target4: +	err = bpf_prog_detach_opts(fd4, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 3); + +cleanup_target3: +	err = bpf_prog_detach_opts(fd3, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 2); + +cleanup_target2: +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 1); + +cleanup_target: +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 0); + +cleanup: +	test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_prepend(void) +{ +	test_tc_opts_prepend_target(BPF_TCX_INGRESS); +	test_tc_opts_prepend_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_append_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; +	struct test_tc_link *skel; +	__u32 prog_ids[5]; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); +	fd3 = bpf_program__fd(skel->progs.tc3); +	fd4 = bpf_program__fd(skel->progs.tc4); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); +	id3 = id_from_prog_fd(fd3); +	id4 = id_from_prog_fd(fd4); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); +	ASSERT_NEQ(id3, id4, "prog_ids_3_4"); +	ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_AFTER, +	); + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target; + +	assert_mprog_count(target, 
2); + +	optq.prog_ids = prog_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target2; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 3, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_AFTER, +	); + +	err = bpf_prog_attach_opts(fd3, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target2; + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_AFTER, +	); + +	err = bpf_prog_attach_opts(fd4, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_target3; + +	assert_mprog_count(target, 4); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup_target4; + +	ASSERT_EQ(optq.count, 4, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); +	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); +	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); +	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); + +cleanup_target4: +	err = bpf_prog_detach_opts(fd4, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 3); + +cleanup_target3: +	err = bpf_prog_detach_opts(fd3, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 2); + +cleanup_target2: +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 1); + +cleanup_target: +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 0); + +cleanup: +	test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_append(void) +{ +	test_tc_opts_append_target(BPF_TCX_INGRESS); +	test_tc_opts_append_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_dev_cleanup_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; +	struct test_tc_link *skel; +	int err, ifindex; + +	ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth"); +	ifindex = if_nametoindex("tcx_opts1"); +	ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); +	fd3 = bpf_program__fd(skel->progs.tc3); +	fd4 = bpf_program__fd(skel->progs.tc4); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); +	id3 = id_from_prog_fd(fd3); +	id4 = id_from_prog_fd(fd4); + +	
ASSERT_NEQ(id1, id2, "prog_ids_1_2"); +	ASSERT_NEQ(id3, id4, "prog_ids_3_4"); +	ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + +	assert_mprog_count_ifindex(ifindex, target, 0); + +	err = bpf_prog_attach_opts(fd1, ifindex, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count_ifindex(ifindex, target, 1); + +	err = bpf_prog_attach_opts(fd2, ifindex, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup1; + +	assert_mprog_count_ifindex(ifindex, target, 2); + +	err = bpf_prog_attach_opts(fd3, ifindex, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup2; + +	assert_mprog_count_ifindex(ifindex, target, 3); + +	err = bpf_prog_attach_opts(fd4, ifindex, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup3; + +	assert_mprog_count_ifindex(ifindex, target, 4); + +	ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); +	ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); +	ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); +	return; +cleanup3: +	/* progs were attached to the veth's ifindex, so detach from ifindex, not loopback */ +	err = bpf_prog_detach_opts(fd3, ifindex, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count_ifindex(ifindex, target, 2); +cleanup2: +	err = bpf_prog_detach_opts(fd2, ifindex, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count_ifindex(ifindex, target, 1); +cleanup1: +	err = bpf_prog_detach_opts(fd1, ifindex, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count_ifindex(ifindex, target, 0); +cleanup: +	test_tc_link__destroy(skel); + +	ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); +	ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); +	ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); +} + +void serial_test_tc_opts_dev_cleanup(void) +{ +	test_tc_opts_dev_cleanup_target(BPF_TCX_INGRESS); +	test_tc_opts_dev_cleanup_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_mixed_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	__u32 pid1, pid2, pid3, pid4, lid2, lid4; +	__u32 prog_flags[4], link_flags[4]; +	__u32 prog_ids[4], link_ids[4]; +	struct test_tc_link *skel; +	struct bpf_link *link; +	int err, detach_fd; + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), +		  0, "tc3_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), +		  0, "tc4_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); +	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); +	pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); + +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); +	ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); +	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1), +				   loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	detach_fd = bpf_program__fd(skel->progs.tc1); + +	
assert_mprog_count(target, 1); + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup1; +	skel->links.tc2 = link; + +	lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + +	assert_mprog_count(target, 2); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_REPLACE, +		.replace_prog_fd = bpf_program__fd(skel->progs.tc1), +	); + +	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2), +				   loopback, target, &opta); +	ASSERT_EQ(err, -EEXIST, "prog_attach"); + +	assert_mprog_count(target, 2); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_REPLACE, +		.replace_prog_fd = bpf_program__fd(skel->progs.tc2), +	); + +	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1), +				   loopback, target, &opta); +	ASSERT_EQ(err, -EEXIST, "prog_attach"); + +	assert_mprog_count(target, 2); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_REPLACE, +		.replace_prog_fd = bpf_program__fd(skel->progs.tc2), +	); + +	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3), +				   loopback, target, &opta); +	ASSERT_EQ(err, -EBUSY, "prog_attach"); + +	assert_mprog_count(target, 2); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_REPLACE, +		.replace_prog_fd = bpf_program__fd(skel->progs.tc1), +	); + +	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3), +				   loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup1; + +	detach_fd = bpf_program__fd(skel->progs.tc3); + +	assert_mprog_count(target, 2); + +	link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup1; +	skel->links.tc4 = link; + +	lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); + +	assert_mprog_count(target, 3); + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_REPLACE, +		.replace_prog_fd = bpf_program__fd(skel->progs.tc4), +	); + +	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2), +				   loopback, target, &opta); +	ASSERT_EQ(err, -EEXIST, "prog_attach"); + +	optq.prog_ids = prog_ids; +	optq.prog_attach_flags = prog_flags; +	optq.link_ids = link_ids; +	optq.link_attach_flags = link_flags; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	memset(prog_flags, 0, sizeof(prog_flags)); +	memset(link_ids, 0, sizeof(link_ids)); +	memset(link_flags, 0, sizeof(link_flags)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup1; + +	ASSERT_EQ(optq.count, 3, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], pid3, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]"); +	ASSERT_EQ(optq.link_ids[0], 0, "link_ids[0]"); +	ASSERT_EQ(optq.link_attach_flags[0], 0, "link_flags[0]"); +	ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]"); +	ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); +	ASSERT_EQ(optq.link_attach_flags[1], 0, "link_flags[1]"); +	ASSERT_EQ(optq.prog_ids[2], pid4, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]"); +	ASSERT_EQ(optq.link_ids[2], lid4, "link_ids[2]"); +	ASSERT_EQ(optq.link_attach_flags[2], 0, "link_flags[2]"); +	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); +	ASSERT_EQ(optq.prog_attach_flags[3], 0, "prog_flags[3]"); +	ASSERT_EQ(optq.link_ids[3], 0, "link_ids[3]"); +	ASSERT_EQ(optq.link_attach_flags[3], 0, "link_flags[3]"); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +cleanup1: +	err = bpf_prog_detach_opts(detach_fd, 
loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 2); + +cleanup: +	test_tc_link__destroy(skel); +	assert_mprog_count(target, 0); +} + +void serial_test_tc_opts_mixed(void) +{ +	test_tc_opts_mixed_target(BPF_TCX_INGRESS); +	test_tc_opts_mixed_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_demixed_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_tcx_opts, optl); +	struct test_tc_link *skel; +	struct bpf_link *link; +	__u32 pid1, pid2; +	int err; + +	skel = test_tc_link__open(); +	if (!ASSERT_OK_PTR(skel, "skel_open")) +		goto cleanup; + +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), +		  0, "tc1_attach_type"); +	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), +		  0, "tc2_attach_type"); + +	err = test_tc_link__load(skel); +	if (!ASSERT_OK(err, "skel_load")) +		goto cleanup; + +	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); +	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); +	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + +	assert_mprog_count(target, 0); + +	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1), +				   loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); +	if (!ASSERT_OK_PTR(link, "link_attach")) +		goto cleanup1; +	skel->links.tc2 = link; + +	assert_mprog_count(target, 2); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_AFTER, +	); + +	err = bpf_prog_detach_opts(0, loopback, target, &optd); +	ASSERT_EQ(err, -EBUSY, "prog_detach"); + +	assert_mprog_count(target, 2); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_BEFORE, +	); + +	err = bpf_prog_detach_opts(0, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count(target, 1); +	goto cleanup; + +cleanup1: +	err = bpf_prog_detach_opts(bpf_program__fd(skel->progs.tc1), +				   loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 2); + +cleanup: +	test_tc_link__destroy(skel); +	assert_mprog_count(target, 0); +} + +void serial_test_tc_opts_demixed(void) +{ +	test_tc_opts_demixed_target(BPF_TCX_INGRESS); +	test_tc_opts_demixed_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_detach_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; +	struct test_tc_link *skel; +	__u32 prog_ids[5]; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); +	fd3 = bpf_program__fd(skel->progs.tc3); +	fd4 = bpf_program__fd(skel->progs.tc4); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); +	id3 = id_from_prog_fd(fd3); +	id4 = id_from_prog_fd(fd4); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); +	ASSERT_NEQ(id3, id4, "prog_ids_3_4"); +	ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup1; + +	assert_mprog_count(target, 2); + +	err = bpf_prog_attach_opts(fd3, loopback, target, 
&opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup2; + +	assert_mprog_count(target, 3); + +	err = bpf_prog_attach_opts(fd4, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup3; + +	assert_mprog_count(target, 4); + +	optq.prog_ids = prog_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup4; + +	ASSERT_EQ(optq.count, 4, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); +	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_BEFORE, +	); + +	err = bpf_prog_detach_opts(0, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count(target, 3); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup4; + +	ASSERT_EQ(optq.count, 3, "count"); +	ASSERT_EQ(optq.revision, 6, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_AFTER, +	); + +	err = bpf_prog_detach_opts(0, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count(target, 2); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup4; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 7, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +	LIBBPF_OPTS_RESET(optd); + +	err = bpf_prog_detach_opts(fd3, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 1); + +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 0); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_BEFORE, +	); + +	err = bpf_prog_detach_opts(0, loopback, target, &optd); +	ASSERT_EQ(err, -ENOENT, "prog_detach"); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_AFTER, +	); + +	err = bpf_prog_detach_opts(0, loopback, target, &optd); +	ASSERT_EQ(err, -ENOENT, "prog_detach"); +	goto cleanup; + +cleanup4: +	err = bpf_prog_detach_opts(fd4, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 3); + +cleanup3: +	err = bpf_prog_detach_opts(fd3, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 2); + +cleanup2: +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 1); + +cleanup1: +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 0); + +cleanup: +	test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_detach(void) +{ +	test_tc_opts_detach_target(BPF_TCX_INGRESS); +	test_tc_opts_detach_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_detach_before_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, 
opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; +	struct test_tc_link *skel; +	__u32 prog_ids[5]; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); +	fd3 = bpf_program__fd(skel->progs.tc3); +	fd4 = bpf_program__fd(skel->progs.tc4); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); +	id3 = id_from_prog_fd(fd3); +	id4 = id_from_prog_fd(fd4); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); +	ASSERT_NEQ(id3, id4, "prog_ids_3_4"); +	ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup1; + +	assert_mprog_count(target, 2); + +	err = bpf_prog_attach_opts(fd3, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup2; + +	assert_mprog_count(target, 3); + +	err = bpf_prog_attach_opts(fd4, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup3; + +	assert_mprog_count(target, 4); + +	optq.prog_ids = prog_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup4; + +	ASSERT_EQ(optq.count, 4, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); +	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_BEFORE, +		.relative_fd = fd2, +	); + +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count(target, 3); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup4; + +	ASSERT_EQ(optq.count, 3, "count"); +	ASSERT_EQ(optq.revision, 6, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_BEFORE, +		.relative_fd = fd2, +	); + +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_EQ(err, -ENOENT, "prog_detach"); +	assert_mprog_count(target, 3); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_BEFORE, +		.relative_fd = fd4, +	); + +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_EQ(err, -ERANGE, "prog_detach"); +	assert_mprog_count(target, 3); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_BEFORE, +		.relative_fd = fd1, +	); + +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_EQ(err, -ENOENT, "prog_detach"); +	assert_mprog_count(target, 3); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_BEFORE, +		.relative_fd = fd3, +	); + +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count(target, 2); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = 
ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup4; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 7, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_BEFORE, +		.relative_fd = fd4, +	); + +	err = bpf_prog_detach_opts(0, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count(target, 1); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup4; + +	ASSERT_EQ(optq.count, 1, "count"); +	ASSERT_EQ(optq.revision, 8, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_BEFORE, +	); + +	err = bpf_prog_detach_opts(0, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count(target, 0); +	goto cleanup; + +cleanup4: +	err = bpf_prog_detach_opts(fd4, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 3); + +cleanup3: +	err = bpf_prog_detach_opts(fd3, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 2); + +cleanup2: +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 1); + +cleanup1: +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 0); + +cleanup: +	test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_detach_before(void) +{ +	test_tc_opts_detach_before_target(BPF_TCX_INGRESS); +	test_tc_opts_detach_before_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_detach_after_target(int target) +{ +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	LIBBPF_OPTS(bpf_prog_query_opts, optq); +	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; +	struct test_tc_link *skel; +	__u32 prog_ids[5]; +	int err; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc1); +	fd2 = bpf_program__fd(skel->progs.tc2); +	fd3 = bpf_program__fd(skel->progs.tc3); +	fd4 = bpf_program__fd(skel->progs.tc4); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); +	id3 = id_from_prog_fd(fd3); +	id4 = id_from_prog_fd(fd4); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); +	ASSERT_NEQ(id3, id4, "prog_ids_3_4"); +	ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup; + +	assert_mprog_count(target, 1); + +	err = bpf_prog_attach_opts(fd2, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup1; + +	assert_mprog_count(target, 2); + +	err = bpf_prog_attach_opts(fd3, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup2; + +	assert_mprog_count(target, 3); + +	err = bpf_prog_attach_opts(fd4, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup3; + +	assert_mprog_count(target, 4); + +	optq.prog_ids = prog_ids; + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	
if (!ASSERT_OK(err, "prog_query")) +		goto cleanup4; + +	ASSERT_EQ(optq.count, 4, "count"); +	ASSERT_EQ(optq.revision, 5, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); +	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_AFTER, +		.relative_fd = fd1, +	); + +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count(target, 3); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup4; + +	ASSERT_EQ(optq.count, 3, "count"); +	ASSERT_EQ(optq.revision, 6, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]"); +	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_AFTER, +		.relative_fd = fd1, +	); + +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_EQ(err, -ENOENT, "prog_detach"); +	assert_mprog_count(target, 3); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_AFTER, +		.relative_fd = fd4, +	); + +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_EQ(err, -ERANGE, "prog_detach"); +	assert_mprog_count(target, 3); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_AFTER, +		.relative_fd = fd3, +	); + +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_EQ(err, -ERANGE, "prog_detach"); +	assert_mprog_count(target, 3); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_AFTER, +		.relative_fd = fd1, +	); + +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_EQ(err, -ERANGE, "prog_detach"); +	assert_mprog_count(target, 3); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_AFTER, +		.relative_fd = fd1, +	); + +	err = bpf_prog_detach_opts(fd3, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count(target, 2); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup4; + +	ASSERT_EQ(optq.count, 2, "count"); +	ASSERT_EQ(optq.revision, 7, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]"); +	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_AFTER, +		.relative_fd = fd1, +	); + +	err = bpf_prog_detach_opts(0, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count(target, 1); + +	memset(prog_ids, 0, sizeof(prog_ids)); +	optq.count = ARRAY_SIZE(prog_ids); + +	err = bpf_prog_query_opts(loopback, target, &optq); +	if (!ASSERT_OK(err, "prog_query")) +		goto cleanup4; + +	ASSERT_EQ(optq.count, 1, "count"); +	ASSERT_EQ(optq.revision, 8, "revision"); +	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); +	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + +	LIBBPF_OPTS_RESET(optd, +		.flags = BPF_F_AFTER, +	); + +	err = bpf_prog_detach_opts(0, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); + +	assert_mprog_count(target, 0); +	goto cleanup; + +cleanup4: +	err = bpf_prog_detach_opts(fd4, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 3); + +cleanup3: +	err = bpf_prog_detach_opts(fd3, 
loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 2); + +cleanup2: +	err = bpf_prog_detach_opts(fd2, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 1); + +cleanup1: +	err = bpf_prog_detach_opts(fd1, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	assert_mprog_count(target, 0); + +cleanup: +	test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_detach_after(void) +{ +	test_tc_opts_detach_after_target(BPF_TCX_INGRESS); +	test_tc_opts_detach_after_target(BPF_TCX_EGRESS); +} + +static void test_tc_opts_delete_empty(int target, bool chain_tc_old) +{ +	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	int err; + +	assert_mprog_count(target, 0); +	if (chain_tc_old) { +		tc_hook.attach_point = target == BPF_TCX_INGRESS ? +				       BPF_TC_INGRESS : BPF_TC_EGRESS; +		err = bpf_tc_hook_create(&tc_hook); +		ASSERT_OK(err, "bpf_tc_hook_create"); +		__assert_mprog_count(target, 0, true, loopback); +	} +	err = bpf_prog_detach_opts(0, loopback, target, &optd); +	ASSERT_EQ(err, -ENOENT, "prog_detach"); +	if (chain_tc_old) { +		tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; +		bpf_tc_hook_destroy(&tc_hook); +	} +	assert_mprog_count(target, 0); +} + +void serial_test_tc_opts_delete_empty(void) +{ +	test_tc_opts_delete_empty(BPF_TCX_INGRESS, false); +	test_tc_opts_delete_empty(BPF_TCX_EGRESS, false); +	test_tc_opts_delete_empty(BPF_TCX_INGRESS, true); +	test_tc_opts_delete_empty(BPF_TCX_EGRESS, true); +} + +static void test_tc_chain_mixed(int target) +{ +	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); +	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); +	LIBBPF_OPTS(bpf_prog_attach_opts, opta); +	LIBBPF_OPTS(bpf_prog_detach_opts, optd); +	__u32 fd1, fd2, fd3, id1, id2, id3; +	struct test_tc_link *skel; +	int err, detach_fd; + +	skel = test_tc_link__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "skel_load")) +		goto cleanup; + +	fd1 = bpf_program__fd(skel->progs.tc4); +	fd2 = bpf_program__fd(skel->progs.tc5); +	fd3 = bpf_program__fd(skel->progs.tc6); + +	id1 = id_from_prog_fd(fd1); +	id2 = id_from_prog_fd(fd2); +	id3 = id_from_prog_fd(fd3); + +	ASSERT_NEQ(id1, id2, "prog_ids_1_2"); +	ASSERT_NEQ(id2, id3, "prog_ids_2_3"); + +	assert_mprog_count(target, 0); + +	tc_hook.attach_point = target == BPF_TCX_INGRESS ? +			       BPF_TC_INGRESS : BPF_TC_EGRESS; +	err = bpf_tc_hook_create(&tc_hook); +	err = err == -EEXIST ? 
0 : err; +	if (!ASSERT_OK(err, "bpf_tc_hook_create")) +		goto cleanup; + +	tc_opts.prog_fd = fd2; +	err = bpf_tc_attach(&tc_hook, &tc_opts); +	if (!ASSERT_OK(err, "bpf_tc_attach")) +		goto cleanup_hook; + +	err = bpf_prog_attach_opts(fd3, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_filter; + +	detach_fd = fd3; + +	assert_mprog_count(target, 1); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); +	ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5"); +	ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6"); + +	skel->bss->seen_tc4 = false; +	skel->bss->seen_tc5 = false; +	skel->bss->seen_tc6 = false; + +	LIBBPF_OPTS_RESET(opta, +		.flags = BPF_F_REPLACE, +		.replace_prog_fd = fd3, +	); + +	err = bpf_prog_attach_opts(fd1, loopback, target, &opta); +	if (!ASSERT_EQ(err, 0, "prog_attach")) +		goto cleanup_opts; + +	detach_fd = fd1; + +	assert_mprog_count(target, 1); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); +	ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5"); +	ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6"); + +	skel->bss->seen_tc4 = false; +	skel->bss->seen_tc5 = false; +	skel->bss->seen_tc6 = false; + +cleanup_opts: +	err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd); +	ASSERT_OK(err, "prog_detach"); +	__assert_mprog_count(target, 0, true, loopback); + +	ASSERT_OK(system(ping_cmd), ping_cmd); + +	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); +	ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5"); +	ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6"); + +cleanup_filter: +	tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; +	err = bpf_tc_detach(&tc_hook, &tc_opts); +	ASSERT_OK(err, "bpf_tc_detach"); + +cleanup_hook: +	tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; +	bpf_tc_hook_destroy(&tc_hook); + +cleanup: +	test_tc_link__destroy(skel); +} + +void serial_test_tc_opts_chain_mixed(void) +{ +	test_tc_chain_mixed(BPF_TCX_INGRESS); +	test_tc_chain_mixed(BPF_TCX_EGRESS); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c index 13bcaeb028b8..56685fc03c7e 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c @@ -347,7 +347,7 @@ static void syncookie_estab(void)  	exp_active_estab_in.max_delack_ms = 22;  	exp_passive_hdr_stg.syncookie = true; -	exp_active_hdr_stg.resend_syn = true, +	exp_active_hdr_stg.resend_syn = true;  	prepare_out(); diff --git a/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c b/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c new file mode 100644 index 000000000000..375677c19146 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. 
and affiliates.*/ + +#include <test_progs.h> +#include <network_helpers.h> +#include "test_ldsx_insn.skel.h" + +static void test_map_val_and_probed_memory(void) +{ +	struct test_ldsx_insn *skel; +	int err; + +	skel = test_ldsx_insn__open(); +	if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) +		return; + +	if (skel->rodata->skip) { +		test__skip(); +		goto out; +	} + +	bpf_program__set_autoload(skel->progs.rdonly_map_prog, true); +	bpf_program__set_autoload(skel->progs.map_val_prog, true); +	bpf_program__set_autoload(skel->progs.test_ptr_struct_arg, true); + +	err = test_ldsx_insn__load(skel); +	if (!ASSERT_OK(err, "test_ldsx_insn__load")) +		goto out; + +	err = test_ldsx_insn__attach(skel); +	if (!ASSERT_OK(err, "test_ldsx_insn__attach")) +		goto out; + +	ASSERT_OK(trigger_module_test_read(256), "trigger_read"); + +	ASSERT_EQ(skel->bss->done1, 1, "done1"); +	ASSERT_EQ(skel->bss->ret1, 1, "ret1"); +	ASSERT_EQ(skel->bss->done2, 1, "done2"); +	ASSERT_EQ(skel->bss->ret2, 1, "ret2"); +	ASSERT_EQ(skel->bss->int_member, -1, "int_member"); + +out: +	test_ldsx_insn__destroy(skel); +} + +static void test_ctx_member_sign_ext(void) +{ +	struct test_ldsx_insn *skel; +	int err, fd, cgroup_fd; +	char buf[16] = {0}; +	socklen_t optlen; + +	cgroup_fd = test__join_cgroup("/ldsx_test"); +	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /ldsx_test")) +		return; + +	skel = test_ldsx_insn__open(); +	if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) +		goto close_cgroup_fd; + +	if (skel->rodata->skip) { +		test__skip(); +		goto destroy_skel; +	} + +	bpf_program__set_autoload(skel->progs._getsockopt, true); + +	err = test_ldsx_insn__load(skel); +	if (!ASSERT_OK(err, "test_ldsx_insn__load")) +		goto destroy_skel; + +	skel->links._getsockopt = +		bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd); +	if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link")) +		goto destroy_skel; + +	fd = socket(AF_INET, SOCK_STREAM, 0); +	if (!ASSERT_GE(fd, 0, "socket")) +		goto destroy_skel; + +	optlen = sizeof(buf); +	(void)getsockopt(fd, SOL_IP, IP_TTL, buf, &optlen); + +	ASSERT_EQ(skel->bss->set_optlen, -1, "optlen"); +	ASSERT_EQ(skel->bss->set_retval, -1, "retval"); + +	close(fd); +destroy_skel: +	test_ldsx_insn__destroy(skel); +close_cgroup_fd: +	close(cgroup_fd); +} + +static void test_ctx_member_narrow_sign_ext(void) +{ +	struct test_ldsx_insn *skel; +	struct __sk_buff skb = {}; +	LIBBPF_OPTS(bpf_test_run_opts, topts, +		    .data_in = &pkt_v4, +		    .data_size_in = sizeof(pkt_v4), +		    .ctx_in = &skb, +		    .ctx_size_in = sizeof(skb), +	); +	int err, prog_fd; + +	skel = test_ldsx_insn__open(); +	if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) +		return; + +	if (skel->rodata->skip) { +		test__skip(); +		goto out; +	} + +	bpf_program__set_autoload(skel->progs._tc, true); + +	err = test_ldsx_insn__load(skel); +	if (!ASSERT_OK(err, "test_ldsx_insn__load")) +		goto out; + +	prog_fd = bpf_program__fd(skel->progs._tc); +	err = bpf_prog_test_run_opts(prog_fd, &topts); +	ASSERT_OK(err, "test_run"); + +	ASSERT_EQ(skel->bss->set_mark, -2, "set_mark"); + +out: +	test_ldsx_insn__destroy(skel); +} + +void test_ldsx_insn(void) +{ +	if (test__start_subtest("map_val and probed_memory")) +		test_map_val_and_probed_memory(); +	if (test__start_subtest("ctx_member_sign_ext")) +		test_ctx_member_sign_ext(); +	if (test__start_subtest("ctx_member_narrow_sign_ext")) +		test_ctx_member_narrow_sign_ext(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c 
b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c index 1c75a32186d6..fe0fb0c9849a 100644 --- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c +++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c @@ -55,6 +55,25 @@ static void test_fentry(void)  	ASSERT_EQ(skel->bss->t6, 1, "t6 ret"); +	ASSERT_EQ(skel->bss->t7_a, 16, "t7:a"); +	ASSERT_EQ(skel->bss->t7_b, 17, "t7:b"); +	ASSERT_EQ(skel->bss->t7_c, 18, "t7:c"); +	ASSERT_EQ(skel->bss->t7_d, 19, "t7:d"); +	ASSERT_EQ(skel->bss->t7_e, 20, "t7:e"); +	ASSERT_EQ(skel->bss->t7_f_a, 21, "t7:f.a"); +	ASSERT_EQ(skel->bss->t7_f_b, 22, "t7:f.b"); +	ASSERT_EQ(skel->bss->t7_ret, 133, "t7 ret"); + +	ASSERT_EQ(skel->bss->t8_a, 16, "t8:a"); +	ASSERT_EQ(skel->bss->t8_b, 17, "t8:b"); +	ASSERT_EQ(skel->bss->t8_c, 18, "t8:c"); +	ASSERT_EQ(skel->bss->t8_d, 19, "t8:d"); +	ASSERT_EQ(skel->bss->t8_e, 20, "t8:e"); +	ASSERT_EQ(skel->bss->t8_f_a, 21, "t8:f.a"); +	ASSERT_EQ(skel->bss->t8_f_b, 22, "t8:f.b"); +	ASSERT_EQ(skel->bss->t8_g, 23, "t8:g"); +	ASSERT_EQ(skel->bss->t8_ret, 156, "t8 ret"); +  	tracing_struct__detach(skel);  destroy_skel:  	tracing_struct__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c index e91d0d1769f1..6cd7349d4a2b 100644 --- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c +++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c @@ -88,8 +88,8 @@ void serial_test_trampoline_count(void)  	if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))  		goto cleanup; -	ASSERT_EQ(opts.retval & 0xffff, 4, "bpf_modify_return_test.result"); -	ASSERT_EQ(opts.retval >> 16, 1, "bpf_modify_return_test.side_effect"); +	ASSERT_EQ(opts.retval & 0xffff, 33, "bpf_modify_return_test.result"); +	ASSERT_EQ(opts.retval >> 16, 2, "bpf_modify_return_test.side_effect");  cleanup:  	for (; i >= 0; i--) { diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c new file mode 100644 index 000000000000..cd051d3901a9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -0,0 +1,415 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <unistd.h> +#include <test_progs.h> +#include "uprobe_multi.skel.h" +#include "uprobe_multi_bench.skel.h" +#include "uprobe_multi_usdt.skel.h" +#include "bpf/libbpf_internal.h" +#include "testing_helpers.h" + +static char test_data[] = "test_data"; + +noinline void uprobe_multi_func_1(void) +{ +	asm volatile (""); +} + +noinline void uprobe_multi_func_2(void) +{ +	asm volatile (""); +} + +noinline void uprobe_multi_func_3(void) +{ +	asm volatile (""); +} + +struct child { +	int go[2]; +	int pid; +}; + +static void release_child(struct child *child) +{ +	int child_status; + +	if (!child) +		return; +	close(child->go[1]); +	close(child->go[0]); +	if (child->pid > 0) +		waitpid(child->pid, &child_status, 0); +} + +static void kick_child(struct child *child) +{ +	char c = 1; + +	if (child) { +		write(child->go[1], &c, 1); +		release_child(child); +	} +	fflush(NULL); +} + +static struct child *spawn_child(void) +{ +	static struct child child; +	int err; +	int c; + +	/* pipe to notify child to execute the trigger functions */ +	if (pipe(child.go)) +		return NULL; + +	child.pid = fork(); +	if (child.pid < 0) { +		release_child(&child); +		errno = EINVAL; +		return NULL; +	} + +	/* child */ +	if (child.pid == 0) { +		close(child.go[1]); + +		/* wait for parent's kick */ +		err = read(child.go[0], &c, 1); +		if (err != 1) +			
exit(err); + +	uprobe_multi_func_1(); +	uprobe_multi_func_2(); +	uprobe_multi_func_3(); + +	exit(errno); +	} + +	return &child; +} + +static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child) +{ +	skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; +	skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; +	skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; + +	skel->bss->user_ptr = test_data; + +	/* +	 * Disable the pid check in the bpf program if this is the pid filter test, +	 * because the probe should be executed only by child->pid +	 * passed at the probe attach. +	 */ +	skel->bss->pid = child ? 0 : getpid(); + +	if (child) +		kick_child(child); + +	/* trigger all probes */ +	uprobe_multi_func_1(); +	uprobe_multi_func_2(); +	uprobe_multi_func_3(); + +	/* +	 * There are 2 entry and 2 exit probes called for each uprobe_multi_func_[123] +	 * function and each sleepable probe (6) increments uprobe_multi_sleep_result. +	 */ +	ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result"); +	ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result"); +	ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result"); + +	ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result"); +	ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 2, "uretprobe_multi_func_2_result"); +	ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result"); + +	ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result"); + +	if (child) +		ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid"); +} + +static void test_skel_api(void) +{ +	struct uprobe_multi *skel = NULL; +	int err; + +	skel = uprobe_multi__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) +		goto cleanup; + +	err = uprobe_multi__attach(skel); +	if (!ASSERT_OK(err, "uprobe_multi__attach")) +		goto cleanup; + +	uprobe_multi_test_run(skel, NULL); + +cleanup: +	uprobe_multi__destroy(skel); +} + +static void +__test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts, +		  struct child *child) +{ +	pid_t pid = child ?
child->pid : -1; +	struct uprobe_multi *skel = NULL; + +	skel = uprobe_multi__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) +		goto cleanup; + +	opts->retprobe = false; +	skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, pid, +							      binary, pattern, opts); +	if (!ASSERT_OK_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi")) +		goto cleanup; + +	opts->retprobe = true; +	skel->links.uretprobe = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, pid, +								 binary, pattern, opts); +	if (!ASSERT_OK_PTR(skel->links.uretprobe, "bpf_program__attach_uprobe_multi")) +		goto cleanup; + +	opts->retprobe = false; +	skel->links.uprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uprobe_sleep, pid, +								    binary, pattern, opts); +	if (!ASSERT_OK_PTR(skel->links.uprobe_sleep, "bpf_program__attach_uprobe_multi")) +		goto cleanup; + +	opts->retprobe = true; +	skel->links.uretprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uretprobe_sleep, +								       pid, binary, pattern, opts); +	if (!ASSERT_OK_PTR(skel->links.uretprobe_sleep, "bpf_program__attach_uprobe_multi")) +		goto cleanup; + +	opts->retprobe = false; +	skel->links.uprobe_extra = bpf_program__attach_uprobe_multi(skel->progs.uprobe_extra, -1, +								    binary, pattern, opts); +	if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi")) +		goto cleanup; + +	uprobe_multi_test_run(skel, child); + +cleanup: +	uprobe_multi__destroy(skel); +} + +static void +test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts) +{ +	struct child *child; + +	/* no pid filter */ +	__test_attach_api(binary, pattern, opts, NULL); + +	/* pid filter */ +	child = spawn_child(); +	if (!ASSERT_OK_PTR(child, "spawn_child")) +		return; + +	__test_attach_api(binary, pattern, opts, child); +} + +static void test_attach_api_pattern(void) +{ +	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + +	test_attach_api("/proc/self/exe", "uprobe_multi_func_*", &opts); +	test_attach_api("/proc/self/exe", "uprobe_multi_func_?", &opts); +} + +static void test_attach_api_syms(void) +{ +	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); +	const char *syms[3] = { +		"uprobe_multi_func_1", +		"uprobe_multi_func_2", +		"uprobe_multi_func_3", +	}; + +	opts.syms = syms; +	opts.cnt = ARRAY_SIZE(syms); +	test_attach_api("/proc/self/exe", NULL, &opts); +} + +static void __test_link_api(struct child *child) +{ +	int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1; +	LIBBPF_OPTS(bpf_link_create_opts, opts); +	const char *path = "/proc/self/exe"; +	struct uprobe_multi *skel = NULL; +	unsigned long *offsets = NULL; +	const char *syms[3] = { +		"uprobe_multi_func_1", +		"uprobe_multi_func_2", +		"uprobe_multi_func_3", +	}; +	int link_extra_fd = -1; +	int err; + +	err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets); +	if (!ASSERT_OK(err, "elf_resolve_syms_offsets")) +		return; + +	opts.uprobe_multi.path = path; +	opts.uprobe_multi.offsets = offsets; +	opts.uprobe_multi.cnt = ARRAY_SIZE(syms); +	opts.uprobe_multi.pid = child ? 
child->pid : 0; + +	skel = uprobe_multi__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) +		goto cleanup; + +	opts.kprobe_multi.flags = 0; +	prog_fd = bpf_program__fd(skel->progs.uprobe); +	link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); +	if (!ASSERT_GE(link1_fd, 0, "link1_fd")) +		goto cleanup; + +	opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN; +	prog_fd = bpf_program__fd(skel->progs.uretprobe); +	link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); +	if (!ASSERT_GE(link2_fd, 0, "link2_fd")) +		goto cleanup; + +	opts.kprobe_multi.flags = 0; +	prog_fd = bpf_program__fd(skel->progs.uprobe_sleep); +	link3_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); +	if (!ASSERT_GE(link3_fd, 0, "link3_fd")) +		goto cleanup; + +	opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN; +	prog_fd = bpf_program__fd(skel->progs.uretprobe_sleep); +	link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); +	if (!ASSERT_GE(link4_fd, 0, "link4_fd")) +		goto cleanup; + +	opts.kprobe_multi.flags = 0; +	opts.uprobe_multi.pid = 0; +	prog_fd = bpf_program__fd(skel->progs.uprobe_extra); +	link_extra_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); +	if (!ASSERT_GE(link_extra_fd, 0, "link_extra_fd")) +		goto cleanup; + +	uprobe_multi_test_run(skel, child); + +cleanup: +	if (link1_fd >= 0) +		close(link1_fd); +	if (link2_fd >= 0) +		close(link2_fd); +	if (link3_fd >= 0) +		close(link3_fd); +	if (link4_fd >= 0) +		close(link4_fd); +	if (link_extra_fd >= 0) +		close(link_extra_fd); + +	uprobe_multi__destroy(skel); +	free(offsets); +} + +void test_link_api(void) +{ +	struct child *child; + +	/* no pid filter */ +	__test_link_api(NULL); + +	/* pid filter */ +	child = spawn_child(); +	if (!ASSERT_OK_PTR(child, "spawn_child")) +		return; + +	__test_link_api(child); +} + +static void test_bench_attach_uprobe(void) +{ +	long attach_start_ns = 0, attach_end_ns = 0; +	struct uprobe_multi_bench *skel = NULL; +	long detach_start_ns, detach_end_ns; +	double attach_delta, detach_delta; +	int err; + +	skel = uprobe_multi_bench__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "uprobe_multi_bench__open_and_load")) +		goto cleanup; + +	attach_start_ns = get_time_ns(); + +	err = uprobe_multi_bench__attach(skel); +	if (!ASSERT_OK(err, "uprobe_multi_bench__attach")) +		goto cleanup; + +	attach_end_ns = get_time_ns(); + +	system("./uprobe_multi bench"); + +	ASSERT_EQ(skel->bss->count, 50000, "uprobes_count"); + +cleanup: +	detach_start_ns = get_time_ns(); +	uprobe_multi_bench__destroy(skel); +	detach_end_ns = get_time_ns(); + +	attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0; +	detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0; + +	printf("%s: attached in %7.3lfs\n", __func__, attach_delta); +	printf("%s: detached in %7.3lfs\n", __func__, detach_delta); +} + +static void test_bench_attach_usdt(void) +{ +	long attach_start_ns = 0, attach_end_ns = 0; +	struct uprobe_multi_usdt *skel = NULL; +	long detach_start_ns, detach_end_ns; +	double attach_delta, detach_delta; + +	skel = uprobe_multi_usdt__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open")) +		goto cleanup; + +	attach_start_ns = get_time_ns(); + +	skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, -1, "./uprobe_multi", +						     "test", "usdt", NULL); +	if (!ASSERT_OK_PTR(skel->links.usdt0, "bpf_program__attach_usdt")) +		goto cleanup; + +	attach_end_ns = get_time_ns(); + +	system("./uprobe_multi usdt"); + +	
ASSERT_EQ(skel->bss->count, 50000, "usdt_count"); + +cleanup: +	detach_start_ns = get_time_ns(); +	uprobe_multi_usdt__destroy(skel); +	detach_end_ns = get_time_ns(); + +	attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0; +	detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0; + +	printf("%s: attached in %7.3lfs\n", __func__, attach_delta); +	printf("%s: detached in %7.3lfs\n", __func__, detach_delta); +} + +void test_uprobe_multi_test(void) +{ +	if (test__start_subtest("skel_api")) +		test_skel_api(); +	if (test__start_subtest("attach_api_pattern")) +		test_attach_api_pattern(); +	if (test__start_subtest("attach_api_syms")) +		test_attach_api_syms(); +	if (test__start_subtest("link_api")) +		test_link_api(); +	if (test__start_subtest("bench_uprobe")) +		test_bench_attach_uprobe(); +	if (test__start_subtest("bench_usdt")) +		test_bench_attach_usdt(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 070a13833c3f..e3e68c97b40c 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -11,6 +11,7 @@  #include "verifier_bounds_deduction_non_const.skel.h"  #include "verifier_bounds_mix_sign_unsign.skel.h"  #include "verifier_bpf_get_stack.skel.h" +#include "verifier_bswap.skel.h"  #include "verifier_btf_ctx_access.skel.h"  #include "verifier_cfg.skel.h"  #include "verifier_cgroup_inv_retcode.skel.h" @@ -24,6 +25,7 @@  #include "verifier_direct_stack_access_wraparound.skel.h"  #include "verifier_div0.skel.h"  #include "verifier_div_overflow.skel.h" +#include "verifier_gotol.skel.h"  #include "verifier_helper_access_var_len.skel.h"  #include "verifier_helper_packet_access.skel.h"  #include "verifier_helper_restricted.skel.h" @@ -31,6 +33,7 @@  #include "verifier_int_ptr.skel.h"  #include "verifier_jeq_infer_not_null.skel.h"  #include "verifier_ld_ind.skel.h" +#include "verifier_ldsx.skel.h"  #include "verifier_leak_ptr.skel.h"  #include "verifier_loops1.skel.h"  #include "verifier_lwt.skel.h" @@ -40,6 +43,7 @@  #include "verifier_map_ret_val.skel.h"  #include "verifier_masking.skel.h"  #include "verifier_meta_access.skel.h" +#include "verifier_movsx.skel.h"  #include "verifier_netfilter_ctx.skel.h"  #include "verifier_netfilter_retcode.skel.h"  #include "verifier_prevent_map_lookup.skel.h" @@ -51,6 +55,7 @@  #include "verifier_ringbuf.skel.h"  #include "verifier_runtime_jit.skel.h"  #include "verifier_scalar_ids.skel.h" +#include "verifier_sdiv.skel.h"  #include "verifier_search_pruning.skel.h"  #include "verifier_sock.skel.h"  #include "verifier_spill_fill.skel.h" @@ -58,6 +63,7 @@  #include "verifier_stack_ptr.skel.h"  #include "verifier_subprog_precision.skel.h"  #include "verifier_subreg.skel.h" +#include "verifier_typedef.skel.h"  #include "verifier_uninit.skel.h"  #include "verifier_unpriv.skel.h"  #include "verifier_unpriv_perf.skel.h" @@ -112,6 +118,7 @@ void test_verifier_bounds_deduction(void)     { RUN(verifier_bounds_deduction);  void test_verifier_bounds_deduction_non_const(void)     { RUN(verifier_bounds_deduction_non_const); }  void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); }  void test_verifier_bpf_get_stack(void)        { RUN(verifier_bpf_get_stack); } +void test_verifier_bswap(void)                { RUN(verifier_bswap); }  void test_verifier_btf_ctx_access(void)       { RUN(verifier_btf_ctx_access); }  void test_verifier_cfg(void)                  { RUN(verifier_cfg); }  void 
test_verifier_cgroup_inv_retcode(void)   { RUN(verifier_cgroup_inv_retcode); } @@ -125,6 +132,7 @@ void test_verifier_direct_packet_access(void) { RUN(verifier_direct_packet_acces  void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); }  void test_verifier_div0(void)                 { RUN(verifier_div0); }  void test_verifier_div_overflow(void)         { RUN(verifier_div_overflow); } +void test_verifier_gotol(void)                { RUN(verifier_gotol); }  void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); }  void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); }  void test_verifier_helper_restricted(void)    { RUN(verifier_helper_restricted); } @@ -132,6 +140,7 @@ void test_verifier_helper_value_access(void)  { RUN(verifier_helper_value_access  void test_verifier_int_ptr(void)              { RUN(verifier_int_ptr); }  void test_verifier_jeq_infer_not_null(void)   { RUN(verifier_jeq_infer_not_null); }  void test_verifier_ld_ind(void)               { RUN(verifier_ld_ind); } +void test_verifier_ldsx(void)                  { RUN(verifier_ldsx); }  void test_verifier_leak_ptr(void)             { RUN(verifier_leak_ptr); }  void test_verifier_loops1(void)               { RUN(verifier_loops1); }  void test_verifier_lwt(void)                  { RUN(verifier_lwt); } @@ -141,6 +150,7 @@ void test_verifier_map_ptr_mixing(void)       { RUN(verifier_map_ptr_mixing); }  void test_verifier_map_ret_val(void)          { RUN(verifier_map_ret_val); }  void test_verifier_masking(void)              { RUN(verifier_masking); }  void test_verifier_meta_access(void)          { RUN(verifier_meta_access); } +void test_verifier_movsx(void)                 { RUN(verifier_movsx); }  void test_verifier_netfilter_ctx(void)        { RUN(verifier_netfilter_ctx); }  void test_verifier_netfilter_retcode(void)    { RUN(verifier_netfilter_retcode); }  void test_verifier_prevent_map_lookup(void)   { RUN(verifier_prevent_map_lookup); } @@ -152,6 +162,7 @@ void test_verifier_regalloc(void)             { RUN(verifier_regalloc); }  void test_verifier_ringbuf(void)              { RUN(verifier_ringbuf); }  void test_verifier_runtime_jit(void)          { RUN(verifier_runtime_jit); }  void test_verifier_scalar_ids(void)           { RUN(verifier_scalar_ids); } +void test_verifier_sdiv(void)                 { RUN(verifier_sdiv); }  void test_verifier_search_pruning(void)       { RUN(verifier_search_pruning); }  void test_verifier_sock(void)                 { RUN(verifier_sock); }  void test_verifier_spill_fill(void)           { RUN(verifier_spill_fill); } @@ -159,6 +170,7 @@ void test_verifier_spin_lock(void)            { RUN(verifier_spin_lock); }  void test_verifier_stack_ptr(void)            { RUN(verifier_stack_ptr); }  void test_verifier_subprog_precision(void)    { RUN(verifier_subprog_precision); }  void test_verifier_subreg(void)               { RUN(verifier_subreg); } +void test_verifier_typedef(void)              { RUN(verifier_typedef); }  void test_verifier_uninit(void)               { RUN(verifier_uninit); }  void test_verifier_unpriv(void)               { RUN(verifier_unpriv); }  void test_verifier_unpriv_perf(void)          { RUN(verifier_unpriv_perf); } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c index fa3cac5488f5..e6bcb6051402 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c +++ 
b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c @@ -1,5 +1,6 @@  // SPDX-License-Identifier: GPL-2.0  #include <test_progs.h> +#include "test_xdp_attach_fail.skel.h"  #define IFINDEX_LO 1  #define XDP_FLAGS_REPLACE		(1U << 4) @@ -85,10 +86,74 @@ out_1:  	bpf_object__close(obj1);  } +#define ERRMSG_LEN 64 + +struct xdp_errmsg { +	char msg[ERRMSG_LEN]; +}; + +static void on_xdp_errmsg(void *ctx, int cpu, void *data, __u32 size) +{ +	struct xdp_errmsg *ctx_errmg = ctx, *tp_errmsg = data; + +	memcpy(&ctx_errmg->msg, &tp_errmsg->msg, ERRMSG_LEN); +} + +static const char tgt_errmsg[] = "Invalid XDP flags for BPF link attachment"; + +static void test_xdp_attach_fail(const char *file) +{ +	struct test_xdp_attach_fail *skel = NULL; +	struct xdp_errmsg errmsg = {}; +	struct perf_buffer *pb = NULL; +	struct bpf_object *obj = NULL; +	int err, fd_xdp; + +	LIBBPF_OPTS(bpf_link_create_opts, opts); + +	skel = test_xdp_attach_fail__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "test_xdp_attach_fail__open_and_load")) +		goto out_close; + +	err = test_xdp_attach_fail__attach(skel); +	if (!ASSERT_EQ(err, 0, "test_xdp_attach_fail__attach")) +		goto out_close; + +	/* set up perf buffer */ +	pb = perf_buffer__new(bpf_map__fd(skel->maps.xdp_errmsg_pb), 1, +			      on_xdp_errmsg, NULL, &errmsg, NULL); +	if (!ASSERT_OK_PTR(pb, "perf_buffer__new")) +		goto out_close; + +	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &fd_xdp); +	if (!ASSERT_EQ(err, 0, "bpf_prog_test_load")) +		goto out_close; + +	opts.flags = 0xFF; // invalid flags to fail to attach XDP prog +	err = bpf_link_create(fd_xdp, IFINDEX_LO, BPF_XDP, &opts); +	if (!ASSERT_EQ(err, -EINVAL, "bpf_link_create")) +		goto out_close; + +	/* read perf buffer */ +	err = perf_buffer__poll(pb, 100); +	if (!ASSERT_GT(err, -1, "perf_buffer__poll")) +		goto out_close; + +	ASSERT_STRNEQ((const char *) errmsg.msg, tgt_errmsg, +		      42 /* strlen(tgt_errmsg) */, "check error message"); + +out_close: +	perf_buffer__free(pb); +	bpf_object__close(obj); +	test_xdp_attach_fail__destroy(skel); +} +  void serial_test_xdp_attach(void)  {  	if (test__start_subtest("xdp_attach"))  		test_xdp_attach("./test_xdp.bpf.o");  	if (test__start_subtest("xdp_attach_dynptr"))  		test_xdp_attach("./test_xdp_dynptr.bpf.o"); +	if (test__start_subtest("xdp_attach_failed")) +		test_xdp_attach_fail("./xdp_dummy.bpf.o");  }  |
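
The tc_opts tests above drive the tcx (BPF_TCX_INGRESS/BPF_TCX_EGRESS) multi-program hook purely through libbpf's *_opts calls. As a minimal standalone sketch of the same calls — illustrative only, not part of the patch; the object file "tcx_prog.bpf.o" and the program name "tc_main" are assumed, hypothetical names — attaching one classifier to loopback ingress, querying the hook, and detaching could look like this:

/* Minimal sketch, not from the patch: "tcx_prog.bpf.o" and "tc_main" are
 * hypothetical names; error handling is reduced to the bare minimum.
 */
#include <errno.h>
#include <stdio.h>
#include <net/if.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 prog_ids[4] = {};
	int ifindex = if_nametoindex("lo");
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err, prog_fd;

	obj = bpf_object__open_file("tcx_prog.bpf.o", NULL);
	if (!obj)
		return 1;
	err = bpf_object__load(obj);
	if (err)
		goto out;

	prog = bpf_object__find_program_by_name(obj, "tc_main");
	if (!prog) {
		err = -ENOENT;
		goto out;
	}
	prog_fd = bpf_program__fd(prog);

	/* No flags: the program is appended to the tcx ingress list of lo. */
	err = bpf_prog_attach_opts(prog_fd, ifindex, BPF_TCX_INGRESS, &opta);
	if (err)
		goto out;

	/* Query the attached program ids and the hook's revision counter. */
	optq.prog_ids = prog_ids;
	optq.count = 4;
	err = bpf_prog_query_opts(ifindex, BPF_TCX_INGRESS, &optq);
	if (!err)
		printf("%u prog(s) attached, revision %llu\n",
		       optq.count, (unsigned long long)optq.revision);

	/* Detach by prog fd; BPF_F_AFTER/relative_fd are only needed when
	 * addressing a specific position in a multi-program chain, as the
	 * detach_after test above does.
	 */
	err = bpf_prog_detach_opts(prog_fd, ifindex, BPF_TCX_INGRESS, &optd);
out:
	bpf_object__close(obj);
	return err ? 1 : 0;
}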
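
test_tc_chain_mixed above checks that an old-style cls_bpf filter (attached through the bpf_tc_* netlink API) and a tcx program can coexist on the same device, with the tcx program running first and the legacy filter only seeing packets the tcx program lets continue. A rough sketch of setting up that mix, assuming two already-loaded classifier fds fd_legacy and fd_tcx (both hypothetical, obtained elsewhere):

/* Sketch only: fd_legacy and fd_tcx are assumed to be fds of loaded
 * SCHED_CLS programs obtained elsewhere.
 */
#include <errno.h>
#include <net/if.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int attach_mixed(int fd_legacy, int fd_tcx)
{
	int ifindex = if_nametoindex("lo");
	LIBBPF_OPTS(bpf_tc_hook, hook,
		    .ifindex = ifindex,
		    .attach_point = BPF_TC_INGRESS);
	LIBBPF_OPTS(bpf_tc_opts, tc_opts,
		    .handle = 1,
		    .priority = 1,
		    .prog_fd = fd_legacy);
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	int err;

	/* Old-style path: create the clsact hook and add a cls_bpf filter. */
	err = bpf_tc_hook_create(&hook);
	if (err && err != -EEXIST)
		return err;
	err = bpf_tc_attach(&hook, &tc_opts);
	if (err)
		return err;

	/* tcx path on the same device and direction; as the chain_mixed
	 * assertions show, it runs before the legacy filter, which only
	 * executes if the tcx program lets the packet continue.
	 */
	return bpf_prog_attach_opts(fd_tcx, ifindex, BPF_TCX_INGRESS, &opta);
}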
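
The uprobe_multi tests attach one BPF program to several user-space functions in one attach call via bpf_program__attach_uprobe_multi(), either by glob pattern or by an explicit symbol list. A minimal sketch of the symbol-list form outside the skeleton harness follows; "uprobe_prog.bpf.o" and the program name "handler" are hypothetical, and the binary at "path" is assumed to actually contain the listed symbols (as the test binary does):

/* Sketch only: object/program names are hypothetical; the binary at
 * "path" must contain the listed symbols.
 */
#include <bpf/libbpf.h>

static int attach_uprobe_multi_syms(const char *path)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	const char *syms[] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
		"uprobe_multi_func_3",
	};
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err = 0;

	obj = bpf_object__open_file("uprobe_prog.bpf.o", NULL);
	if (!obj)
		return -1;
	if (bpf_object__load(obj)) {
		err = -1;
		goto out;
	}
	prog = bpf_object__find_program_by_name(obj, "handler");
	if (!prog) {
		err = -1;
		goto out;
	}

	/* pid -1: no pid filter; retprobe = false attaches on function entry.
	 * Passing a glob pattern such as "uprobe_multi_func_*" instead of
	 * opts.syms is the other supported mode.
	 */
	opts.syms = syms;
	opts.cnt = sizeof(syms) / sizeof(syms[0]);
	opts.retprobe = false;
	link = bpf_program__attach_uprobe_multi(prog, -1, path, NULL, &opts);
	if (!link)
		err = -1;
	else
		bpf_link__destroy(link);
out:
	bpf_object__close(obj);
	return err;
}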