Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
41 files changed, 2243 insertions, 103 deletions
| diff --git a/tools/testing/selftests/bpf/prog_tests/arena_htab.c b/tools/testing/selftests/bpf/prog_tests/arena_htab.c new file mode 100644 index 000000000000..0766702de846 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/arena_htab.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> +#include <sys/mman.h> +#include <network_helpers.h> + +#include "arena_htab_asm.skel.h" +#include "arena_htab.skel.h" + +#define PAGE_SIZE 4096 + +#include "bpf_arena_htab.h" + +static void test_arena_htab_common(struct htab *htab) +{ +	int i; + +	printf("htab %p buckets %p n_buckets %d\n", htab, htab->buckets, htab->n_buckets); +	ASSERT_OK_PTR(htab->buckets, "htab->buckets shouldn't be NULL"); +	for (i = 0; htab->buckets && i < 16; i += 4) { +		/* +		 * Walk htab buckets and link lists since all pointers are correct, +		 * though they were written by bpf program. +		 */ +		int val = htab_lookup_elem(htab, i); + +		ASSERT_EQ(i, val, "key == value"); +	} +} + +static void test_arena_htab_llvm(void) +{ +	LIBBPF_OPTS(bpf_test_run_opts, opts); +	struct arena_htab *skel; +	struct htab *htab; +	size_t arena_sz; +	void *area; +	int ret; + +	skel = arena_htab__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "arena_htab__open_and_load")) +		return; + +	area = bpf_map__initial_value(skel->maps.arena, &arena_sz); +	/* fault-in a page with pgoff == 0 as sanity check */ +	*(volatile int *)area = 0x55aa; + +	/* bpf prog will allocate more pages */ +	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_htab_llvm), &opts); +	ASSERT_OK(ret, "ret"); +	ASSERT_OK(opts.retval, "retval"); +	if (skel->bss->skip) { +		printf("%s:SKIP:compiler doesn't support arena_cast\n", __func__); +		test__skip(); +		goto out; +	} +	htab = skel->bss->htab_for_user; +	test_arena_htab_common(htab); +out: +	arena_htab__destroy(skel); +} + +static void test_arena_htab_asm(void) +{ +	LIBBPF_OPTS(bpf_test_run_opts, opts); +	struct arena_htab_asm *skel; +	struct htab *htab; +	int ret; + +	skel = arena_htab_asm__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "arena_htab_asm__open_and_load")) +		return; + +	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_htab_asm), &opts); +	ASSERT_OK(ret, "ret"); +	ASSERT_OK(opts.retval, "retval"); +	htab = skel->bss->htab_for_user; +	test_arena_htab_common(htab); +	arena_htab_asm__destroy(skel); +} + +void test_arena_htab(void) +{ +	if (test__start_subtest("arena_htab_llvm")) +		test_arena_htab_llvm(); +	if (test__start_subtest("arena_htab_asm")) +		test_arena_htab_asm(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/arena_list.c b/tools/testing/selftests/bpf/prog_tests/arena_list.c new file mode 100644 index 000000000000..e61886debab1 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/arena_list.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> +#include <sys/mman.h> +#include <network_helpers.h> + +#define PAGE_SIZE 4096 + +#include "bpf_arena_list.h" +#include "arena_list.skel.h" + +struct elem { +	struct arena_list_node node; +	__u64 value; +}; + +static int list_sum(struct arena_list_head *head) +{ +	struct elem __arena *n; +	int sum = 0; + +	list_for_each_entry(n, head, node) +		sum += n->value; +	return sum; +} + +static void test_arena_list_add_del(int cnt) +{ +	LIBBPF_OPTS(bpf_test_run_opts, opts); +	struct arena_list *skel; +	int expected_sum = (u64)cnt * (cnt - 1) / 2; +	int ret, sum; + +	skel = arena_list__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "arena_list__open_and_load")) +		return; + +	skel->bss->cnt = cnt; +	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_list_add), &opts); +	ASSERT_OK(ret, "ret_add"); +	ASSERT_OK(opts.retval, "retval"); +	if (skel->bss->skip) { +		printf("%s:SKIP:compiler doesn't support arena_cast\n", __func__); +		test__skip(); +		goto out; +	} +	sum = list_sum(skel->bss->list_head); +	ASSERT_EQ(sum, expected_sum, "sum of elems"); +	ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems"); +	ASSERT_EQ(skel->arena->test_val, cnt + 1, "num of elems"); + +	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_list_del), &opts); +	ASSERT_OK(ret, "ret_del"); +	sum = list_sum(skel->bss->list_head); +	ASSERT_EQ(sum, 0, "sum of list elems after del"); +	ASSERT_EQ(skel->bss->list_sum, expected_sum, "sum of list elems computed by prog"); +	ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems"); +out: +	arena_list__destroy(skel); +} + +void test_arena_list(void) +{ +	if (test__start_subtest("arena_list_1")) +		test_arena_list_add_del(1); +	if (test__start_subtest("arena_list_1000")) +		test_arena_list_add_del(1000); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c b/tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c new file mode 100644 index 000000000000..6a707213e46b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "bad_struct_ops.skel.h" +#include "bad_struct_ops2.skel.h" + +static void invalid_prog_reuse(void) +{ +	struct bad_struct_ops *skel; +	char *log = NULL; +	int err; + +	skel = bad_struct_ops__open(); +	if (!ASSERT_OK_PTR(skel, "bad_struct_ops__open")) +		return; + +	if (start_libbpf_log_capture()) +		goto cleanup; + +	err = bad_struct_ops__load(skel); +	log = stop_libbpf_log_capture(); +	ASSERT_ERR(err, "bad_struct_ops__load should fail"); +	ASSERT_HAS_SUBSTR(log, +		"struct_ops init_kern testmod_2 func ptr test_1: invalid reuse of prog test_1", +		"expected init_kern message"); + +cleanup: +	free(log); +	bad_struct_ops__destroy(skel); +} + +static void unused_program(void) +{ +	struct bad_struct_ops2 *skel; +	char *log = NULL; +	int err; + +	skel = bad_struct_ops2__open(); +	if (!ASSERT_OK_PTR(skel, "bad_struct_ops2__open")) +		return; + +	/* struct_ops programs not referenced from any maps are open +	 * with autoload set to true. 
+	 */ +	ASSERT_TRUE(bpf_program__autoload(skel->progs.foo), "foo autoload == true"); + +	if (start_libbpf_log_capture()) +		goto cleanup; + +	err = bad_struct_ops2__load(skel); +	ASSERT_ERR(err, "bad_struct_ops2__load should fail"); +	log = stop_libbpf_log_capture(); +	ASSERT_HAS_SUBSTR(log, "prog 'foo': failed to load", +			  "message about 'foo' failing to load"); + +cleanup: +	free(log); +	bad_struct_ops2__destroy(skel); +} + +void test_bad_struct_ops(void) +{ +	if (test__start_subtest("invalid_prog_reuse")) +		invalid_prog_reuse(); +	if (test__start_subtest("unused_program")) +		unused_program(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index e770912fc1d2..4c6ada5b270b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -35,7 +35,7 @@ static int check_load(const char *file, enum bpf_prog_type type)  	}  	bpf_program__set_type(prog, type); -	bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS); +	bpf_program__set_flags(prog, testing_prog_flags());  	bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags);  	err = bpf_object__load(obj); diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 816145bcb647..00965a6e83bb 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -3535,6 +3535,32 @@ static struct btf_raw_test raw_tests[] = {  	.value_type_id = 1,  	.max_entries = 1,  }, +{ +	.descr = "datasec: name '?.foo bar:buz' is ok", +	.raw_types = { +		/* int */ +		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */ +		/* VAR x */                                     /* [2] */ +		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), +		BTF_VAR_STATIC, +		/* DATASEC ?.data */                            /* [3] */ +		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), +		BTF_VAR_SECINFO_ENC(2, 0, 4), +		BTF_END_RAW, +	}, +	BTF_STR_SEC("\0x\0?.foo bar:buz"), +}, +{ +	.descr = "type name '?foo' is not ok", +	.raw_types = { +		/* union ?foo; */ +		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */ +		BTF_END_RAW, +	}, +	BTF_STR_SEC("\0?foo"), +	.err_str = "Invalid name", +	.btf_load_err = true, +},  {  	.descr = "float test #1, well-formed", @@ -4363,6 +4389,9 @@ static void do_test_raw(unsigned int test_num)  	if (err || btf_fd < 0)  		goto done; +	if (!test->map_type) +		goto done; +  	opts.btf_fd = btf_fd;  	opts.btf_key_type_id = test->key_type_id;  	opts.btf_value_type_id = test->value_type_id; diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c index c2e886399e3c..ecf89df78109 100644 --- a/tools/testing/selftests/bpf/prog_tests/cpumask.c +++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c @@ -27,7 +27,7 @@ static void verify_success(const char *prog_name)  	struct bpf_program *prog;  	struct bpf_link *link = NULL;  	pid_t child_pid; -	int status; +	int status, err;  	skel = cpumask_success__open();  	if (!ASSERT_OK_PTR(skel, "cpumask_success__open")) @@ -36,8 +36,8 @@ static void verify_success(const char *prog_name)  	skel->bss->pid = getpid();  	skel->bss->nr_cpus = libbpf_num_possible_cpus(); -	cpumask_success__load(skel); -	if (!ASSERT_OK_PTR(skel, "cpumask_success__load")) +	err = cpumask_success__load(skel); +	if (!ASSERT_OK(err, "cpumask_success__load"))  		goto cleanup;  	prog = 
bpf_object__find_program_by_name(skel->obj, prog_name); diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c index 4951aa978f33..3b7c57fe55a5 100644 --- a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c +++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c @@ -626,50 +626,6 @@ err:  	return false;  } -/* Request BPF program instructions after all rewrites are applied, - * e.g. verifier.c:convert_ctx_access() is done. - */ -static int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt) -{ -	struct bpf_prog_info info = {}; -	__u32 info_len = sizeof(info); -	__u32 xlated_prog_len; -	__u32 buf_element_size = sizeof(struct bpf_insn); - -	if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { -		perror("bpf_prog_get_info_by_fd failed"); -		return -1; -	} - -	xlated_prog_len = info.xlated_prog_len; -	if (xlated_prog_len % buf_element_size) { -		printf("Program length %d is not multiple of %d\n", -		       xlated_prog_len, buf_element_size); -		return -1; -	} - -	*cnt = xlated_prog_len / buf_element_size; -	*buf = calloc(*cnt, buf_element_size); -	if (!buf) { -		perror("can't allocate xlated program buffer"); -		return -ENOMEM; -	} - -	bzero(&info, sizeof(info)); -	info.xlated_prog_len = xlated_prog_len; -	info.xlated_prog_insns = (__u64)(unsigned long)*buf; -	if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { -		perror("second bpf_prog_get_info_by_fd failed"); -		goto out_free_buf; -	} - -	return 0; - -out_free_buf: -	free(*buf); -	return -1; -} -  static void print_insn(void *private_data, const char *fmt, ...)  {  	va_list args; diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c index 5c0ebe6ba866..dcb9e5070cc3 100644 --- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c +++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c @@ -72,6 +72,6 @@ fail:  		bpf_tc_hook_destroy(&qdisc_hook);  		close_netns(nstoken);  	} -	SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null"); +	SYS_NOFAIL("ip netns del " NS_TEST);  	decap_sanity__destroy(skel);  } diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c index 4ad4cd69152e..3379df2d4cf2 100644 --- a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c @@ -298,6 +298,6 @@ void test_fib_lookup(void)  fail:  	if (nstoken)  		close_netns(nstoken); -	SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null"); +	SYS_NOFAIL("ip netns del " NS_TEST);  	fib_lookup__destroy(skel);  } diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c index d4b1901f7879..f3932941bbaa 100644 --- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c +++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c @@ -19,6 +19,7 @@ static const char *kmulti_syms[] = {  };  #define KMULTI_CNT ARRAY_SIZE(kmulti_syms)  static __u64 kmulti_addrs[KMULTI_CNT]; +static __u64 kmulti_cookies[] = { 3, 1, 2 };  #define KPROBE_FUNC "bpf_fentry_test1"  static __u64 kprobe_addr; @@ -31,6 +32,8 @@ static noinline void uprobe_func(void)  	asm volatile ("");  } +#define PERF_EVENT_COOKIE 0xdeadbeef +  static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,  				 ssize_t offset, ssize_t entry_offset)  { @@ -62,6 +65,8 @@ again:  			ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset,  				  
"kprobe_addr"); +		ASSERT_EQ(info.perf_event.kprobe.cookie, PERF_EVENT_COOKIE, "kprobe_cookie"); +  		if (!info.perf_event.kprobe.func_name) {  			ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");  			info.perf_event.kprobe.func_name = ptr_to_u64(&buf); @@ -81,6 +86,8 @@ again:  			goto again;  		} +		ASSERT_EQ(info.perf_event.tracepoint.cookie, PERF_EVENT_COOKIE, "tracepoint_cookie"); +  		err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,  			      strlen(TP_NAME));  		ASSERT_EQ(err, 0, "cmp_tp_name"); @@ -96,10 +103,17 @@ again:  			goto again;  		} +		ASSERT_EQ(info.perf_event.uprobe.cookie, PERF_EVENT_COOKIE, "uprobe_cookie"); +  		err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,  			      strlen(UPROBE_FILE));  			ASSERT_EQ(err, 0, "cmp_file_name");  		break; +	case BPF_PERF_EVENT_EVENT: +		ASSERT_EQ(info.perf_event.event.type, PERF_TYPE_SOFTWARE, "event_type"); +		ASSERT_EQ(info.perf_event.event.config, PERF_COUNT_SW_PAGE_FAULTS, "event_config"); +		ASSERT_EQ(info.perf_event.event.cookie, PERF_EVENT_COOKIE, "event_cookie"); +		break;  	default:  		err = -1;  		break; @@ -139,6 +153,7 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,  	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,  		.attach_mode = PROBE_ATTACH_MODE_LINK,  		.retprobe = type == BPF_PERF_EVENT_KRETPROBE, +		.bpf_cookie = PERF_EVENT_COOKIE,  	);  	ssize_t entry_offset = 0;  	struct bpf_link *link; @@ -163,10 +178,13 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,  static void test_tp_fill_link_info(struct test_fill_link_info *skel)  { +	DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts, +		.bpf_cookie = PERF_EVENT_COOKIE, +	);  	struct bpf_link *link;  	int link_fd, err; -	link = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME); +	link = bpf_program__attach_tracepoint_opts(skel->progs.tp_run, TP_CAT, TP_NAME, &opts);  	if (!ASSERT_OK_PTR(link, "attach_tp"))  		return; @@ -176,16 +194,53 @@ static void test_tp_fill_link_info(struct test_fill_link_info *skel)  	bpf_link__destroy(link);  } +static void test_event_fill_link_info(struct test_fill_link_info *skel) +{ +	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts, +		.bpf_cookie = PERF_EVENT_COOKIE, +	); +	struct bpf_link *link; +	int link_fd, err, pfd; +	struct perf_event_attr attr = { +		.type = PERF_TYPE_SOFTWARE, +		.config = PERF_COUNT_SW_PAGE_FAULTS, +		.freq = 1, +		.sample_freq = 1, +		.size = sizeof(struct perf_event_attr), +	}; + +	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu 0 */, +		      -1 /* group id */, 0 /* flags */); +	if (!ASSERT_GE(pfd, 0, "perf_event_open")) +		return; + +	link = bpf_program__attach_perf_event_opts(skel->progs.event_run, pfd, &opts); +	if (!ASSERT_OK_PTR(link, "attach_event")) +		goto error; + +	link_fd = bpf_link__fd(link); +	err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_EVENT, 0, 0, 0); +	ASSERT_OK(err, "verify_perf_link_info"); +	bpf_link__destroy(link); + +error: +	close(pfd); +} +  static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,  				       enum bpf_perf_event_type type)  { +	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, +		.retprobe = type == BPF_PERF_EVENT_URETPROBE, +		.bpf_cookie = PERF_EVENT_COOKIE, +	);  	struct bpf_link *link;  	int link_fd, err; -	link = bpf_program__attach_uprobe(skel->progs.uprobe_run, -					  type == BPF_PERF_EVENT_URETPROBE, -					  0, /* self pid */ -					  UPROBE_FILE, uprobe_offset); +	link = 
bpf_program__attach_uprobe_opts(skel->progs.uprobe_run, +					       0, /* self pid */ +					       UPROBE_FILE, uprobe_offset, +					       &opts);  	if (!ASSERT_OK_PTR(link, "attach_uprobe"))  		return; @@ -195,11 +250,11 @@ static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,  	bpf_link__destroy(link);  } -static int verify_kmulti_link_info(int fd, bool retprobe) +static int verify_kmulti_link_info(int fd, bool retprobe, bool has_cookies)  { +	__u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT];  	struct bpf_link_info info;  	__u32 len = sizeof(info); -	__u64 addrs[KMULTI_CNT];  	int flags, i, err;  	memset(&info, 0, sizeof(info)); @@ -221,18 +276,22 @@ again:  	if (!info.kprobe_multi.addrs) {  		info.kprobe_multi.addrs = ptr_to_u64(addrs); +		info.kprobe_multi.cookies = ptr_to_u64(cookies);  		goto again;  	} -	for (i = 0; i < KMULTI_CNT; i++) +	for (i = 0; i < KMULTI_CNT; i++) {  		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs"); +		ASSERT_EQ(cookies[i], has_cookies ? kmulti_cookies[i] : 0, +			  "kmulti_cookies_value"); +	}  	return 0;  }  static void verify_kmulti_invalid_user_buffer(int fd)  { +	__u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT];  	struct bpf_link_info info;  	__u32 len = sizeof(info); -	__u64 addrs[KMULTI_CNT];  	int err, i;  	memset(&info, 0, sizeof(info)); @@ -266,7 +325,20 @@ static void verify_kmulti_invalid_user_buffer(int fd)  	info.kprobe_multi.count = KMULTI_CNT;  	info.kprobe_multi.addrs = 0x1; /* invalid addr */  	err = bpf_link_get_info_by_fd(fd, &info, &len); -	ASSERT_EQ(err, -EFAULT, "invalid_buff"); +	ASSERT_EQ(err, -EFAULT, "invalid_buff_addrs"); + +	info.kprobe_multi.count = KMULTI_CNT; +	info.kprobe_multi.addrs = ptr_to_u64(addrs); +	info.kprobe_multi.cookies = 0x1; /* invalid addr */ +	err = bpf_link_get_info_by_fd(fd, &info, &len); +	ASSERT_EQ(err, -EFAULT, "invalid_buff_cookies"); + +	/* cookies && !count */ +	info.kprobe_multi.count = 0; +	info.kprobe_multi.addrs = ptr_to_u64(NULL); +	info.kprobe_multi.cookies = ptr_to_u64(cookies); +	err = bpf_link_get_info_by_fd(fd, &info, &len); +	ASSERT_EQ(err, -EINVAL, "invalid_cookies_count");  }  static int symbols_cmp_r(const void *a, const void *b) @@ -278,13 +350,15 @@ static int symbols_cmp_r(const void *a, const void *b)  }  static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel, -					     bool retprobe, bool invalid) +					     bool retprobe, bool cookies, +					     bool invalid)  {  	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);  	struct bpf_link *link;  	int link_fd, err;  	opts.syms = kmulti_syms; +	opts.cookies = cookies ? 
kmulti_cookies : NULL;  	opts.cnt = KMULTI_CNT;  	opts.retprobe = retprobe;  	link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, NULL, &opts); @@ -293,7 +367,7 @@ static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,  	link_fd = bpf_link__fd(link);  	if (!invalid) { -		err = verify_kmulti_link_info(link_fd, retprobe); +		err = verify_kmulti_link_info(link_fd, retprobe, cookies);  		ASSERT_OK(err, "verify_kmulti_link_info");  	} else {  		verify_kmulti_invalid_user_buffer(link_fd); @@ -513,6 +587,8 @@ void test_fill_link_info(void)  		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true);  	if (test__start_subtest("tracepoint_link_info"))  		test_tp_fill_link_info(skel); +	if (test__start_subtest("event_link_info")) +		test_event_fill_link_info(skel);  	uprobe_offset = get_uprobe_offset(&uprobe_func);  	if (test__start_subtest("uprobe_link_info")) @@ -523,12 +599,16 @@ void test_fill_link_info(void)  	qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r);  	for (i = 0; i < KMULTI_CNT; i++)  		kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]); -	if (test__start_subtest("kprobe_multi_link_info")) -		test_kprobe_multi_fill_link_info(skel, false, false); -	if (test__start_subtest("kretprobe_multi_link_info")) -		test_kprobe_multi_fill_link_info(skel, true, false); +	if (test__start_subtest("kprobe_multi_link_info")) { +		test_kprobe_multi_fill_link_info(skel, false, false, false); +		test_kprobe_multi_fill_link_info(skel, false, true, false); +	} +	if (test__start_subtest("kretprobe_multi_link_info")) { +		test_kprobe_multi_fill_link_info(skel, true, false, false); +		test_kprobe_multi_fill_link_info(skel, true, true, false); +	}  	if (test__start_subtest("kprobe_multi_invalid_ubuff")) -		test_kprobe_multi_fill_link_info(skel, true, true); +		test_kprobe_multi_fill_link_info(skel, true, true, true);  	if (test__start_subtest("uprobe_multi_link_info"))  		test_uprobe_multi_fill_link_info(skel, false, false); diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c index 57c814f5f6a7..8dd2af9081f4 100644 --- a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c +++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c @@ -59,9 +59,9 @@ static int setup_topology(bool ipv6)  	/* Wait for up to 5s for links to come up */  	for (i = 0; i < 5; ++i) {  		if (ipv6) -			up = !system("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6 " &>/dev/null"); +			up = !SYS_NOFAIL("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6);  		else -			up = !system("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR " &>/dev/null"); +			up = !SYS_NOFAIL("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR);  		if (up)  			break; diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c index bf84d4a1d9ae..3c440370c1f0 100644 --- a/tools/testing/selftests/bpf/prog_tests/iters.c +++ b/tools/testing/selftests/bpf/prog_tests/iters.c @@ -193,6 +193,7 @@ static void subtest_task_iters(void)  	ASSERT_EQ(skel->bss->procs_cnt, 1, "procs_cnt");  	ASSERT_EQ(skel->bss->threads_cnt, thread_num + 1, "threads_cnt");  	ASSERT_EQ(skel->bss->proc_threads_cnt, thread_num + 1, "proc_threads_cnt"); +	ASSERT_EQ(skel->bss->invalid_cnt, 0, "invalid_cnt");  	pthread_mutex_unlock(&do_nothing_mutex);  	for (int i = 0; i < thread_num; i++)  		ASSERT_OK(pthread_join(thread_ids[i], &ret), "pthread_join"); diff --git 
a/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c new file mode 100644 index 000000000000..7def158da9eb --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023. Huawei Technologies Co., Ltd */ +#include <test_progs.h> + +#include "linux/filter.h" +#include "kptr_xchg_inline.skel.h" + +void test_kptr_xchg_inline(void) +{ +	struct kptr_xchg_inline *skel; +	struct bpf_insn *insn = NULL; +	struct bpf_insn exp; +	unsigned int cnt; +	int err; + +#if !(defined(__x86_64__) || defined(__aarch64__) || \ +      (defined(__riscv) && __riscv_xlen == 64)) +	test__skip(); +	return; +#endif + +	skel = kptr_xchg_inline__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "open_load")) +		return; + +	err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt); +	if (!ASSERT_OK(err, "prog insn")) +		goto out; + +	/* The original instructions are: +	 * r1 = map[id:xxx][0]+0 +	 * r2 = 0 +	 * call bpf_kptr_xchg#yyy +	 * +	 * call bpf_kptr_xchg#yyy will be inlined as: +	 * r0 = r2 +	 * r0 = atomic64_xchg((u64 *)(r1 +0), r0) +	 */ +	if (!ASSERT_GT(cnt, 5, "insn cnt")) +		goto out; + +	exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2); +	if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov")) +		goto out; + +	exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0); +	if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg")) +		goto out; +out: +	free(insn); +	kptr_xchg_inline__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c index 9f766ddd946a..4ed46ed58a7b 100644 --- a/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c +++ b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c @@ -30,6 +30,8 @@ void test_libbpf_probe_prog_types(void)  		if (prog_type == BPF_PROG_TYPE_UNSPEC)  			continue; +		if (strcmp(prog_type_name, "__MAX_BPF_PROG_TYPE") == 0) +			continue;  		if (!test__start_subtest(prog_type_name))  			continue; @@ -68,6 +70,8 @@ void test_libbpf_probe_map_types(void)  		if (map_type == BPF_MAP_TYPE_UNSPEC)  			continue; +		if (strcmp(map_type_name, "__MAX_BPF_MAP_TYPE") == 0) +			continue;  		if (!test__start_subtest(map_type_name))  			continue; diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c index eb34d612d6f8..62ea855ec4d0 100644 --- a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c +++ b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c @@ -132,6 +132,9 @@ static void test_libbpf_bpf_map_type_str(void)  		const char *map_type_str;  		char buf[256]; +		if (map_type == __MAX_BPF_MAP_TYPE) +			continue; +  		map_type_name = btf__str_by_offset(btf, e->name_off);  		map_type_str = libbpf_bpf_map_type_str(map_type);  		ASSERT_OK_PTR(map_type_str, map_type_name); @@ -186,6 +189,9 @@ static void test_libbpf_bpf_prog_type_str(void)  		const char *prog_type_str;  		char buf[256]; +		if (prog_type == __MAX_BPF_PROG_TYPE) +			continue; +  		prog_type_name = btf__str_by_offset(btf, e->name_off);  		prog_type_str = libbpf_bpf_prog_type_str(prog_type);  		ASSERT_OK_PTR(prog_type_str, prog_type_name); diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c index 7a3fa2ff567b..90a98e23be61 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_fixup.c +++ 
b/tools/testing/selftests/bpf/prog_tests/log_fixup.c @@ -169,9 +169,9 @@ void test_log_fixup(void)  	if (test__start_subtest("bad_core_relo_trunc_none"))  		bad_core_relo(0, TRUNC_NONE /* full buf */);  	if (test__start_subtest("bad_core_relo_trunc_partial")) -		bad_core_relo(280, TRUNC_PARTIAL /* truncate original log a bit */); +		bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */);  	if (test__start_subtest("bad_core_relo_trunc_full")) -		bad_core_relo(220, TRUNC_FULL  /* truncate also libbpf's message patch */); +		bad_core_relo(240, TRUNC_FULL  /* truncate also libbpf's message patch */);  	if (test__start_subtest("bad_core_relo_subprog"))  		bad_core_relo_subprog();  	if (test__start_subtest("missing_map")) diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h index e9190574e79f..fb1eb8c67361 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h @@ -27,8 +27,6 @@  			}                                                     \  	}) -#define NETNS "ns_lwt" -  static inline int netns_create(void)  {  	return system("ip netns add " NETNS); diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c index 59b38569f310..835a1d756c16 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c @@ -54,6 +54,7 @@  #include <stdbool.h>  #include <stdlib.h> +#define NETNS "ns_lwt_redirect"  #include "lwt_helpers.h"  #include "test_progs.h"  #include "network_helpers.h" @@ -85,7 +86,7 @@ static void ping_dev(const char *dev, bool is_ingress)  		snprintf(ip, sizeof(ip), "20.0.0.%d", link_index);  	/* We won't get a reply. Don't fail here */ -	SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1", +	SYS_NOFAIL("ping %s -c1 -W1 -s %d",  		   ip, ICMP_PAYLOAD_SIZE);  } @@ -203,6 +204,7 @@ static int setup_redirect_target(const char *target_dev, bool need_mac)  	if (!ASSERT_GE(target_index, 0, "if_nametoindex"))  		goto fail; +	SYS(fail, "sysctl -w net.ipv6.conf.all.disable_ipv6=1");  	SYS(fail, "ip link add link_err type dummy");  	SYS(fail, "ip link set lo up");  	SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32"); diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c index f4bb2d5fcae0..03825d2b45a8 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c +++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c @@ -48,6 +48,7 @@   *  For case 2, force UDP packets to overflow fq limit. As long as kernel   *  is not crashed, it is considered successful.   */ +#define NETNS "ns_lwt_reroute"  #include "lwt_helpers.h"  #include "network_helpers.h"  #include <linux/net_tstamp.h> @@ -63,7 +64,7 @@  static void ping_once(const char *ip)  {  	/* We won't get a reply. 
Don't fail here */ -	SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1", +	SYS_NOFAIL("ping %s -c1 -W1 -s %d",  		   ip, ICMP_PAYLOAD_SIZE);  } diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c index 7c0be7cf550b..8f8d792307c1 100644 --- a/tools/testing/selftests/bpf/prog_tests/mptcp.c +++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c @@ -79,7 +79,7 @@ static void cleanup_netns(struct nstoken *nstoken)  	if (nstoken)  		close_netns(nstoken); -	SYS_NOFAIL("ip netns del %s &> /dev/null", NS_TEST); +	SYS_NOFAIL("ip netns del %s", NS_TEST);  }  static int verify_tsk(int map_fd, int client_fd) diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c index 3f1f58d3a729..a1f7e7378a64 100644 --- a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c @@ -29,6 +29,10 @@ static void test_success(void)  	bpf_program__set_autoload(skel->progs.non_sleepable_1, true);  	bpf_program__set_autoload(skel->progs.non_sleepable_2, true);  	bpf_program__set_autoload(skel->progs.task_trusted_non_rcuptr, true); +	bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog, true); +	bpf_program__set_autoload(skel->progs.rcu_read_lock_global_subprog, true); +	bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog_lock, true); +	bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog_unlock, true);  	err = rcu_read_lock__load(skel);  	if (!ASSERT_OK(err, "skel_load"))  		goto out; @@ -75,6 +79,8 @@ static const char * const inproper_region_tests[] = {  	"inproper_sleepable_helper",  	"inproper_sleepable_kfunc",  	"nested_rcu_region", +	"rcu_read_lock_global_subprog_lock", +	"rcu_read_lock_global_subprog_unlock",  };  static void test_inproper_region(void) diff --git a/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c new file mode 100644 index 000000000000..3405923fe4e6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2024. Huawei Technologies Co., Ltd */ +#include "test_progs.h" +#include "read_vsyscall.skel.h" + +#if defined(__x86_64__) +/* For VSYSCALL_ADDR */ +#include <asm/vsyscall.h> +#else +/* To prevent build failure on non-x86 arch */ +#define VSYSCALL_ADDR 0UL +#endif + +struct read_ret_desc { +	const char *name; +	int ret; +} all_read[] = { +	{ .name = "probe_read_kernel", .ret = -ERANGE }, +	{ .name = "probe_read_kernel_str", .ret = -ERANGE }, +	{ .name = "probe_read", .ret = -ERANGE }, +	{ .name = "probe_read_str", .ret = -ERANGE }, +	{ .name = "probe_read_user", .ret = -EFAULT }, +	{ .name = "probe_read_user_str", .ret = -EFAULT }, +	{ .name = "copy_from_user", .ret = -EFAULT }, +	{ .name = "copy_from_user_task", .ret = -EFAULT }, +}; + +void test_read_vsyscall(void) +{ +	struct read_vsyscall *skel; +	unsigned int i; +	int err; + +#if !defined(__x86_64__) +	test__skip(); +	return; +#endif +	skel = read_vsyscall__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "read_vsyscall open_load")) +		return; + +	skel->bss->target_pid = getpid(); +	err = read_vsyscall__attach(skel); +	if (!ASSERT_EQ(err, 0, "read_vsyscall attach")) +		goto out; + +	/* userspace may don't have vsyscall page due to LEGACY_VSYSCALL_NONE, +	 * but it doesn't affect the returned error codes. 
+	 */ +	skel->bss->user_ptr = (void *)VSYSCALL_ADDR; +	usleep(1); + +	for (i = 0; i < ARRAY_SIZE(all_read); i++) +		ASSERT_EQ(skel->bss->read_ret[i], all_read[i].ret, all_read[i].name); +out: +	read_vsyscall__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c index 820d0bcfc474..eb74363f9f70 100644 --- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c +++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c @@ -840,7 +840,7 @@ static int load_range_cmp_prog(struct range x, struct range y, enum op op,  		.log_level = 2,  		.log_buf = log_buf,  		.log_size = log_sz, -		.prog_flags = BPF_F_TEST_REG_INVARIANTS, +		.prog_flags = testing_prog_flags(),  	);  	/* ; skip exit block below diff --git a/tools/testing/selftests/bpf/prog_tests/sock_destroy.c b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c index b0583309a94e..9c11938fe597 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_destroy.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c @@ -214,7 +214,7 @@ void test_sock_destroy(void)  cleanup:  	if (nstoken)  		close_netns(nstoken); -	SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null"); +	SYS_NOFAIL("ip netns del " TEST_NS);  	if (cgroup_fd >= 0)  		close(cgroup_fd);  	sock_destroy_prog__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c index 0c365f36c73b..d56e18b25528 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c @@ -112,7 +112,7 @@ void test_sock_iter_batch(void)  {  	struct nstoken *nstoken = NULL; -	SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null"); +	SYS_NOFAIL("ip netns del " TEST_NS);  	SYS(done, "ip netns add %s", TEST_NS);  	SYS(done, "ip -net %s link set dev lo up", TEST_NS); @@ -131,5 +131,5 @@ void test_sock_iter_batch(void)  	close_netns(nstoken);  done: -	SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null"); +	SYS_NOFAIL("ip netns del " TEST_NS);  } diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c index 18d451be57c8..2b0068742ef9 100644 --- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c @@ -48,6 +48,8 @@ static struct {  	{ "lock_id_mismatch_innermapval_kptr", "bpf_spin_unlock of different lock" },  	{ "lock_id_mismatch_innermapval_global", "bpf_spin_unlock of different lock" },  	{ "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" }, +	{ "lock_global_subprog_call1", "global function calls are not allowed while holding a lock" }, +	{ "lock_global_subprog_call2", "global function calls are not allowed while holding a lock" },  };  static int match_regex(const char *pattern, const char *string) diff --git a/tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c b/tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c new file mode 100644 index 000000000000..a5cc593c1e1d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "struct_ops_autocreate.skel.h" +#include "struct_ops_autocreate2.skel.h" + +static void cant_load_full_object(void) +{ +	struct struct_ops_autocreate *skel; +	char *log = NULL; +	int err; + +	skel = struct_ops_autocreate__open(); +	if (!ASSERT_OK_PTR(skel, 
"struct_ops_autocreate__open")) +		return; + +	if (start_libbpf_log_capture()) +		goto cleanup; +	/* The testmod_2 map BTF type (struct bpf_testmod_ops___v2) doesn't +	 * match the BTF of the actual struct bpf_testmod_ops defined in the +	 * kernel, so we should fail to load it if we don't disable autocreate +	 * for that map. +	 */ +	err = struct_ops_autocreate__load(skel); +	log = stop_libbpf_log_capture(); +	if (!ASSERT_ERR(err, "struct_ops_autocreate__load")) +		goto cleanup; + +	ASSERT_HAS_SUBSTR(log, "libbpf: struct_ops init_kern", "init_kern message"); +	ASSERT_EQ(err, -ENOTSUP, "errno should be ENOTSUP"); + +cleanup: +	free(log); +	struct_ops_autocreate__destroy(skel); +} + +static int check_test_1_link(struct struct_ops_autocreate *skel, struct bpf_map *map) +{ +	struct bpf_link *link; +	int err; + +	link = bpf_map__attach_struct_ops(skel->maps.testmod_1); +	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) +		return -1; + +	/* test_1() would be called from bpf_dummy_reg2() in bpf_testmod.c */ +	err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result"); +	bpf_link__destroy(link); +	return err; +} + +static void can_load_partial_object(void) +{ +	struct struct_ops_autocreate *skel; +	int err; + +	skel = struct_ops_autocreate__open(); +	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open_opts")) +		return; + +	err = bpf_map__set_autocreate(skel->maps.testmod_2, false); +	if (!ASSERT_OK(err, "bpf_map__set_autocreate")) +		goto cleanup; + +	ASSERT_TRUE(bpf_program__autoload(skel->progs.test_1), "test_1 default autoload"); +	ASSERT_TRUE(bpf_program__autoload(skel->progs.test_2), "test_2 default autoload"); + +	err = struct_ops_autocreate__load(skel); +	if (ASSERT_OK(err, "struct_ops_autocreate__load")) +		goto cleanup; + +	ASSERT_TRUE(bpf_program__autoload(skel->progs.test_1), "test_1 actual autoload"); +	ASSERT_FALSE(bpf_program__autoload(skel->progs.test_2), "test_2 actual autoload"); + +	check_test_1_link(skel, skel->maps.testmod_1); + +cleanup: +	struct_ops_autocreate__destroy(skel); +} + +static void optional_maps(void) +{ +	struct struct_ops_autocreate *skel; +	int err; + +	skel = struct_ops_autocreate__open(); +	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open")) +		return; + +	ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_1), "testmod_1 autocreate"); +	ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_2), "testmod_2 autocreate"); +	ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map), "optional_map autocreate"); +	ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map2), "optional_map2 autocreate"); + +	err  = bpf_map__set_autocreate(skel->maps.testmod_1, false); +	err |= bpf_map__set_autocreate(skel->maps.testmod_2, false); +	err |= bpf_map__set_autocreate(skel->maps.optional_map2, true); +	if (!ASSERT_OK(err, "bpf_map__set_autocreate")) +		goto cleanup; + +	err = struct_ops_autocreate__load(skel); +	if (ASSERT_OK(err, "struct_ops_autocreate__load")) +		goto cleanup; + +	check_test_1_link(skel, skel->maps.optional_map2); + +cleanup: +	struct_ops_autocreate__destroy(skel); +} + +/* Swap test_mod1->test_1 program from 'bar' to 'foo' using shadow vars. + * test_mod1 load should enable autoload for 'foo'. 
+ */ +static void autoload_and_shadow_vars(void) +{ +	struct struct_ops_autocreate2 *skel = NULL; +	struct bpf_link *link = NULL; +	int err; + +	skel = struct_ops_autocreate2__open(); +	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open_opts")) +		return; + +	ASSERT_FALSE(bpf_program__autoload(skel->progs.foo), "foo default autoload"); +	ASSERT_FALSE(bpf_program__autoload(skel->progs.bar), "bar default autoload"); + +	/* loading map testmod_1 would switch foo's autoload to true */ +	skel->struct_ops.testmod_1->test_1 = skel->progs.foo; + +	err = struct_ops_autocreate2__load(skel); +	if (ASSERT_OK(err, "struct_ops_autocreate__load")) +		goto cleanup; + +	ASSERT_TRUE(bpf_program__autoload(skel->progs.foo), "foo actual autoload"); +	ASSERT_FALSE(bpf_program__autoload(skel->progs.bar), "bar actual autoload"); + +	link = bpf_map__attach_struct_ops(skel->maps.testmod_1); +	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) +		goto cleanup; + +	/* test_1() would be called from bpf_dummy_reg2() in bpf_testmod.c */ +	err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result"); + +cleanup: +	bpf_link__destroy(link); +	struct_ops_autocreate2__destroy(skel); +} + +void test_struct_ops_autocreate(void) +{ +	if (test__start_subtest("cant_load_full_object")) +		cant_load_full_object(); +	if (test__start_subtest("can_load_partial_object")) +		can_load_partial_object(); +	if (test__start_subtest("autoload_and_shadow_vars")) +		autoload_and_shadow_vars(); +	if (test__start_subtest("optional_maps")) +		optional_maps(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c index ea8537c54413..c33c05161a9e 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c +++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c @@ -117,12 +117,6 @@ static void test_recursion(void)  	ASSERT_OK(err, "lookup map_b");  	ASSERT_EQ(value, 100, "map_b value"); -	prog_fd = bpf_program__fd(skel->progs.on_lookup); -	memset(&info, 0, sizeof(info)); -	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); -	ASSERT_OK(err, "get prog info"); -	ASSERT_GT(info.recursion_misses, 0, "on_lookup prog recursion"); -  	prog_fd = bpf_program__fd(skel->progs.on_update);  	memset(&info, 0, sizeof(info));  	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index 518f143c5b0f..dbe06aeaa2b2 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -188,6 +188,7 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)  {  	struct nstoken *nstoken = NULL;  	char src_fwd_addr[IFADDR_STR_LEN+1] = {}; +	char src_addr[IFADDR_STR_LEN + 1] = {};  	int err;  	if (result->dev_mode == MODE_VETH) { @@ -208,6 +209,9 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)  	if (get_ifaddr("src_fwd", src_fwd_addr))  		goto fail; +	if (get_ifaddr("src", src_addr)) +		goto fail; +  	result->ifindex_src = if_nametoindex("src");  	if (!ASSERT_GT(result->ifindex_src, 0, "ifindex_src"))  		goto fail; @@ -270,6 +274,13 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)  	SYS(fail, "ip route add " IP4_DST "/32 dev dst_fwd scope global");  	SYS(fail, "ip route add " IP6_DST "/128 dev dst_fwd scope global"); +	if (result->dev_mode == MODE_VETH) { +		SYS(fail, "ip neigh add " IP4_SRC " 
dev src_fwd lladdr %s", src_addr); +		SYS(fail, "ip neigh add " IP6_SRC " dev src_fwd lladdr %s", src_addr); +		SYS(fail, "ip neigh add " IP4_DST " dev dst_fwd lladdr %s", MAC_DST); +		SYS(fail, "ip neigh add " IP6_DST " dev dst_fwd lladdr %s", MAC_DST); +	} +  	close_netns(nstoken);  	/** setup in 'dst' namespace */ @@ -280,6 +291,7 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)  	SYS(fail, "ip addr add " IP4_DST "/32 dev dst");  	SYS(fail, "ip addr add " IP6_DST "/128 dev dst nodad");  	SYS(fail, "ip link set dev dst up"); +	SYS(fail, "ip link set dev lo up");  	SYS(fail, "ip route add " IP4_SRC "/32 dev dst scope global");  	SYS(fail, "ip route add " IP4_NET "/16 dev dst scope global"); @@ -457,7 +469,7 @@ static int set_forwarding(bool enable)  	return 0;  } -static void rcv_tstamp(int fd, const char *expected, size_t s) +static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp)  {  	struct __kernel_timespec pkt_ts = {};  	char ctl[CMSG_SPACE(sizeof(pkt_ts))]; @@ -478,7 +490,7 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)  	ret = recvmsg(fd, &msg, 0);  	if (!ASSERT_EQ(ret, s, "recvmsg")) -		return; +		return -1;  	ASSERT_STRNEQ(data, expected, s, "expected rcv data");  	cmsg = CMSG_FIRSTHDR(&msg); @@ -487,6 +499,12 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)  		memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts));  	pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec; +	if (tstamp) { +		/* caller will check the tstamp itself */ +		*tstamp = pkt_ns; +		return 0; +	} +  	ASSERT_NEQ(pkt_ns, 0, "pkt rcv tstamp");  	ret = clock_gettime(CLOCK_REALTIME, &now_ts); @@ -496,6 +514,60 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)  	if (ASSERT_GE(now_ns, pkt_ns, "check rcv tstamp"))  		ASSERT_LT(now_ns - pkt_ns, 5 * NSEC_PER_SEC,  			  "check rcv tstamp"); +	return 0; +} + +static void rcv_tstamp(int fd, const char *expected, size_t s) +{ +	__rcv_tstamp(fd, expected, s, NULL); +} + +static int wait_netstamp_needed_key(void) +{ +	int opt = 1, srv_fd = -1, cli_fd = -1, nretries = 0, err, n; +	char buf[] = "testing testing"; +	struct nstoken *nstoken; +	__u64 tstamp = 0; + +	nstoken = open_netns(NS_DST); +	if (!nstoken) +		return -1; + +	srv_fd = start_server(AF_INET6, SOCK_DGRAM, "::1", 0, 0); +	if (!ASSERT_GE(srv_fd, 0, "start_server")) +		goto done; + +	err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, +			 &opt, sizeof(opt)); +	if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)")) +		goto done; + +	cli_fd = connect_to_fd(srv_fd, TIMEOUT_MILLIS); +	if (!ASSERT_GE(cli_fd, 0, "connect_to_fd")) +		goto done; + +again: +	n = write(cli_fd, buf, sizeof(buf)); +	if (!ASSERT_EQ(n, sizeof(buf), "send to server")) +		goto done; +	err = __rcv_tstamp(srv_fd, buf, sizeof(buf), &tstamp); +	if (!ASSERT_OK(err, "__rcv_tstamp")) +		goto done; +	if (!tstamp && nretries++ < 5) { +		sleep(1); +		printf("netstamp_needed_key retry#%d\n", nretries); +		goto again; +	} + +done: +	if (!tstamp && srv_fd != -1) { +		close(srv_fd); +		srv_fd = -1; +	} +	if (cli_fd != -1) +		close(cli_fd); +	close_netns(nstoken); +	return srv_fd;  }  static void snd_tstamp(int fd, char *b, size_t s) @@ -832,11 +904,20 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)  {  	struct test_tc_dtime *skel;  	struct nstoken *nstoken; -	int err; +	int hold_tstamp_fd, err; + +	/* Hold a sk with the SOCK_TIMESTAMP set to ensure there +	 * is no delay in the kernel net_enable_timestamp(). 
+	 * This ensures the following tests must have +	 * non zero rcv tstamp in the recvmsg(). +	 */ +	hold_tstamp_fd = wait_netstamp_needed_key(); +	if (!ASSERT_GE(hold_tstamp_fd, 0, "wait_netstamp_needed_key")) +		return;  	skel = test_tc_dtime__open();  	if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open")) -		return; +		goto done;  	skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;  	skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd; @@ -881,6 +962,7 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)  done:  	test_tc_dtime__destroy(skel); +	close(hold_tstamp_fd);  }  static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result) diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c b/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c new file mode 100644 index 000000000000..eaf441dc7e79 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Amazon.com Inc. or its affiliates. */ + +#define _GNU_SOURCE +#include <sched.h> +#include <stdlib.h> +#include <net/if.h> + +#include "test_progs.h" +#include "cgroup_helpers.h" +#include "network_helpers.h" +#include "test_tcp_custom_syncookie.skel.h" + +static struct test_tcp_custom_syncookie_case { +	int family, type; +	char addr[16]; +	char name[10]; +} test_cases[] = { +	{ +		.name = "IPv4 TCP", +		.family = AF_INET, +		.type = SOCK_STREAM, +		.addr = "127.0.0.1", +	}, +	{ +		.name = "IPv6 TCP", +		.family = AF_INET6, +		.type = SOCK_STREAM, +		.addr = "::1", +	}, +}; + +static int setup_netns(void) +{ +	if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) +		return -1; + +	if (!ASSERT_OK(system("ip link set dev lo up"), "ip")) +		goto err; + +	if (!ASSERT_OK(write_sysctl("/proc/sys/net/ipv4/tcp_ecn", "1"), +		       "write_sysctl")) +		goto err; + +	return 0; +err: +	return -1; +} + +static int setup_tc(struct test_tcp_custom_syncookie *skel) +{ +	LIBBPF_OPTS(bpf_tc_hook, qdisc_lo, .attach_point = BPF_TC_INGRESS); +	LIBBPF_OPTS(bpf_tc_opts, tc_attach, +		    .prog_fd = bpf_program__fd(skel->progs.tcp_custom_syncookie)); + +	qdisc_lo.ifindex = if_nametoindex("lo"); +	if (!ASSERT_OK(bpf_tc_hook_create(&qdisc_lo), "qdisc add dev lo clsact")) +		goto err; + +	if (!ASSERT_OK(bpf_tc_attach(&qdisc_lo, &tc_attach), +		       "filter add dev lo ingress")) +		goto err; + +	return 0; +err: +	return -1; +} + +#define msg "Hello World" +#define msglen 11 + +static void transfer_message(int sender, int receiver) +{ +	char buf[msglen]; +	int ret; + +	ret = send(sender, msg, msglen, 0); +	if (!ASSERT_EQ(ret, msglen, "send")) +		return; + +	memset(buf, 0, sizeof(buf)); + +	ret = recv(receiver, buf, msglen, 0); +	if (!ASSERT_EQ(ret, msglen, "recv")) +		return; + +	ret = strncmp(buf, msg, msglen); +	if (!ASSERT_EQ(ret, 0, "strncmp")) +		return; +} + +static void create_connection(struct test_tcp_custom_syncookie_case *test_case) +{ +	int server, client, child; + +	server = start_server(test_case->family, test_case->type, test_case->addr, 0, 0); +	if (!ASSERT_NEQ(server, -1, "start_server")) +		return; + +	client = connect_to_fd(server, 0); +	if (!ASSERT_NEQ(client, -1, "connect_to_fd")) +		goto close_server; + +	child = accept(server, NULL, 0); +	if (!ASSERT_NEQ(child, -1, "accept")) +		goto close_client; + +	transfer_message(client, child); +	transfer_message(child, client); + +	close(child); +close_client: +	close(client); +close_server: +	close(server); +} + +void 
test_tcp_custom_syncookie(void) +{ +	struct test_tcp_custom_syncookie *skel; +	int i; + +	if (setup_netns()) +		return; + +	skel = test_tcp_custom_syncookie__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "open_and_load")) +		return; + +	if (setup_tc(skel)) +		goto destroy_skel; + +	for (i = 0; i < ARRAY_SIZE(test_cases); i++) { +		if (!test__start_subtest(test_cases[i].name)) +			continue; + +		skel->bss->handled_syn = false; +		skel->bss->handled_ack = false; + +		create_connection(&test_cases[i]); + +		ASSERT_EQ(skel->bss->handled_syn, true, "SYN is not handled at tc."); +		ASSERT_EQ(skel->bss->handled_ack, true, "ACK is not handled at tc"); +	} + +destroy_skel: +	system("tc qdisc del dev lo clsact"); + +	test_tcp_custom_syncookie__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c new file mode 100644 index 000000000000..01dc2613c8a5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> + +#include "struct_ops_maybe_null.skel.h" +#include "struct_ops_maybe_null_fail.skel.h" + +/* Test that the verifier accepts a program that access a nullable pointer + * with a proper check. + */ +static void maybe_null(void) +{ +	struct struct_ops_maybe_null *skel; + +	skel = struct_ops_maybe_null__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_and_load")) +		return; + +	struct_ops_maybe_null__destroy(skel); +} + +/* Test that the verifier rejects a program that access a nullable pointer + * without a check beforehand. + */ +static void maybe_null_fail(void) +{ +	struct struct_ops_maybe_null_fail *skel; + +	skel = struct_ops_maybe_null_fail__open_and_load(); +	if (ASSERT_ERR_PTR(skel, "struct_ops_module_fail__open_and_load")) +		return; + +	struct_ops_maybe_null_fail__destroy(skel); +} + +void test_struct_ops_maybe_null(void) +{ +	/* The verifier verifies the programs at load time, so testing both +	 * programs in the same compile-unit is complicated. We run them in +	 * separate objects to simplify the testing. +	 */ +	if (test__start_subtest("maybe_null")) +		maybe_null(); +	if (test__start_subtest("maybe_null_fail")) +		maybe_null_fail(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c new file mode 100644 index 000000000000..ee5372c7f2c7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> +#include <time.h> + +#include "struct_ops_module.skel.h" + +static void check_map_info(struct bpf_map_info *info) +{ +	struct bpf_btf_info btf_info; +	char btf_name[256]; +	u32 btf_info_len = sizeof(btf_info); +	int err, fd; + +	fd = bpf_btf_get_fd_by_id(info->btf_vmlinux_id); +	if (!ASSERT_GE(fd, 0, "get_value_type_btf_obj_fd")) +		return; + +	memset(&btf_info, 0, sizeof(btf_info)); +	btf_info.name = ptr_to_u64(btf_name); +	btf_info.name_len = sizeof(btf_name); +	err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_info_len); +	if (!ASSERT_OK(err, "get_value_type_btf_obj_info")) +		goto cleanup; + +	if (!ASSERT_EQ(strcmp(btf_name, "bpf_testmod"), 0, "get_value_type_btf_obj_name")) +		goto cleanup; + +cleanup: +	close(fd); +} + +static int attach_ops_and_check(struct struct_ops_module *skel, +				struct bpf_map *map, +				int expected_test_2_result) +{ +	struct bpf_link *link; + +	link = bpf_map__attach_struct_ops(map); +	ASSERT_OK_PTR(link, "attach_test_mod_1"); +	if (!link) +		return -1; + +	/* test_{1,2}() would be called from bpf_dummy_reg() in bpf_testmod.c */ +	ASSERT_EQ(skel->bss->test_1_result, 0xdeadbeef, "test_1_result"); +	ASSERT_EQ(skel->bss->test_2_result, expected_test_2_result, "test_2_result"); + +	bpf_link__destroy(link); +	return 0; +} + +static void test_struct_ops_load(void) +{ +	struct struct_ops_module *skel; +	struct bpf_map_info info = {}; +	int err; +	u32 len; + +	skel = struct_ops_module__open(); +	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open")) +		return; + +	skel->struct_ops.testmod_1->data = 13; +	skel->struct_ops.testmod_1->test_2 = skel->progs.test_3; +	/* Since test_2() is not being used, it should be disabled from +	 * auto-loading, or it will fail to load. +	 */ +	bpf_program__set_autoload(skel->progs.test_2, false); + +	err = struct_ops_module__load(skel); +	if (!ASSERT_OK(err, "struct_ops_module_load")) +		goto cleanup; + +	len = sizeof(info); +	err = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.testmod_1), &info, +				     &len); +	if (!ASSERT_OK(err, "bpf_map_get_info_by_fd")) +		goto cleanup; + +	check_map_info(&info); +	/* test_3() will be called from bpf_dummy_reg() in bpf_testmod.c +	 * +	 * In bpf_testmod.c it will pass 4 and 13 (the value of data) to +	 * .test_2.  So, the value of test_2_result should be 20 (4 + 13 + +	 * 3). +	 */ +	if (!attach_ops_and_check(skel, skel->maps.testmod_1, 20)) +		goto cleanup; +	if (!attach_ops_and_check(skel, skel->maps.testmod_2, 12)) +		goto cleanup; + +cleanup: +	struct_ops_module__destroy(skel); +} + +void serial_test_struct_ops_module(void) +{ +	if (test__start_subtest("test_struct_ops_load")) +		test_struct_ops_load(); +} + diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c new file mode 100644 index 000000000000..645d32b5160c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> + +#include "struct_ops_multi_pages.skel.h" + +static void do_struct_ops_multi_pages(void) +{ +	struct struct_ops_multi_pages *skel; +	struct bpf_link *link; + +	/* The size of all trampolines of skel->maps.multi_pages should be +	 * over 1 page (at least for x86). 
+	 */ +	skel = struct_ops_multi_pages__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "struct_ops_multi_pages_open_and_load")) +		return; + +	link = bpf_map__attach_struct_ops(skel->maps.multi_pages); +	ASSERT_OK_PTR(link, "attach_multi_pages"); + +	bpf_link__destroy(link); +	struct_ops_multi_pages__destroy(skel); +} + +void test_struct_ops_multi_pages(void) +{ +	if (test__start_subtest("multi_pages")) +		do_struct_ops_multi_pages(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c new file mode 100644 index 000000000000..106ea447965a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> +#include <testing_helpers.h> + +static void load_bpf_test_no_cfi(void) +{ +	int fd; +	int err; + +	fd = open("bpf_test_no_cfi.ko", O_RDONLY); +	if (!ASSERT_GE(fd, 0, "open")) +		return; + +	/* The module will try to register a struct_ops type without +	 * cfi_stubs and with cfi_stubs. +	 * +	 * The one without cfi_stub should fail. The module will be loaded +	 * successfully only if the result of the registration is as +	 * expected, or it fails. +	 */ +	err = finit_module(fd, "", 0); +	close(fd); +	if (!ASSERT_OK(err, "finit_module")) +		return; + +	err = delete_module("bpf_test_no_cfi", 0); +	ASSERT_OK(err, "delete_module"); +} + +void test_struct_ops_no_cfi(void) +{ +	if (test__start_subtest("load_bpf_test_no_cfi")) +		load_bpf_test_no_cfi(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c index 2b3c6dd66259..5f1fb0a2ea56 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c +++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c @@ -118,9 +118,9 @@ fail:  static void cleanup(void)  {  	SYS_NOFAIL("test -f /var/run/netns/at_ns0 && ip netns delete at_ns0"); -	SYS_NOFAIL("ip link del veth1 2> /dev/null"); -	SYS_NOFAIL("ip link del %s 2> /dev/null", VXLAN_TUNL_DEV1); -	SYS_NOFAIL("ip link del %s 2> /dev/null", IP6VXLAN_TUNL_DEV1); +	SYS_NOFAIL("ip link del veth1"); +	SYS_NOFAIL("ip link del %s", VXLAN_TUNL_DEV1); +	SYS_NOFAIL("ip link del %s", IP6VXLAN_TUNL_DEV1);  }  static int add_vxlan_tunnel(void) @@ -265,9 +265,9 @@ fail:  static void delete_ipip_tunnel(void)  {  	SYS_NOFAIL("ip -n at_ns0 link delete dev %s", IPIP_TUNL_DEV0); -	SYS_NOFAIL("ip -n at_ns0 fou del port 5555 2> /dev/null"); +	SYS_NOFAIL("ip -n at_ns0 fou del port 5555");  	SYS_NOFAIL("ip link delete dev %s", IPIP_TUNL_DEV1); -	SYS_NOFAIL("ip fou del port 5555 2> /dev/null"); +	SYS_NOFAIL("ip fou del port 5555");  }  static int add_xfrm_tunnel(void) @@ -346,13 +346,13 @@ fail:  static void delete_xfrm_tunnel(void)  { -	SYS_NOFAIL("ip xfrm policy delete dir out src %s/32 dst %s/32 2> /dev/null", +	SYS_NOFAIL("ip xfrm policy delete dir out src %s/32 dst %s/32",  		   IP4_ADDR_TUNL_DEV1, IP4_ADDR_TUNL_DEV0); -	SYS_NOFAIL("ip xfrm policy delete dir in src %s/32 dst %s/32 2> /dev/null", +	SYS_NOFAIL("ip xfrm policy delete dir in src %s/32 dst %s/32",  		   IP4_ADDR_TUNL_DEV0, IP4_ADDR_TUNL_DEV1); -	SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d 2> /dev/null", +	SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d",  		   IP4_ADDR_VETH0, IP4_ADDR1_VETH1, XFRM_SPI_IN_TO_OUT); -	SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d 2> /dev/null", +	
SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d",  		   IP4_ADDR1_VETH1, IP4_ADDR_VETH0, XFRM_SPI_OUT_TO_IN);  } diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c index 760ad96b4be0..d66687f1ee6a 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer.c +++ b/tools/testing/selftests/bpf/prog_tests/timer.c @@ -4,10 +4,29 @@  #include "timer.skel.h"  #include "timer_failure.skel.h" +#define NUM_THR 8 + +static void *spin_lock_thread(void *arg) +{ +	int i, err, prog_fd = *(int *)arg; +	LIBBPF_OPTS(bpf_test_run_opts, topts); + +	for (i = 0; i < 10000; i++) { +		err = bpf_prog_test_run_opts(prog_fd, &topts); +		if (!ASSERT_OK(err, "test_run_opts err") || +		    !ASSERT_OK(topts.retval, "test_run_opts retval")) +			break; +	} + +	pthread_exit(arg); +} +  static int timer(struct timer *timer_skel)  { -	int err, prog_fd; +	int i, err, prog_fd;  	LIBBPF_OPTS(bpf_test_run_opts, topts); +	pthread_t thread_id[NUM_THR]; +	void *ret;  	err = timer__attach(timer_skel);  	if (!ASSERT_OK(err, "timer_attach")) @@ -43,6 +62,20 @@ static int timer(struct timer *timer_skel)  	/* check that code paths completed */  	ASSERT_EQ(timer_skel->bss->ok, 1 | 2 | 4, "ok"); +	prog_fd = bpf_program__fd(timer_skel->progs.race); +	for (i = 0; i < NUM_THR; i++) { +		err = pthread_create(&thread_id[i], NULL, +				     &spin_lock_thread, &prog_fd); +		if (!ASSERT_OK(err, "pthread_create")) +			break; +	} + +	while (i) { +		err = pthread_join(thread_id[--i], &ret); +		if (ASSERT_OK(err, "pthread_join")) +			ASSERT_EQ(ret, (void *)&prog_fd, "pthread_join"); +	} +  	return 0;  } diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c new file mode 100644 index 000000000000..fc4a175d8d76 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/token.c @@ -0,0 +1,1052 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ +#define _GNU_SOURCE +#include <test_progs.h> +#include <bpf/btf.h> +#include "cap_helpers.h" +#include <fcntl.h> +#include <sched.h> +#include <signal.h> +#include <unistd.h> +#include <linux/filter.h> +#include <linux/unistd.h> +#include <linux/mount.h> +#include <sys/socket.h> +#include <sys/stat.h> +#include <sys/syscall.h> +#include <sys/un.h> +#include "priv_map.skel.h" +#include "priv_prog.skel.h" +#include "dummy_st_ops_success.skel.h" +#include "token_lsm.skel.h" + +static inline int sys_mount(const char *dev_name, const char *dir_name, +			    const char *type, unsigned long flags, +			    const void *data) +{ +	return syscall(__NR_mount, dev_name, dir_name, type, flags, data); +} + +static inline int sys_fsopen(const char *fsname, unsigned flags) +{ +	return syscall(__NR_fsopen, fsname, flags); +} + +static inline int sys_fspick(int dfd, const char *path, unsigned flags) +{ +	return syscall(__NR_fspick, dfd, path, flags); +} + +static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux) +{ +	return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux); +} + +static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags) +{ +	return syscall(__NR_fsmount, fs_fd, flags, ms_flags); +} + +static inline int sys_move_mount(int from_dfd, const char *from_path, +				 int to_dfd, const char *to_path, +				 unsigned flags) +{ +	return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, flags); +} + +static int drop_priv_caps(__u64 *old_caps) +{ +	return cap_disable_effective((1ULL << CAP_BPF) | +				     (1ULL << CAP_PERFMON) | +				     (1ULL << CAP_NET_ADMIN) | +				     (1ULL << CAP_SYS_ADMIN), old_caps); +} + +static int restore_priv_caps(__u64 old_caps) +{ +	return cap_enable_effective(old_caps, NULL); +} + +static int set_delegate_mask(int fs_fd, const char *key, __u64 mask, const char *mask_str) +{ +	char buf[32]; +	int err; + +	if (!mask_str) { +		if (mask == ~0ULL) { +			mask_str = "any"; +		} else { +			snprintf(buf, sizeof(buf), "0x%llx", (unsigned long long)mask); +			mask_str = buf; +		} +	} + +	err = sys_fsconfig(fs_fd, FSCONFIG_SET_STRING, key, +			   mask_str, 0); +	if (err < 0) +		err = -errno; +	return err; +} + +#define zclose(fd) do { if (fd >= 0) close(fd); fd = -1; } while (0) + +struct bpffs_opts { +	__u64 cmds; +	__u64 maps; +	__u64 progs; +	__u64 attachs; +	const char *cmds_str; +	const char *maps_str; +	const char *progs_str; +	const char *attachs_str; +}; + +static int create_bpffs_fd(void) +{ +	int fs_fd; + +	/* create VFS context */ +	fs_fd = sys_fsopen("bpf", 0); +	ASSERT_GE(fs_fd, 0, "fs_fd"); + +	return fs_fd; +} + +static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts) +{ +	int mnt_fd, err; + +	/* set up token delegation mount options */ +	err = set_delegate_mask(fs_fd, "delegate_cmds", opts->cmds, opts->cmds_str); +	if (!ASSERT_OK(err, "fs_cfg_cmds")) +		return err; +	err = set_delegate_mask(fs_fd, "delegate_maps", opts->maps, opts->maps_str); +	if (!ASSERT_OK(err, "fs_cfg_maps")) +		return err; +	err = set_delegate_mask(fs_fd, "delegate_progs", opts->progs, opts->progs_str); +	if (!ASSERT_OK(err, "fs_cfg_progs")) +		return err; +	err = set_delegate_mask(fs_fd, "delegate_attachs", opts->attachs, opts->attachs_str); +	if (!ASSERT_OK(err, "fs_cfg_attachs")) +		return err; + +	/* instantiate FS object */ +	err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0); +	if (err < 0) +		return -errno; + +	/* create O_PATH fd for detached mount */ +	mnt_fd = sys_fsmount(fs_fd, 0, 0); +	
if (mnt_fd < 0)
+		return -errno;
+
+	return mnt_fd;
+}
+
+/* send FD over Unix domain (AF_UNIX) socket */
+static int sendfd(int sockfd, int fd)
+{
+	struct msghdr msg = {};
+	struct cmsghdr *cmsg;
+	int fds[1] = { fd }, err;
+	char iobuf[1];
+	struct iovec io = {
+		.iov_base = iobuf,
+		.iov_len = sizeof(iobuf),
+	};
+	union {
+		char buf[CMSG_SPACE(sizeof(fds))];
+		struct cmsghdr align;
+	} u;
+
+	msg.msg_iov = &io;
+	msg.msg_iovlen = 1;
+	msg.msg_control = u.buf;
+	msg.msg_controllen = sizeof(u.buf);
+	cmsg = CMSG_FIRSTHDR(&msg);
+	cmsg->cmsg_level = SOL_SOCKET;
+	cmsg->cmsg_type = SCM_RIGHTS;
+	cmsg->cmsg_len = CMSG_LEN(sizeof(fds));
+	memcpy(CMSG_DATA(cmsg), fds, sizeof(fds));
+
+	err = sendmsg(sockfd, &msg, 0);
+	if (err < 0)
+		err = -errno;
+	if (!ASSERT_EQ(err, 1, "sendmsg"))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* receive FD over Unix domain (AF_UNIX) socket */
+static int recvfd(int sockfd, int *fd)
+{
+	struct msghdr msg = {};
+	struct cmsghdr *cmsg;
+	int fds[1], err;
+	char iobuf[1];
+	struct iovec io = {
+		.iov_base = iobuf,
+		.iov_len = sizeof(iobuf),
+	};
+	union {
+		char buf[CMSG_SPACE(sizeof(fds))];
+		struct cmsghdr align;
+	} u;
+
+	msg.msg_iov = &io;
+	msg.msg_iovlen = 1;
+	msg.msg_control = u.buf;
+	msg.msg_controllen = sizeof(u.buf);
+
+	err = recvmsg(sockfd, &msg, 0);
+	if (err < 0)
+		err = -errno;
+	if (!ASSERT_EQ(err, 1, "recvmsg"))
+		return -EINVAL;
+
+	cmsg = CMSG_FIRSTHDR(&msg);
+	if (!ASSERT_OK_PTR(cmsg, "cmsg_null") ||
+	    !ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(fds)), "cmsg_len") ||
+	    !ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET, "cmsg_level") ||
+	    !ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS, "cmsg_type"))
+		return -EINVAL;
+
+	memcpy(fds, CMSG_DATA(cmsg), sizeof(fds));
+	*fd = fds[0];
+
+	return 0;
+}
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+	ssize_t ret;
+
+	do {
+		ret = write(fd, buf, count);
+	} while (ret < 0 && errno == EINTR);
+
+	return ret;
+}
+
+static int write_file(const char *path, const void *buf, size_t count)
+{
+	int fd;
+	ssize_t ret;
+
+	fd = open(path, O_WRONLY | O_CLOEXEC | O_NOCTTY | O_NOFOLLOW);
+	if (fd < 0)
+		return -1;
+
+	ret = write_nointr(fd, buf, count);
+	close(fd);
+	if (ret < 0 || (size_t)ret != count)
+		return -1;
+
+	return 0;
+}
+
+static int create_and_enter_userns(void)
+{
+	uid_t uid;
+	gid_t gid;
+	char map[100];
+
+	uid = getuid();
+	gid = getgid();
+
+	if (unshare(CLONE_NEWUSER))
+		return -1;
+
+	if (write_file("/proc/self/setgroups", "deny", sizeof("deny") - 1) &&
+	    errno != ENOENT)
+		return -1;
+
+	snprintf(map, sizeof(map), "0 %d 1", uid);
+	if (write_file("/proc/self/uid_map", map, strlen(map)))
+		return -1;
+
+	snprintf(map, sizeof(map), "0 %d 1", gid);
+	if (write_file("/proc/self/gid_map", map, strlen(map)))
+		return -1;
+
+	if (setgid(0))
+		return -1;
+
+	if (setuid(0))
+		return -1;
+
+	return 0;
+}
+
+typedef int (*child_callback_fn)(int bpffs_fd, struct token_lsm *lsm_skel);
+
+static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callback)
+{
+	int mnt_fd = -1, fs_fd = -1, err = 0, bpffs_fd = -1, token_fd = -1;
+	struct token_lsm *lsm_skel = NULL;
+
+	/* load and attach LSM "policy" before we go into unpriv userns */
+	lsm_skel = token_lsm__open_and_load();
+	if (!ASSERT_OK_PTR(lsm_skel, "lsm_skel_load")) {
+		err = -EINVAL;
+		goto cleanup;
+	}
+	lsm_skel->bss->my_pid = getpid();
+	err = token_lsm__attach(lsm_skel);
+	if (!ASSERT_OK(err, "lsm_skel_attach"))
+		goto cleanup;
+
+	/* setup userns with root mappings */
+	err = 
create_and_enter_userns(); +	if (!ASSERT_OK(err, "create_and_enter_userns")) +		goto cleanup; + +	/* setup mountns to allow creating BPF FS (fsopen("bpf")) from unpriv process */ +	err = unshare(CLONE_NEWNS); +	if (!ASSERT_OK(err, "create_mountns")) +		goto cleanup; + +	err = sys_mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0); +	if (!ASSERT_OK(err, "remount_root")) +		goto cleanup; + +	fs_fd = create_bpffs_fd(); +	if (!ASSERT_GE(fs_fd, 0, "create_bpffs_fd")) { +		err = -EINVAL; +		goto cleanup; +	} + +	/* ensure unprivileged child cannot set delegation options */ +	err = set_delegate_mask(fs_fd, "delegate_cmds", 0x1, NULL); +	ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm"); +	err = set_delegate_mask(fs_fd, "delegate_maps", 0x1, NULL); +	ASSERT_EQ(err, -EPERM, "delegate_maps_eperm"); +	err = set_delegate_mask(fs_fd, "delegate_progs", 0x1, NULL); +	ASSERT_EQ(err, -EPERM, "delegate_progs_eperm"); +	err = set_delegate_mask(fs_fd, "delegate_attachs", 0x1, NULL); +	ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm"); + +	/* pass BPF FS context object to parent */ +	err = sendfd(sock_fd, fs_fd); +	if (!ASSERT_OK(err, "send_fs_fd")) +		goto cleanup; +	zclose(fs_fd); + +	/* avoid mucking around with mount namespaces and mounting at +	 * well-known path, just get detach-mounted BPF FS fd back from parent +	 */ +	err = recvfd(sock_fd, &mnt_fd); +	if (!ASSERT_OK(err, "recv_mnt_fd")) +		goto cleanup; + +	/* try to fspick() BPF FS and try to add some delegation options */ +	fs_fd = sys_fspick(mnt_fd, "", FSPICK_EMPTY_PATH); +	if (!ASSERT_GE(fs_fd, 0, "bpffs_fspick")) { +		err = -EINVAL; +		goto cleanup; +	} + +	/* ensure unprivileged child cannot reconfigure to set delegation options */ +	err = set_delegate_mask(fs_fd, "delegate_cmds", 0, "any"); +	if (!ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm_reconfig")) { +		err = -EINVAL; +		goto cleanup; +	} +	err = set_delegate_mask(fs_fd, "delegate_maps", 0, "any"); +	if (!ASSERT_EQ(err, -EPERM, "delegate_maps_eperm_reconfig")) { +		err = -EINVAL; +		goto cleanup; +	} +	err = set_delegate_mask(fs_fd, "delegate_progs", 0, "any"); +	if (!ASSERT_EQ(err, -EPERM, "delegate_progs_eperm_reconfig")) { +		err = -EINVAL; +		goto cleanup; +	} +	err = set_delegate_mask(fs_fd, "delegate_attachs", 0, "any"); +	if (!ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm_reconfig")) { +		err = -EINVAL; +		goto cleanup; +	} +	zclose(fs_fd); + +	bpffs_fd = openat(mnt_fd, ".", 0, O_RDWR); +	if (!ASSERT_GE(bpffs_fd, 0, "bpffs_open")) { +		err = -EINVAL; +		goto cleanup; +	} + +	/* create BPF token FD and pass it to parent for some extra checks */ +	token_fd = bpf_token_create(bpffs_fd, NULL); +	if (!ASSERT_GT(token_fd, 0, "child_token_create")) { +		err = -EINVAL; +		goto cleanup; +	} +	err = sendfd(sock_fd, token_fd); +	if (!ASSERT_OK(err, "send_token_fd")) +		goto cleanup; +	zclose(token_fd); + +	/* do custom test logic with customly set up BPF FS instance */ +	err = callback(bpffs_fd, lsm_skel); +	if (!ASSERT_OK(err, "test_callback")) +		goto cleanup; + +	err = 0; +cleanup: +	zclose(sock_fd); +	zclose(mnt_fd); +	zclose(fs_fd); +	zclose(bpffs_fd); +	zclose(token_fd); + +	lsm_skel->bss->my_pid = 0; +	token_lsm__destroy(lsm_skel); + +	exit(-err); +} + +static int wait_for_pid(pid_t pid) +{ +	int status, ret; + +again: +	ret = waitpid(pid, &status, 0); +	if (ret == -1) { +		if (errno == EINTR) +			goto again; + +		return -1; +	} + +	if (!WIFEXITED(status)) +		return -1; + +	return WEXITSTATUS(status); +} + +static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd) +{ +	int 
fs_fd = -1, mnt_fd = -1, token_fd = -1, err; + +	err = recvfd(sock_fd, &fs_fd); +	if (!ASSERT_OK(err, "recv_bpffs_fd")) +		goto cleanup; + +	mnt_fd = materialize_bpffs_fd(fs_fd, bpffs_opts); +	if (!ASSERT_GE(mnt_fd, 0, "materialize_bpffs_fd")) { +		err = -EINVAL; +		goto cleanup; +	} +	zclose(fs_fd); + +	/* pass BPF FS context object to parent */ +	err = sendfd(sock_fd, mnt_fd); +	if (!ASSERT_OK(err, "send_mnt_fd")) +		goto cleanup; +	zclose(mnt_fd); + +	/* receive BPF token FD back from child for some extra tests */ +	err = recvfd(sock_fd, &token_fd); +	if (!ASSERT_OK(err, "recv_token_fd")) +		goto cleanup; + +	err = wait_for_pid(child_pid); +	ASSERT_OK(err, "waitpid_child"); + +cleanup: +	zclose(sock_fd); +	zclose(fs_fd); +	zclose(mnt_fd); +	zclose(token_fd); + +	if (child_pid > 0) +		(void)kill(child_pid, SIGKILL); +} + +static void subtest_userns(struct bpffs_opts *bpffs_opts, +			   child_callback_fn child_cb) +{ +	int sock_fds[2] = { -1, -1 }; +	int child_pid = 0, err; + +	err = socketpair(AF_UNIX, SOCK_STREAM, 0, sock_fds); +	if (!ASSERT_OK(err, "socketpair")) +		goto cleanup; + +	child_pid = fork(); +	if (!ASSERT_GE(child_pid, 0, "fork")) +		goto cleanup; + +	if (child_pid == 0) { +		zclose(sock_fds[0]); +		return child(sock_fds[1], bpffs_opts, child_cb); + +	} else { +		zclose(sock_fds[1]); +		return parent(child_pid, bpffs_opts, sock_fds[0]); +	} + +cleanup: +	zclose(sock_fds[0]); +	zclose(sock_fds[1]); +	if (child_pid > 0) +		(void)kill(child_pid, SIGKILL); +} + +static int userns_map_create(int mnt_fd, struct token_lsm *lsm_skel) +{ +	LIBBPF_OPTS(bpf_map_create_opts, map_opts); +	int err, token_fd = -1, map_fd = -1; +	__u64 old_caps = 0; + +	/* create BPF token from BPF FS mount */ +	token_fd = bpf_token_create(mnt_fd, NULL); +	if (!ASSERT_GT(token_fd, 0, "token_create")) { +		err = -EINVAL; +		goto cleanup; +	} + +	/* while inside non-init userns, we need both a BPF token *and* +	 * CAP_BPF inside current userns to create privileged map; let's test +	 * that neither BPF token alone nor namespaced CAP_BPF is sufficient +	 */ +	err = drop_priv_caps(&old_caps); +	if (!ASSERT_OK(err, "drop_caps")) +		goto cleanup; + +	/* no token, no CAP_BPF -> fail */ +	map_opts.map_flags = 0; +	map_opts.token_fd = 0; +	map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_wo_bpf", 0, 8, 1, &map_opts); +	if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_wo_cap_bpf_should_fail")) { +		err = -EINVAL; +		goto cleanup; +	} + +	/* token without CAP_BPF -> fail */ +	map_opts.map_flags = BPF_F_TOKEN_FD; +	map_opts.token_fd = token_fd; +	map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_wo_bpf", 0, 8, 1, &map_opts); +	if (!ASSERT_LT(map_fd, 0, "stack_map_w_token_wo_cap_bpf_should_fail")) { +		err = -EINVAL; +		goto cleanup; +	} + +	/* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */ +	err = restore_priv_caps(old_caps); +	if (!ASSERT_OK(err, "restore_caps")) +		goto cleanup; + +	/* CAP_BPF without token -> fail */ +	map_opts.map_flags = 0; +	map_opts.token_fd = 0; +	map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_w_bpf", 0, 8, 1, &map_opts); +	if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_w_cap_bpf_should_fail")) { +		err = -EINVAL; +		goto cleanup; +	} + +	/* finally, namespaced CAP_BPF + token -> success */ +	map_opts.map_flags = BPF_F_TOKEN_FD; +	map_opts.token_fd = token_fd; +	map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_w_bpf", 0, 8, 1, &map_opts); +	if (!ASSERT_GT(map_fd, 0, "stack_map_w_token_w_cap_bpf")) { +		err = -EINVAL; +		goto cleanup; +	} + +cleanup: +	zclose(token_fd); +	
zclose(map_fd); +	return err; +} + +static int userns_btf_load(int mnt_fd, struct token_lsm *lsm_skel) +{ +	LIBBPF_OPTS(bpf_btf_load_opts, btf_opts); +	int err, token_fd = -1, btf_fd = -1; +	const void *raw_btf_data; +	struct btf *btf = NULL; +	__u32 raw_btf_size; +	__u64 old_caps = 0; + +	/* create BPF token from BPF FS mount */ +	token_fd = bpf_token_create(mnt_fd, NULL); +	if (!ASSERT_GT(token_fd, 0, "token_create")) { +		err = -EINVAL; +		goto cleanup; +	} + +	/* while inside non-init userns, we need both a BPF token *and* +	 * CAP_BPF inside current userns to create privileged map; let's test +	 * that neither BPF token alone nor namespaced CAP_BPF is sufficient +	 */ +	err = drop_priv_caps(&old_caps); +	if (!ASSERT_OK(err, "drop_caps")) +		goto cleanup; + +	/* setup a trivial BTF data to load to the kernel */ +	btf = btf__new_empty(); +	if (!ASSERT_OK_PTR(btf, "empty_btf")) +		goto cleanup; + +	ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type"); + +	raw_btf_data = btf__raw_data(btf, &raw_btf_size); +	if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data")) +		goto cleanup; + +	/* no token + no CAP_BPF -> failure */ +	btf_opts.btf_flags = 0; +	btf_opts.token_fd = 0; +	btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts); +	if (!ASSERT_LT(btf_fd, 0, "no_token_no_cap_should_fail")) +		goto cleanup; + +	/* token + no CAP_BPF -> failure */ +	btf_opts.btf_flags = BPF_F_TOKEN_FD; +	btf_opts.token_fd = token_fd; +	btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts); +	if (!ASSERT_LT(btf_fd, 0, "token_no_cap_should_fail")) +		goto cleanup; + +	/* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */ +	err = restore_priv_caps(old_caps); +	if (!ASSERT_OK(err, "restore_caps")) +		goto cleanup; + +	/* token + CAP_BPF -> success */ +	btf_opts.btf_flags = BPF_F_TOKEN_FD; +	btf_opts.token_fd = token_fd; +	btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts); +	if (!ASSERT_GT(btf_fd, 0, "token_and_cap_success")) +		goto cleanup; + +	err = 0; +cleanup: +	btf__free(btf); +	zclose(btf_fd); +	zclose(token_fd); +	return err; +} + +static int userns_prog_load(int mnt_fd, struct token_lsm *lsm_skel) +{ +	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts); +	int err, token_fd = -1, prog_fd = -1; +	struct bpf_insn insns[] = { +		/* bpf_jiffies64() requires CAP_BPF */ +		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64), +		/* bpf_get_current_task() requires CAP_PERFMON */ +		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_current_task), +		/* r0 = 0; exit; */ +		BPF_MOV64_IMM(BPF_REG_0, 0), +		BPF_EXIT_INSN(), +	}; +	size_t insn_cnt = ARRAY_SIZE(insns); +	__u64 old_caps = 0; + +	/* create BPF token from BPF FS mount */ +	token_fd = bpf_token_create(mnt_fd, NULL); +	if (!ASSERT_GT(token_fd, 0, "token_create")) { +		err = -EINVAL; +		goto cleanup; +	} + +	/* validate we can successfully load BPF program with token; this +	 * being XDP program (CAP_NET_ADMIN) using bpf_jiffies64() (CAP_BPF) +	 * and bpf_get_current_task() (CAP_PERFMON) helpers validates we have +	 * BPF token wired properly in a bunch of places in the kernel +	 */ +	prog_opts.prog_flags = BPF_F_TOKEN_FD; +	prog_opts.token_fd = token_fd; +	prog_opts.expected_attach_type = BPF_XDP; +	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", +				insns, insn_cnt, &prog_opts); +	if (!ASSERT_GT(prog_fd, 0, "prog_fd")) { +		err = -EPERM; +		goto cleanup; +	} + +	/* no token + caps -> failure */ +	prog_opts.prog_flags = 0; +	prog_opts.token_fd = 0; +	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", 
"GPL", +				insns, insn_cnt, &prog_opts); +	if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) { +		err = -EPERM; +		goto cleanup; +	} + +	err = drop_priv_caps(&old_caps); +	if (!ASSERT_OK(err, "drop_caps")) +		goto cleanup; + +	/* no caps + token -> failure */ +	prog_opts.prog_flags = BPF_F_TOKEN_FD; +	prog_opts.token_fd = token_fd; +	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", +				insns, insn_cnt, &prog_opts); +	if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) { +		err = -EPERM; +		goto cleanup; +	} + +	/* no caps + no token -> definitely a failure */ +	prog_opts.prog_flags = 0; +	prog_opts.token_fd = 0; +	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", +				insns, insn_cnt, &prog_opts); +	if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) { +		err = -EPERM; +		goto cleanup; +	} + +	err = 0; +cleanup: +	zclose(prog_fd); +	zclose(token_fd); +	return err; +} + +static int userns_obj_priv_map(int mnt_fd, struct token_lsm *lsm_skel) +{ +	LIBBPF_OPTS(bpf_object_open_opts, opts); +	char buf[256]; +	struct priv_map *skel; +	int err; + +	skel = priv_map__open_and_load(); +	if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { +		priv_map__destroy(skel); +		return -EINVAL; +	} + +	/* use bpf_token_path to provide BPF FS path */ +	snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); +	opts.bpf_token_path = buf; +	skel = priv_map__open_opts(&opts); +	if (!ASSERT_OK_PTR(skel, "obj_token_path_open")) +		return -EINVAL; + +	err = priv_map__load(skel); +	priv_map__destroy(skel); +	if (!ASSERT_OK(err, "obj_token_path_load")) +		return -EINVAL; + +	return 0; +} + +static int userns_obj_priv_prog(int mnt_fd, struct token_lsm *lsm_skel) +{ +	LIBBPF_OPTS(bpf_object_open_opts, opts); +	char buf[256]; +	struct priv_prog *skel; +	int err; + +	skel = priv_prog__open_and_load(); +	if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { +		priv_prog__destroy(skel); +		return -EINVAL; +	} + +	/* use bpf_token_path to provide BPF FS path */ +	snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); +	opts.bpf_token_path = buf; +	skel = priv_prog__open_opts(&opts); +	if (!ASSERT_OK_PTR(skel, "obj_token_path_open")) +		return -EINVAL; +	err = priv_prog__load(skel); +	priv_prog__destroy(skel); +	if (!ASSERT_OK(err, "obj_token_path_load")) +		return -EINVAL; + +	/* provide BPF token, but reject bpf_token_capable() with LSM */ +	lsm_skel->bss->reject_capable = true; +	lsm_skel->bss->reject_cmd = false; +	skel = priv_prog__open_opts(&opts); +	if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cap_open")) +		return -EINVAL; +	err = priv_prog__load(skel); +	priv_prog__destroy(skel); +	if (!ASSERT_ERR(err, "obj_token_lsm_reject_cap_load")) +		return -EINVAL; + +	/* provide BPF token, but reject bpf_token_cmd() with LSM */ +	lsm_skel->bss->reject_capable = false; +	lsm_skel->bss->reject_cmd = true; +	skel = priv_prog__open_opts(&opts); +	if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cmd_open")) +		return -EINVAL; +	err = priv_prog__load(skel); +	priv_prog__destroy(skel); +	if (!ASSERT_ERR(err, "obj_token_lsm_reject_cmd_load")) +		return -EINVAL; + +	return 0; +} + +/* this test is called with BPF FS that doesn't delegate BPF_BTF_LOAD command, + * which should cause struct_ops application to fail, as BTF won't be uploaded + * into the kernel, even if STRUCT_OPS programs themselves are allowed + */ +static int validate_struct_ops_load(int mnt_fd, bool expect_success) +{ +	LIBBPF_OPTS(bpf_object_open_opts, opts); +	char buf[256]; +	struct dummy_st_ops_success *skel; +	int err; + +	snprintf(buf, 
sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+	opts.bpf_token_path = buf;
+	skel = dummy_st_ops_success__open_opts(&opts);
+	if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+		return -EINVAL;
+
+	err = dummy_st_ops_success__load(skel);
+	dummy_st_ops_success__destroy(skel);
+	if (expect_success) {
+		if (!ASSERT_OK(err, "obj_token_path_load"))
+			return -EINVAL;
+	} else /* expect failure */ {
+		if (!ASSERT_ERR(err, "obj_token_path_load"))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int userns_obj_priv_btf_fail(int mnt_fd, struct token_lsm *lsm_skel)
+{
+	return validate_struct_ops_load(mnt_fd, false /* should fail */);
+}
+
+static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel)
+{
+	return validate_struct_ops_load(mnt_fd, true /* should succeed */);
+}
+
+#define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH"
+#define TOKEN_BPFFS_CUSTOM "/bpf-token-fs"
+
+static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel)
+{
+	LIBBPF_OPTS(bpf_object_open_opts, opts);
+	struct dummy_st_ops_success *skel;
+	int err;
+
+	/* before we mount BPF FS with token delegation, struct_ops skeleton
+	 * should fail to load
+	 */
+	skel = dummy_st_ops_success__open_and_load();
+	if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+		dummy_st_ops_success__destroy(skel);
+		return -EINVAL;
+	}
+
+	/* mount custom BPF FS over /sys/fs/bpf so that libbpf can create BPF
+	 * token automatically and implicitly
+	 */
+	err = sys_move_mount(mnt_fd, "", AT_FDCWD, "/sys/fs/bpf", MOVE_MOUNT_F_EMPTY_PATH);
+	if (!ASSERT_OK(err, "move_mount_bpffs"))
+		return -EINVAL;
+
+	/* disable implicit BPF token creation by setting
+	 * LIBBPF_BPF_TOKEN_PATH envvar to empty value, load should fail
+	 */
+	err = setenv(TOKEN_ENVVAR, "", 1 /*overwrite*/);
+	if (!ASSERT_OK(err, "setenv_token_path"))
+		return -EINVAL;
+	skel = dummy_st_ops_success__open_and_load();
+	if (!ASSERT_ERR_PTR(skel, "obj_token_envvar_disabled_load")) {
+		unsetenv(TOKEN_ENVVAR);
+		dummy_st_ops_success__destroy(skel);
+		return -EINVAL;
+	}
+	unsetenv(TOKEN_ENVVAR);
+
+	/* now the same struct_ops skeleton should succeed thanks to libbpf
+	 * creating BPF token from /sys/fs/bpf mount point
+	 */
+	skel = dummy_st_ops_success__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load"))
+		return -EINVAL;
+
+	dummy_st_ops_success__destroy(skel);
+
+	/* now disable implicit token through empty bpf_token_path, should fail */
+	opts.bpf_token_path = "";
+	skel = dummy_st_ops_success__open_opts(&opts);
+	if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open"))
+		return -EINVAL;
+
+	err = dummy_st_ops_success__load(skel);
+	dummy_st_ops_success__destroy(skel);
+	if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel)
+{
+	LIBBPF_OPTS(bpf_object_open_opts, opts);
+	struct dummy_st_ops_success *skel;
+	int err;
+
+	/* before we mount BPF FS with token delegation, struct_ops skeleton
+	 * should fail to load
+	 */
+	skel = dummy_st_ops_success__open_and_load();
+	if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+		dummy_st_ops_success__destroy(skel);
+		return -EINVAL;
+	}
+
+	/* mount custom BPF FS over custom location, so libbpf can't create
+	 * BPF token implicitly, unless pointed to it through
+	 * LIBBPF_BPF_TOKEN_PATH envvar
+	 */
+	rmdir(TOKEN_BPFFS_CUSTOM);
+	if (!ASSERT_OK(mkdir(TOKEN_BPFFS_CUSTOM, 0777), "mkdir_bpffs_custom"))
+		goto err_out;
+	err = sys_move_mount(mnt_fd, "", AT_FDCWD, TOKEN_BPFFS_CUSTOM, MOVE_MOUNT_F_EMPTY_PATH);
+	if (!ASSERT_OK(err, "move_mount_bpffs"))
+		goto err_out;
+
+	/* even though we have BPF FS with delegation, it's not at default
+	 * /sys/fs/bpf location, so we still fail to load until envvar is set up
+	 */
+	skel = dummy_st_ops_success__open_and_load();
+	if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load2")) {
+		dummy_st_ops_success__destroy(skel);
+		goto err_out;
+	}
+
+	err = setenv(TOKEN_ENVVAR, TOKEN_BPFFS_CUSTOM, 1 /*overwrite*/);
+	if (!ASSERT_OK(err, "setenv_token_path"))
+		goto err_out;
+
+	/* now the same struct_ops skeleton should succeed thanks to libbpf
+	 * creating BPF token from custom mount point
+	 */
+	skel = dummy_st_ops_success__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load"))
+		goto err_out;
+
+	dummy_st_ops_success__destroy(skel);
+
+	/* now disable implicit token through empty bpf_token_path, envvar
+	 * will be ignored, should fail
+	 */
+	opts.bpf_token_path = "";
+	skel = dummy_st_ops_success__open_opts(&opts);
+	if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open"))
+		goto err_out;
+
+	err = dummy_st_ops_success__load(skel);
+	dummy_st_ops_success__destroy(skel);
+	if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
+		goto err_out;
+
+	rmdir(TOKEN_BPFFS_CUSTOM);
+	unsetenv(TOKEN_ENVVAR);
+	return 0;
+err_out:
+	rmdir(TOKEN_BPFFS_CUSTOM);
+	unsetenv(TOKEN_ENVVAR);
+	return -EINVAL;
+}
+
+#define bit(n) (1ULL << (n))
+
+void test_token(void)
+{
+	if (test__start_subtest("map_token")) {
+		struct bpffs_opts opts = {
+			.cmds_str = "map_create",
+			.maps_str = "stack",
+		};
+
+		subtest_userns(&opts, userns_map_create);
+	}
+	if (test__start_subtest("btf_token")) {
+		struct bpffs_opts opts = {
+			.cmds = 1ULL << BPF_BTF_LOAD,
+		};
+
+		subtest_userns(&opts, userns_btf_load);
+	}
+	if (test__start_subtest("prog_token")) {
+		struct bpffs_opts opts = {
+			.cmds_str = "PROG_LOAD",
+			.progs_str = "XDP",
+			.attachs_str = "xdp",
+		};
+
+		subtest_userns(&opts, userns_prog_load);
+	}
+	if (test__start_subtest("obj_priv_map")) {
+		struct bpffs_opts opts = {
+			.cmds = bit(BPF_MAP_CREATE),
+			.maps = bit(BPF_MAP_TYPE_QUEUE),
+		};
+
+		subtest_userns(&opts, userns_obj_priv_map);
+	}
+	if (test__start_subtest("obj_priv_prog")) {
+		struct bpffs_opts opts = {
+			.cmds = bit(BPF_PROG_LOAD),
+			.progs = bit(BPF_PROG_TYPE_KPROBE),
+			.attachs = ~0ULL,
+		};
+
+		subtest_userns(&opts, userns_obj_priv_prog);
+	}
+	if (test__start_subtest("obj_priv_btf_fail")) {
+		struct bpffs_opts opts = {
+			/* disallow BTF loading */
+			.cmds = bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+			.maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+			.progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+			.attachs = ~0ULL,
+		};
+
+		subtest_userns(&opts, userns_obj_priv_btf_fail);
+	}
+	if (test__start_subtest("obj_priv_btf_success")) {
+		struct bpffs_opts opts = {
+			/* allow BTF loading */
+			.cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+			.maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+			.progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+			.attachs = ~0ULL,
+		};
+
+		subtest_userns(&opts, userns_obj_priv_btf_success);
+	}
+	if (test__start_subtest("obj_priv_implicit_token")) {
+		struct bpffs_opts opts = {
+			/* allow BTF loading */
+			.cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+			.maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+			.progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+			.attachs = ~0ULL,
+		};
+
+		subtest_userns(&opts, userns_obj_priv_implicit_token);
+	}
+	if 
(test__start_subtest("obj_priv_implicit_token_envvar")) { +		struct bpffs_opts opts = { +			/* allow BTF loading */ +			.cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), +			.maps = bit(BPF_MAP_TYPE_STRUCT_OPS), +			.progs = bit(BPF_PROG_TYPE_STRUCT_OPS), +			.attachs = ~0ULL, +		}; + +		subtest_userns(&opts, userns_obj_priv_implicit_token_envvar); +	} +} diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c new file mode 100644 index 000000000000..a222df765bc3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> +#include "tracing_failure.skel.h" + +static void test_bpf_spin_lock(bool is_spin_lock) +{ +	struct tracing_failure *skel; +	int err; + +	skel = tracing_failure__open(); +	if (!ASSERT_OK_PTR(skel, "tracing_failure__open")) +		return; + +	if (is_spin_lock) +		bpf_program__set_autoload(skel->progs.test_spin_lock, true); +	else +		bpf_program__set_autoload(skel->progs.test_spin_unlock, true); + +	err = tracing_failure__load(skel); +	if (!ASSERT_OK(err, "tracing_failure__load")) +		goto out; + +	err = tracing_failure__attach(skel); +	ASSERT_ERR(err, "tracing_failure__attach"); + +out: +	tracing_failure__destroy(skel); +} + +void test_tracing_failure(void) +{ +	if (test__start_subtest("bpf_spin_lock")) +		test_bpf_spin_lock(true); +	if (test__start_subtest("bpf_spin_unlock")) +		test_bpf_spin_lock(false); +} diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index d62c5bf00e71..985273832f89 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -4,6 +4,7 @@  #include "cap_helpers.h"  #include "verifier_and.skel.h" +#include "verifier_arena.skel.h"  #include "verifier_array_access.skel.h"  #include "verifier_basic_stack.skel.h"  #include "verifier_bitfield_write.skel.h" @@ -28,6 +29,7 @@  #include "verifier_div0.skel.h"  #include "verifier_div_overflow.skel.h"  #include "verifier_global_subprogs.skel.h" +#include "verifier_global_ptr_args.skel.h"  #include "verifier_gotol.skel.h"  #include "verifier_helper_access_var_len.skel.h"  #include "verifier_helper_packet_access.skel.h" @@ -117,6 +119,7 @@ static void run_tests_aux(const char *skel_name,  #define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes, NULL)  void test_verifier_and(void)                  { RUN(verifier_and); } +void test_verifier_arena(void)                { RUN(verifier_arena); }  void test_verifier_basic_stack(void)          { RUN(verifier_basic_stack); }  void test_verifier_bitfield_write(void)       { RUN(verifier_bitfield_write); }  void test_verifier_bounds(void)               { RUN(verifier_bounds); } @@ -140,6 +143,7 @@ void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_st  void test_verifier_div0(void)                 { RUN(verifier_div0); }  void test_verifier_div_overflow(void)         { RUN(verifier_div_overflow); }  void test_verifier_global_subprogs(void)      { RUN(verifier_global_subprogs); } +void test_verifier_global_ptr_args(void)      { RUN(verifier_global_ptr_args); }  void test_verifier_gotol(void)                { RUN(verifier_gotol); }  void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); }  void test_verifier_helper_packet_access(void) { 
RUN(verifier_helper_packet_access); }
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
index c3b45745cbcc..6d8b54124cb3 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
@@ -511,7 +511,7 @@ static void test_xdp_bonding_features(struct skeletons *skeletons)
 	if (!ASSERT_OK(err, "bond bpf_xdp_query"))
 		goto out;
-	if (!ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
+	if (!ASSERT_EQ(query_opts.feature_flags, 0,
 		       "bond query_opts.feature_flags"))
 		goto out;
@@ -601,7 +601,7 @@
 	if (!ASSERT_OK(err, "bond bpf_xdp_query"))
 		goto out;
-	ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
+	ASSERT_EQ(query_opts.feature_flags, 0,
 		  "bond query_opts.feature_flags");
 out:
 	bpf_link__destroy(link);
diff --git a/tools/testing/selftests/bpf/prog_tests/xdpwall.c b/tools/testing/selftests/bpf/prog_tests/xdpwall.c
index f3927829a55a..4599154c8e9b 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdpwall.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdpwall.c
@@ -9,7 +9,7 @@ void test_xdpwall(void)
 	struct xdpwall *skel;
 	skel = xdpwall__open_and_load();
-	ASSERT_OK_PTR(skel, "Does LLMV have https://reviews.llvm.org/D109073?");
+	ASSERT_OK_PTR(skel, "Does LLVM have https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5?");
 	xdpwall__destroy(skel);
 }