Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
26 files changed, 2394 insertions, 394 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index e89685bd587c..3369c5ec3a17 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020 Facebook */
 #include <test_progs.h>
+#include <unistd.h>
+#include <sys/syscall.h>
 #include "bpf_iter_ipv6_route.skel.h"
 #include "bpf_iter_netlink.skel.h"
 #include "bpf_iter_bpf_map.skel.h"
@@ -14,6 +16,7 @@
 #include "bpf_iter_udp4.skel.h"
 #include "bpf_iter_udp6.skel.h"
 #include "bpf_iter_unix.skel.h"
+#include "bpf_iter_vma_offset.skel.h"
 #include "bpf_iter_test_kern1.skel.h"
 #include "bpf_iter_test_kern2.skel.h"
 #include "bpf_iter_test_kern3.skel.h"
@@ -43,13 +46,13 @@ static void test_btf_id_or_null(void)
 	}
 }
 
-static void do_dummy_read(struct bpf_program *prog)
+static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
 {
 	struct bpf_link *link;
 	char buf[16] = {};
 	int iter_fd, len;
 
-	link = bpf_program__attach_iter(prog, NULL);
+	link = bpf_program__attach_iter(prog, opts);
 	if (!ASSERT_OK_PTR(link, "attach_iter"))
 		return;
 
@@ -68,6 +71,11 @@ free_link:
 	bpf_link__destroy(link);
 }
 
+static void do_dummy_read(struct bpf_program *prog)
+{
+	do_dummy_read_opts(prog, NULL);
+}
+
 static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
 				struct bpf_map *map)
 {
@@ -167,19 +175,140 @@ static void test_bpf_map(void)
 	bpf_iter_bpf_map__destroy(skel);
 }
 
-static void test_task(void)
+static int pidfd_open(pid_t pid, unsigned int flags)
+{
+	return syscall(SYS_pidfd_open, pid, flags);
+}
+
+static void check_bpf_link_info(const struct bpf_program *prog)
+{
+	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+	struct bpf_link_info info = {};
+	struct bpf_link *link;
+	__u32 info_len;
+	int err;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.tid = getpid();
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	link = bpf_program__attach_iter(prog, &opts);
+	if (!ASSERT_OK_PTR(link, "attach_iter"))
+		return;
+
+	info_len = sizeof(info);
+	err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
+	ASSERT_OK(err, "bpf_obj_get_info_by_fd");
+	ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");
+
+	bpf_link__destroy(link);
+}
+
+static pthread_mutex_t do_nothing_mutex;
+
+static void *do_nothing_wait(void *arg)
+{
+	pthread_mutex_lock(&do_nothing_mutex);
+	pthread_mutex_unlock(&do_nothing_mutex);
+
+	pthread_exit(arg);
+}
+
+static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
+				     int *num_unknown, int *num_known)
 {
 	struct bpf_iter_task *skel;
+	pthread_t thread_id;
+	void *ret;
 
 	skel = bpf_iter_task__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
 		return;
 
-	do_dummy_read(skel->progs.dump_task);
+	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");
+
+	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
+		  "pthread_create");
+
+	skel->bss->tid = getpid();
+
+	do_dummy_read_opts(skel->progs.dump_task, opts);
+
+	*num_unknown = skel->bss->num_unknown_tid;
+	*num_known = skel->bss->num_known_tid;
+
+	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
+	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
+		     "pthread_join");
 
 	bpf_iter_task__destroy(skel);
 }
 
+static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
+{
+	int num_unknown_tid, num_known_tid;
+
+	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
+	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
+	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
+}
+
+static void test_task_tid(void)
+{
+	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+	int num_unknown_tid, num_known_tid;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.tid = getpid();
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+	test_task_common(&opts, 0, 1);
+
+	linfo.task.tid = 0;
+	linfo.task.pid = getpid();
+	test_task_common(&opts, 1, 1);
+
+	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
+	ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
+	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
+}
+
+static void test_task_pid(void)
+{
+	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.pid = getpid();
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	test_task_common(&opts, 1, 1);
+}
+
+static void test_task_pidfd(void)
+{
+	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+	int pidfd;
+
+	pidfd = pidfd_open(getpid(), 0);
+	if (!ASSERT_GT(pidfd, 0, "pidfd_open"))
+		return;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.pid_fd = pidfd;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	test_task_common(&opts, 1, 1);
+
+	close(pidfd);
+}
+
 static void test_task_sleepable(void)
 {
 	struct bpf_iter_task *skel;
@@ -212,14 +341,11 @@ static void test_task_stack(void)
 	bpf_iter_task_stack__destroy(skel);
 }
 
-static void *do_nothing(void *arg)
-{
-	pthread_exit(arg);
-}
-
 static void test_task_file(void)
 {
+	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 	struct bpf_iter_task_file *skel;
+	union bpf_iter_link_info linfo;
 	pthread_t thread_id;
 	void *ret;
 
@@ -229,19 +355,36 @@ static void test_task_file(void)
 
 	skel->bss->tgid = getpid();
 
-	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
-		       "pthread_create"))
-		goto done;
+	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");
 
-	do_dummy_read(skel->progs.dump_task_file);
+	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
+		  "pthread_create");
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.tid = getpid();
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
 
-	if (!ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
-			  "pthread_join"))
-		goto done;
+	do_dummy_read_opts(skel->progs.dump_task_file, &opts);
 
 	ASSERT_EQ(skel->bss->count, 0, "check_count");
+	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
+
+	skel->bss->last_tgid = 0;
+	skel->bss->count = 0;
+	skel->bss->unique_tgid_count = 0;
+
+	do_dummy_read(skel->progs.dump_task_file);
+
+	ASSERT_EQ(skel->bss->count, 0, "check_count");
+	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");
+
+	check_bpf_link_info(skel->progs.dump_task_file);
+
+	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
+	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
+	ASSERT_NULL(ret, "pthread_join");
 
-done:
 	bpf_iter_task_file__destroy(skel);
 }
 
@@ -1249,7 +1392,7 @@ static void str_strip_first_line(char *str)
 	*dst = '\0';
 }
 
-static void test_task_vma(void)
+static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
 {
 	int err, iter_fd = -1, proc_maps_fd = -1;
 	struct bpf_iter_task_vma *skel;
@@ -1261,13 +1404,14 @@ static void test_task_vma(void)
 		return;
 
 	skel->bss->pid = getpid();
+	skel->bss->one_task = opts ? 1 : 0;
 
 	err = bpf_iter_task_vma__load(skel);
 	if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
 		goto out;
 
 	skel->links.proc_maps = bpf_program__attach_iter(
-		skel->progs.proc_maps, NULL);
+		skel->progs.proc_maps, opts);
 
 	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
 		skel->links.proc_maps = NULL;
@@ -1291,6 +1435,8 @@
 			goto out;
 		len += err;
 	}
+	if (opts)
+		ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");
 
 	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
 	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
@@ -1306,6 +1452,9 @@
 	str_strip_first_line(proc_maps_output);
 
 	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");
+
+	check_bpf_link_info(skel->progs.proc_maps);
+
 out:
 	close(proc_maps_fd);
 	close(iter_fd);
@@ -1325,8 +1474,93 @@ void test_bpf_sockmap_map_iter_fd(void)
 	bpf_iter_sockmap__destroy(skel);
 }
 
+static void test_task_vma(void)
+{
+	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.tid = getpid();
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	test_task_vma_common(&opts);
+	test_task_vma_common(NULL);
+}
+
+/* uprobe attach point */
+static noinline int trigger_func(int arg)
+{
+	asm volatile ("");
+	return arg + 1;
+}
+
+static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
+{
+	struct bpf_iter_vma_offset *skel;
+	struct bpf_link *link;
+	char buf[16] = {};
+	int iter_fd, len;
+	int pgsz, shift;
+
+	skel = bpf_iter_vma_offset__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
+		return;
+
+	skel->bss->pid = getpid();
+	skel->bss->address = (uintptr_t)trigger_func;
+	for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
+		;
+	skel->bss->page_shift = shift;
+
+	link = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
+	if (!ASSERT_OK_PTR(link, "attach_iter"))
+		return;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
+		goto exit;
+
+	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+		;
+	buf[15] = 0;
+	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");
+
+	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
+	if (one_proc)
+		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
+	else
+		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
+
+	close(iter_fd);
+
+exit:
+	bpf_link__destroy(link);
+}
+
+static void test_task_vma_offset(void)
+{
+	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.task.pid = getpid();
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	test_task_vma_offset_common(&opts, true);
+
+	linfo.task.pid = 0;
+	linfo.task.tid = getpid();
+	test_task_vma_offset_common(&opts, true);
+
+	test_task_vma_offset_common(NULL, false);
+}
+
 void test_bpf_iter(void)
 {
+	ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");
+
 	if (test__start_subtest("btf_id_or_null"))
 		test_btf_id_or_null();
 	if (test__start_subtest("ipv6_route"))
@@ -1335,8 +1569,12 @@ void test_bpf_iter(void)
 		test_netlink();
 	if (test__start_subtest("bpf_map"))
 		test_bpf_map();
-	if (test__start_subtest("task"))
-		test_task();
+	if (test__start_subtest("task_tid"))
+		test_task_tid();
+	if (test__start_subtest("task_pid"))
+		test_task_pid();
+	if (test__start_subtest("task_pidfd"))
+		test_task_pidfd();
 	if (test__start_subtest("task_sleepable"))
 		test_task_sleepable();
 	if (test__start_subtest("task_stack"))
@@ -1397,4 +1635,6 @@ void test_bpf_iter(void)
 		test_ksym_iter();
 	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
 		test_bpf_sockmap_map_iter_fd();
+	if (test__start_subtest("vma_offset"))
+		test_task_vma_offset();
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
index 544bf90ac2a7..8a838ea8bdf3 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
 #include <network_helpers.h>
+#include <linux/netfilter/nf_conntrack_common.h>
 #include "test_bpf_nf.skel.h"
 #include "test_bpf_nf_fail.skel.h"
 
@@ -17,6 +18,7 @@ struct {
 	{ "set_status_after_insert", "kernel function bpf_ct_set_status args#0 expected pointer to STRUCT nf_conn___init but" },
 	{ "change_timeout_after_alloc", "kernel function bpf_ct_change_timeout args#0 expected pointer to STRUCT nf_conn but" },
 	{ "change_status_after_alloc", "kernel function bpf_ct_change_status args#0 expected pointer to STRUCT nf_conn but" },
+	{ "write_not_allowlisted_field", "no write support to nf_conn at off" },
 };
 
 enum {
@@ -24,7 +26,10 @@ enum {
 	TEST_TC_BPF,
 };
 
-#define TIMEOUT_MS 3000
+#define TIMEOUT_MS		3000
+#define IPS_STATUS_MASK		(IPS_CONFIRMED | IPS_SEEN_REPLY | \
+				 IPS_SRC_NAT_DONE | IPS_DST_NAT_DONE | \
+				 IPS_SRC_NAT | IPS_DST_NAT)
 
 static int connect_to_server(int srv_fd)
 {
@@ -111,10 +116,12 @@
 	/* allow some tolerance for test_delta_timeout value to avoid races. */
 	ASSERT_GT(skel->bss->test_delta_timeout, 8, "Test for min ct timeout update");
 	ASSERT_LE(skel->bss->test_delta_timeout, 10, "Test for max ct timeout update");
-	/* expected status is IPS_SEEN_REPLY */
-	ASSERT_EQ(skel->bss->test_status, 2, "Test for ct status update ");
+	ASSERT_EQ(skel->bss->test_insert_lookup_mark, 77, "Test for insert and lookup mark value");
+	ASSERT_EQ(skel->bss->test_status, IPS_STATUS_MASK, "Test for ct status update ");
 	ASSERT_EQ(skel->data->test_exist_lookup, 0, "Test existing connection lookup");
 	ASSERT_EQ(skel->bss->test_exist_lookup_mark, 43, "Test existing connection lookup ctmark");
+	ASSERT_EQ(skel->data->test_snat_addr, 0, "Test for source natting");
+	ASSERT_EQ(skel->data->test_dnat_addr, 0, "Test for destination natting");
 end:
 	if (srv_client_fd != -1)
 		close(srv_client_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 2959a52ced06..e980188d4124 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -290,6 +290,10 @@ static void test_dctcp_fallback(void)
 		goto done;
 	ASSERT_STREQ(dctcp_skel->bss->cc_res, "cubic", "cc_res");
 	ASSERT_EQ(dctcp_skel->bss->tcp_cdg_res, -ENOTSUPP, "tcp_cdg_res");
+	/* All setsockopt(TCP_CONGESTION) in the recurred
+	 * bpf_dctcp->init() should fail with -EBUSY.
+	 */
+	ASSERT_EQ(dctcp_skel->bss->ebusy_cnt, 3, "ebusy_cnt");
 
 	err = getsockopt(srv_fd, SOL_TCP, TCP_CONGESTION, srv_cc, &cc_len);
 	if (!ASSERT_OK(err, "getsockopt(srv_fd, TCP_CONGESTION)"))
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index b1ca954ed1e5..24da335482d4 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -764,7 +764,7 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d,
 	/* union with nested struct */
 	TEST_BTF_DUMP_DATA(btf, d, "union", str, union bpf_iter_link_info, BTF_F_COMPACT,
-			   "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.cgroup = (struct){.order = (enum bpf_cgroup_iter_order)BPF_CGROUP_ITER_SELF_ONLY,.cgroup_fd = (__u32)1,},}",
+			   "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.cgroup = (struct){.order = (enum bpf_cgroup_iter_order)BPF_CGROUP_ITER_SELF_ONLY,.cgroup_fd = (__u32)1,},.task = (struct){.tid = (__u32)1,.pid = (__u32)1,},}",
 			   { .cgroup = { .order = 1, .cgroup_fd = 1, }});
 
 	/* struct skb with nested structs/unions; because type output is so
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
index 664ffc0364f4..7a277035c275 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
@@ -22,26 +22,6 @@ static __u32 duration;
 
 #define PROG_PIN_FILE "/sys/fs/bpf/btf_skc_cls_ingress"
 
-static int write_sysctl(const char *sysctl, const char *value)
-{
-	int fd, err, len;
-
-	fd = open(sysctl, O_WRONLY);
-	if (CHECK(fd == -1, "open sysctl", "open(%s): %s (%d)\n",
-		  sysctl, strerror(errno), errno))
-		return -1;
-
-	len = strlen(value);
-	err = write(fd, value, len);
-	close(fd);
-	if (CHECK(err != len, "write sysctl",
-		  "write(%s, %s, %d): err:%d %s (%d)\n",
-		  sysctl, value, len, err, strerror(errno), errno))
-		return -1;
-
-	return 0;
-}
-
 static int prepare_netns(void)
 {
 	if (CHECK(unshare(CLONE_NEWNET), "create netns",
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c b/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c
index bed1661596f7..3bd27d2ea668 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c
@@ -1,6 +1,22 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Functions to manage eBPF programs attached to cgroup subsystems
+ * This test makes sure BPF stats collection using rstat works correctly.
+ * The test uses 3 BPF progs:
+ * (a) counter: This BPF prog is invoked every time we attach a process to a
+ *              cgroup and locklessly increments a percpu counter.
+ *              The program then calls cgroup_rstat_updated() to inform rstat
+ *              of an update on the (cpu, cgroup) pair.
+ *
+ * (b) flusher: This BPF prog is invoked when an rstat flush is ongoing, it
+ *              aggregates all percpu counters to a total counter, and also
+ *              propagates the changes to the ancestor cgroups.
+ *
+ * (c) dumper: This BPF prog is a cgroup_iter. It is used to output the total
+ *             counter of a cgroup through reading a file in userspace.
+ *
+ * The test sets up a cgroup hierarchy, and the above programs. It spawns a few
+ * processes in the leaf cgroups and makes sure all the counters are aggregated
+ * correctly.
 *
 * Copyright 2022 Google LLC.
 */
@@ -21,8 +37,10 @@
 #define PAGE_SIZE 4096
 #define MB(x) (x << 20)
 
+#define PROCESSES_PER_CGROUP 3
+
 #define BPFFS_ROOT "/sys/fs/bpf/"
-#define BPFFS_VMSCAN BPFFS_ROOT"vmscan/"
+#define BPFFS_ATTACH_COUNTERS BPFFS_ROOT "attach_counters/"
 #define CG_ROOT_NAME "root"
 #define CG_ROOT_ID 1
 
@@ -79,7 +97,7 @@ static int setup_bpffs(void)
 		return err;
 
 	/* Create a directory to contain stat files in bpffs */
-	err = mkdir(BPFFS_VMSCAN, 0755);
+	err = mkdir(BPFFS_ATTACH_COUNTERS, 0755);
 	if (!ASSERT_OK(err, "mkdir"))
 		return err;
 
@@ -89,7 +107,7 @@ static void cleanup_bpffs(void)
 {
 	/* Remove created directory in bpffs */
-	ASSERT_OK(rmdir(BPFFS_VMSCAN), "rmdir "BPFFS_VMSCAN);
+	ASSERT_OK(rmdir(BPFFS_ATTACH_COUNTERS), "rmdir "BPFFS_ATTACH_COUNTERS);
 
 	/* Unmount bpffs, if it wasn't already mounted when we started */
 	if (mounted_bpffs)
@@ -118,18 +136,6 @@ static int setup_cgroups(void)
 
 		cgroups[i].fd = fd;
 		cgroups[i].id = get_cgroup_id(cgroups[i].path);
-
-		/*
-		 * Enable memcg controller for the entire hierarchy.
-		 * Note that stats are collected for all cgroups in a hierarchy
-		 * with memcg enabled anyway, but are only exposed for cgroups
-		 * that have memcg enabled.
-		 */
-		if (i < N_NON_LEAF_CGROUPS) {
-			err = enable_controllers(cgroups[i].path, "memory");
-			if (!ASSERT_OK(err, "enable_controllers"))
-				return err;
-		}
 	}
 	return 0;
 }
@@ -154,109 +160,85 @@ static void destroy_hierarchy(void)
 	cleanup_bpffs();
}
 
-static int reclaimer(const char *cgroup_path, size_t size)
-{
-	static char size_buf[128];
-	char *buf, *ptr;
-	int err;
-
-	/* Join cgroup in the parent process workdir */
-	if (join_parent_cgroup(cgroup_path))
-		return EACCES;
-
-	/* Allocate memory */
-	buf = malloc(size);
-	if (!buf)
-		return ENOMEM;
-
-	/* Write to memory to make sure it's actually allocated */
-	for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
-		*ptr = 1;
-
-	/* Try to reclaim memory */
-	snprintf(size_buf, 128, "%lu", size);
-	err = write_cgroup_file_parent(cgroup_path, "memory.reclaim", size_buf);
-
-	free(buf);
-	/* memory.reclaim returns EAGAIN if the amount is not fully reclaimed */
-	if (err && errno != EAGAIN)
-		return errno;
-
-	return 0;
-}
-
-static int induce_vmscan(void)
+static int attach_processes(void)
 {
-	int i, status;
+	int i, j, status;
 
-	/*
-	 * In every leaf cgroup, run a child process that allocates some memory
-	 * and attempts to reclaim some of it.
-	 */
+	/* In every leaf cgroup, attach 3 processes */
 	for (i = N_NON_LEAF_CGROUPS; i < N_CGROUPS; i++) {
-		pid_t pid;
-
-		/* Create reclaimer child */
-		pid = fork();
-		if (pid == 0) {
-			status = reclaimer(cgroups[i].path, MB(5));
-			exit(status);
+		for (j = 0; j < PROCESSES_PER_CGROUP; j++) {
+			pid_t pid;
+
+			/* Create child and attach to cgroup */
+			pid = fork();
+			if (pid == 0) {
+				if (join_parent_cgroup(cgroups[i].path))
+					exit(EACCES);
+				exit(0);
+			}
+
+			/* Cleanup child */
+			waitpid(pid, &status, 0);
+			if (!ASSERT_TRUE(WIFEXITED(status), "child process exited"))
+				return 1;
+			if (!ASSERT_EQ(WEXITSTATUS(status), 0,
+				       "child process exit code"))
+				return 1;
 		}
-
-		/* Cleanup reclaimer child */
-		waitpid(pid, &status, 0);
-		ASSERT_TRUE(WIFEXITED(status), "reclaimer exited");
-		ASSERT_EQ(WEXITSTATUS(status), 0, "reclaim exit code");
 	}
 	return 0;
 }
 
 static unsigned long long
-get_cgroup_vmscan_delay(unsigned long long cgroup_id, const char *file_name)
+get_attach_counter(unsigned long long cgroup_id, const char *file_name)
 {
-	unsigned long long vmscan = 0, id = 0;
+	unsigned long long attach_counter = 0, id = 0;
 	static char buf[128], path[128];
 
 	/* For every cgroup, read the file generated by cgroup_iter */
-	snprintf(path, 128, "%s%s", BPFFS_VMSCAN, file_name);
+	snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, file_name);
 	if (!ASSERT_OK(read_from_file(path, buf, 128), "read cgroup_iter"))
 		return 0;
 
 	/* Check the output file formatting */
-	ASSERT_EQ(sscanf(buf, "cg_id: %llu, total_vmscan_delay: %llu\n",
-			 &id, &vmscan), 2, "output format");
+	ASSERT_EQ(sscanf(buf, "cg_id: %llu, attach_counter: %llu\n",
+			 &id, &attach_counter), 2, "output format");
 
 	/* Check that the cgroup_id is displayed correctly */
 	ASSERT_EQ(id, cgroup_id, "cgroup_id");
 
-	/* Check that the vmscan reading is non-zero */
-	ASSERT_GT(vmscan, 0, "vmscan_reading");
-	return vmscan;
+	/* Check that the counter is non-zero */
+	ASSERT_GT(attach_counter, 0, "attach counter non-zero");
+	return attach_counter;
 }
 
-static void check_vmscan_stats(void)
+static void check_attach_counters(void)
 {
-	unsigned long long vmscan_readings[N_CGROUPS], vmscan_root;
+	unsigned long long attach_counters[N_CGROUPS], root_attach_counter;
 	int i;
 
-	for (i = 0; i < N_CGROUPS; i++) {
-		vmscan_readings[i] = get_cgroup_vmscan_delay(cgroups[i].id,
-							     cgroups[i].name);
-	}
+	for (i = 0; i < N_CGROUPS; i++)
+		attach_counters[i] = get_attach_counter(cgroups[i].id,
+							cgroups[i].name);
 
 	/* Read stats for root too */
-	vmscan_root = get_cgroup_vmscan_delay(CG_ROOT_ID, CG_ROOT_NAME);
+	root_attach_counter = get_attach_counter(CG_ROOT_ID, CG_ROOT_NAME);
+
+	/* Check that all leaf cgroups have an attach counter of 3 */
+	for (i = N_NON_LEAF_CGROUPS; i < N_CGROUPS; i++)
+		ASSERT_EQ(attach_counters[i], PROCESSES_PER_CGROUP,
+			  "leaf cgroup attach counter");
 
 	/* Check that child1 == child1_1 + child1_2 */
-	ASSERT_EQ(vmscan_readings[1], vmscan_readings[3] + vmscan_readings[4],
-		  "child1_vmscan");
+	ASSERT_EQ(attach_counters[1], attach_counters[3] + attach_counters[4],
+		  "child1_counter");
 	/* Check that child2 == child2_1 + child2_2 */
-	ASSERT_EQ(vmscan_readings[2], vmscan_readings[5] + vmscan_readings[6],
-		  "child2_vmscan");
+	ASSERT_EQ(attach_counters[2], attach_counters[5] + attach_counters[6],
+		  "child2_counter");
 	/* Check that test == child1 + child2 */
-	ASSERT_EQ(vmscan_readings[0], vmscan_readings[1] + vmscan_readings[2],
-		  "test_vmscan");
+	ASSERT_EQ(attach_counters[0], attach_counters[1] + attach_counters[2],
+		  "test_counter");
 	/* Check that root >= test */
-	ASSERT_GE(vmscan_root, vmscan_readings[1], "root_vmscan");
+	ASSERT_GE(root_attach_counter, attach_counters[1], "root_counter");
 }
 
 /* Creates iter link and pins in bpffs, returns 0 on success, -errno on failure.
@@ -278,12 +260,12 @@ static int setup_cgroup_iter(struct cgroup_hierarchical_stats *obj,
 	linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
 	opts.link_info = &linfo;
 	opts.link_info_len = sizeof(linfo);
-	link = bpf_program__attach_iter(obj->progs.dump_vmscan, &opts);
+	link = bpf_program__attach_iter(obj->progs.dumper, &opts);
 	if (!ASSERT_OK_PTR(link, "attach_iter"))
 		return -EFAULT;
 
 	/* Pin the link to a bpffs file */
-	snprintf(path, 128, "%s%s", BPFFS_VMSCAN, file_name);
+	snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, file_name);
 	err = bpf_link__pin(link, path);
 	ASSERT_OK(err, "pin cgroup_iter");
 
@@ -313,7 +295,7 @@ static int setup_progs(struct cgroup_hierarchical_stats **skel)
 	if (!ASSERT_OK(err, "setup_cgroup_iter"))
 		return err;
 
-	bpf_program__set_autoattach((*skel)->progs.dump_vmscan, false);
+	bpf_program__set_autoattach((*skel)->progs.dumper, false);
 	err = cgroup_hierarchical_stats__attach(*skel);
 	if (!ASSERT_OK(err, "attach"))
 		return err;
@@ -328,13 +310,13 @@ static void destroy_progs(struct cgroup_hierarchical_stats *skel)
 
 	for (i = 0; i < N_CGROUPS; i++) {
 		/* Delete files in bpffs that cgroup_iters are pinned in */
-		snprintf(path, 128, "%s%s", BPFFS_VMSCAN,
+		snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS,
 			 cgroups[i].name);
 		ASSERT_OK(remove(path), "remove cgroup_iter pin");
 	}
 
 	/* Delete root file in bpffs */
-	snprintf(path, 128, "%s%s", BPFFS_VMSCAN, CG_ROOT_NAME);
+	snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, CG_ROOT_NAME);
 	ASSERT_OK(remove(path), "remove cgroup_iter root pin");
 	cgroup_hierarchical_stats__destroy(skel);
 }
@@ -347,9 +329,9 @@ void test_cgroup_hierarchical_stats(void)
 		goto hierarchy_cleanup;
 	if (setup_progs(&skel))
 		goto cleanup;
-	if (induce_vmscan())
+	if (attach_processes())
 		goto cleanup;
-	check_vmscan_stats();
+	check_attach_counters();
 cleanup:
 	destroy_progs(skel);
 hierarchy_cleanup:
diff --git a/tools/testing/selftests/bpf/prog_tests/connect_ping.c b/tools/testing/selftests/bpf/prog_tests/connect_ping.c
new file mode 100644
index 000000000000..289218c2216c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/connect_ping.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2022 Google LLC.
+ */
+
+#define _GNU_SOURCE
+#include <sys/mount.h>
+
+#include "test_progs.h"
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+#include "connect_ping.skel.h"
+
+/* 2001:db8::1 */
+#define BINDADDR_V6 { { { 0x20,0x01,0x0d,0xb8,0,0,0,0,0,0,0,0,0,0,0,1 } } }
+static const struct in6_addr bindaddr_v6 = BINDADDR_V6;
+
+static void subtest(int cgroup_fd, struct connect_ping *skel,
+		    int family, int do_bind)
+{
+	struct sockaddr_in sa4 = {
+		.sin_family = AF_INET,
+		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
+	};
+	struct sockaddr_in6 sa6 = {
+		.sin6_family = AF_INET6,
+		.sin6_addr = IN6ADDR_LOOPBACK_INIT,
+	};
+	struct sockaddr *sa;
+	socklen_t sa_len;
+	int protocol;
+	int sock_fd;
+
+	switch (family) {
+	case AF_INET:
+		sa = (struct sockaddr *)&sa4;
+		sa_len = sizeof(sa4);
+		protocol = IPPROTO_ICMP;
+		break;
+	case AF_INET6:
+		sa = (struct sockaddr *)&sa6;
+		sa_len = sizeof(sa6);
+		protocol = IPPROTO_ICMPV6;
+		break;
+	}
+
+	memset(skel->bss, 0, sizeof(*skel->bss));
+	skel->bss->do_bind = do_bind;
+
+	sock_fd = socket(family, SOCK_DGRAM, protocol);
+	if (!ASSERT_GE(sock_fd, 0, "sock-create"))
+		return;
+
+	if (!ASSERT_OK(connect(sock_fd, sa, sa_len), "connect"))
+		goto close_sock;
+
+	if (!ASSERT_EQ(skel->bss->invocations_v4, family == AF_INET ? 1 : 0,
+		       "invocations_v4"))
+		goto close_sock;
+	if (!ASSERT_EQ(skel->bss->invocations_v6, family == AF_INET6 ? 1 : 0,
+		       "invocations_v6"))
+		goto close_sock;
+	if (!ASSERT_EQ(skel->bss->has_error, 0, "has_error"))
+		goto close_sock;
+
+	if (!ASSERT_OK(getsockname(sock_fd, sa, &sa_len),
+		       "getsockname"))
+		goto close_sock;
+
+	switch (family) {
+	case AF_INET:
+		if (!ASSERT_EQ(sa4.sin_family, family, "sin_family"))
+			goto close_sock;
+		if (!ASSERT_EQ(sa4.sin_addr.s_addr,
+			       htonl(do_bind ? 0x01010101 : INADDR_LOOPBACK),
+			       "sin_addr"))
+			goto close_sock;
+		break;
+	case AF_INET6:
+		if (!ASSERT_EQ(sa6.sin6_family, AF_INET6, "sin6_family"))
+			goto close_sock;
+		if (!ASSERT_EQ(memcmp(&sa6.sin6_addr,
+				      do_bind ? &bindaddr_v6 : &in6addr_loopback,
+				      sizeof(sa6.sin6_addr)),
+			       0, "sin6_addr"))
+			goto close_sock;
+		break;
+	}
+
+close_sock:
+	close(sock_fd);
+}
+
+void test_connect_ping(void)
+{
+	struct connect_ping *skel;
+	int cgroup_fd;
+
+	if (!ASSERT_OK(unshare(CLONE_NEWNET | CLONE_NEWNS), "unshare"))
+		return;
+
+	/* Overmount sysfs, and make the original sysfs private so the
+	 * overmount does not propagate to other mntns.
+	 */
+	if (!ASSERT_OK(mount("none", "/sys", NULL, MS_PRIVATE, NULL),
+		       "remount-private-sys"))
+		return;
+	if (!ASSERT_OK(mount("sysfs", "/sys", "sysfs", 0, NULL),
+		       "mount-sys"))
+		return;
+	if (!ASSERT_OK(mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL),
+		       "mount-bpf"))
+		goto clean_mount;
+
+	if (!ASSERT_OK(system("ip link set dev lo up"), "lo-up"))
+		goto clean_mount;
+	if (!ASSERT_OK(system("ip addr add 1.1.1.1 dev lo"), "lo-addr-v4"))
+		goto clean_mount;
+	if (!ASSERT_OK(system("ip -6 addr add 2001:db8::1 dev lo"), "lo-addr-v6"))
+		goto clean_mount;
+	if (write_sysctl("/proc/sys/net/ipv4/ping_group_range", "0 0"))
+		goto clean_mount;
+
+	cgroup_fd = test__join_cgroup("/connect_ping");
+	if (!ASSERT_GE(cgroup_fd, 0, "cg-create"))
+		goto clean_mount;
+
+	skel = connect_ping__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel-load"))
+		goto close_cgroup;
+	skel->links.connect_v4_prog =
+		bpf_program__attach_cgroup(skel->progs.connect_v4_prog, cgroup_fd);
+	if (!ASSERT_OK_PTR(skel->links.connect_v4_prog, "cg-attach-v4"))
+		goto skel_destroy;
+	skel->links.connect_v6_prog =
+		bpf_program__attach_cgroup(skel->progs.connect_v6_prog, cgroup_fd);
+	if (!ASSERT_OK_PTR(skel->links.connect_v6_prog, "cg-attach-v6"))
+		goto skel_destroy;
+
+	/* Connect a v4 ping socket to localhost, assert that only v4 is called,
+	 * and called exactly once, and that the socket's bound address is the
+	 * original loopback address.
+	 */
+	if (test__start_subtest("ipv4"))
+		subtest(cgroup_fd, skel, AF_INET, 0);
+
+	/* Connect a v4 ping socket to localhost, assert that only v4 is called,
+	 * and called exactly once, and that the socket's bound address is the
+	 * address we explicitly bound.
+	 */
+	if (test__start_subtest("ipv4-bind"))
+		subtest(cgroup_fd, skel, AF_INET, 1);
+
+	/* Connect a v6 ping socket to localhost, assert that only v6 is called,
+	 * and called exactly once, and that the socket's bound address is the
+	 * original loopback address.
+	 */
+	if (test__start_subtest("ipv6"))
+		subtest(cgroup_fd, skel, AF_INET6, 0);
+
+	/* Connect a v6 ping socket to localhost, assert that only v6 is called,
+	 * and called exactly once, and that the socket's bound address is the
+	 * address we explicitly bound.
+	 */
+	if (test__start_subtest("ipv6-bind"))
+		subtest(cgroup_fd, skel, AF_INET6, 1);
+
+skel_destroy:
+	connect_ping__destroy(skel);
+
+close_cgroup:
+	close(cgroup_fd);
+
+clean_mount:
+	umount2("/sys", MNT_DETACH);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
index bcf80b9f7c27..8fc4e6c02bfd 100644
--- a/tools/testing/selftests/bpf/prog_tests/dynptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -30,7 +30,7 @@ static struct {
 	{"invalid_helper2", "Expected an initialized dynptr as arg #3"},
 	{"invalid_write1", "Expected an initialized dynptr as arg #1"},
 	{"invalid_write2", "Expected an initialized dynptr as arg #3"},
-	{"invalid_write3", "Expected an initialized ringbuf dynptr as arg #1"},
+	{"invalid_write3", "Expected an initialized dynptr as arg #1"},
 	{"invalid_write4", "arg 1 is an unacquired reference"},
 	{"invalid_read1", "invalid read from stack"},
 	{"invalid_read2", "cannot pass in dynptr at an offset"},
diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
index 938dbd4d7c2f..fede8ef58b5b 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
@@ -2,7 +2,7 @@
 #include <test_progs.h>
 #include "get_func_ip_test.skel.h"
 
-void test_get_func_ip_test(void)
+static void test_function_entry(void)
 {
 	struct get_func_ip_test *skel = NULL;
 	int err, prog_fd;
@@ -12,14 +12,6 @@ void test_get_func_ip_test(void)
 	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
 		return;
 
-	/* test6 is x86_64 specifc because of the instruction
-	 * offset, disabling it for all other archs
-	 */
-#ifndef __x86_64__
-	bpf_program__set_autoload(skel->progs.test6, false);
-	bpf_program__set_autoload(skel->progs.test7, false);
-#endif
-
 	err = get_func_ip_test__load(skel);
 	if (!ASSERT_OK(err, "get_func_ip_test__load"))
 		goto cleanup;
@@ -43,11 +35,56 @@ void test_get_func_ip_test(void)
 	ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
 	ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
 	ASSERT_EQ(skel->bss->test5_result, 1, "test5_result");
+
+cleanup:
+	get_func_ip_test__destroy(skel);
+}
+
+/* test6 is x86_64 specific because of the instruction
+ * offset, disabling it for all other archs
+ */
 #ifdef __x86_64__
+static void test_function_body(void)
+{
+	struct get_func_ip_test *skel = NULL;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	LIBBPF_OPTS(bpf_kprobe_opts, kopts);
+	struct bpf_link *link6 = NULL;
+	int err, prog_fd;
+
+	skel = get_func_ip_test__open();
+	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
+		return;
+
+	bpf_program__set_autoload(skel->progs.test6, true);
+
+	err = get_func_ip_test__load(skel);
+	if (!ASSERT_OK(err, "get_func_ip_test__load"))
+		goto cleanup;
+
+	kopts.offset = skel->kconfig->CONFIG_X86_KERNEL_IBT ? 9 : 5;
+
+	link6 = bpf_program__attach_kprobe_opts(skel->progs.test6, "bpf_fentry_test6", &kopts);
+	if (!ASSERT_OK_PTR(link6, "link6"))
+		goto cleanup;
+
+	prog_fd = bpf_program__fd(skel->progs.test1);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(topts.retval, 0, "test_run");
+
 	ASSERT_EQ(skel->bss->test6_result, 1, "test6_result");
-	ASSERT_EQ(skel->bss->test7_result, 1, "test7_result");
-#endif
 
 cleanup:
+	bpf_link__destroy(link6);
 	get_func_ip_test__destroy(skel);
 }
+#else
+#define test_function_body()
+#endif
+
+void test_get_func_ip_test(void)
+{
+	test_function_entry();
+	test_function_body();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
index eede7c304f86..5af1ee8f0e6e 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c
@@ -2,6 +2,8 @@
 /* Copyright (c) 2021 Facebook */
 #include <test_progs.h>
 #include <network_helpers.h>
+#include "kfunc_call_fail.skel.h"
+#include "kfunc_call_test.skel.h"
 #include "kfunc_call_test.lskel.h"
 #include "kfunc_call_test_subprog.skel.h"
 #include "kfunc_call_test_subprog.lskel.h"
@@ -9,36 +11,220 @@
 #include "cap_helpers.h"
 
-static void test_main(void)
+static size_t log_buf_sz = 1048576; /* 1 MB */
+static char obj_log_buf[1048576];
+
+enum kfunc_test_type {
+	tc_test = 0,
+	syscall_test,
+	syscall_null_ctx_test,
+};
+
+struct kfunc_test_params {
+	const char *prog_name;
+	unsigned long lskel_prog_desc_offset;
+	int retval;
+	enum kfunc_test_type test_type;
+	const char *expected_err_msg;
+};
+
+#define __BPF_TEST_SUCCESS(name, __retval, type) \
+	{ \
+	  .prog_name = #name, \
+	  .lskel_prog_desc_offset = offsetof(struct kfunc_call_test_lskel, progs.name), \
+	  .retval = __retval, \
+	  .test_type = type, \
+	  .expected_err_msg = NULL, \
+	}
+
+#define __BPF_TEST_FAIL(name, __retval, type, error_msg) \
+	{ \
+	  .prog_name = #name, \
+	  .lskel_prog_desc_offset = 0 /* unused when test is failing */, \
+	  .retval = __retval, \
+	  .test_type = type, \
+	  .expected_err_msg = error_msg, \
+	}
+
+#define TC_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, tc_test)
+#define SYSCALL_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_test)
+#define SYSCALL_NULL_CTX_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_null_ctx_test)
+
+#define TC_FAIL(name, retval, error_msg) __BPF_TEST_FAIL(name, retval, tc_test, error_msg)
+#define SYSCALL_NULL_CTX_FAIL(name, retval, error_msg) \
+	__BPF_TEST_FAIL(name, retval, syscall_null_ctx_test, error_msg)
+
+static struct kfunc_test_params kfunc_tests[] = {
+	/* failure cases:
+	 * if retval is 0 -> the program will fail to load and the error message is an error
+	 * if retval is not 0 -> the program can be loaded but running it will give the
+	 *                       provided return value. The error message is thus the one
+	 *                       from a successful load
+	 */
+	SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_fail, -EINVAL, "processed 4 insns"),
+	SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_null_fail, -EINVAL, "processed 4 insns"),
+	TC_FAIL(kfunc_call_test_get_mem_fail_rdonly, 0, "R0 cannot write into rdonly_mem"),
+	TC_FAIL(kfunc_call_test_get_mem_fail_use_after_free, 0, "invalid mem access 'scalar'"),
+	TC_FAIL(kfunc_call_test_get_mem_fail_oob, 0, "min value is outside of the allowed memory range"),
+	TC_FAIL(kfunc_call_test_get_mem_fail_not_const, 0, "is not a const"),
+	TC_FAIL(kfunc_call_test_mem_acquire_fail, 0, "acquire kernel function does not return PTR_TO_BTF_ID"),
+
+	/* success cases */
+	TC_TEST(kfunc_call_test1, 12),
+	TC_TEST(kfunc_call_test2, 3),
+	TC_TEST(kfunc_call_test_ref_btf_id, 0),
+	TC_TEST(kfunc_call_test_get_mem, 42),
+	SYSCALL_TEST(kfunc_syscall_test, 0),
+	SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0),
+};
+
+struct syscall_test_args {
+	__u8 data[16];
+	size_t size;
+};
+
+static void verify_success(struct kfunc_test_params *param)
 {
-	struct kfunc_call_test_lskel *skel;
+	struct kfunc_call_test_lskel *lskel = NULL;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	struct bpf_prog_desc *lskel_prog;
+	struct kfunc_call_test *skel;
+	struct bpf_program *prog;
 	int prog_fd, err;
-	LIBBPF_OPTS(bpf_test_run_opts, topts,
-		    .data_in = &pkt_v4,
-		    .data_size_in = sizeof(pkt_v4),
-		    .repeat = 1,
-	);
+	struct syscall_test_args args = {
+		.size = 10,
+	};
+
+	switch (param->test_type) {
+	case syscall_test:
+		topts.ctx_in = &args;
+		topts.ctx_size_in = sizeof(args);
+		/* fallthrough */
+	case syscall_null_ctx_test:
+		break;
+	case tc_test:
+		topts.data_in = &pkt_v4;
+		topts.data_size_in = sizeof(pkt_v4);
+		topts.repeat = 1;
+		break;
+	}
 
-	skel = kfunc_call_test_lskel__open_and_load();
+	/* first test with normal libbpf */
+	skel = kfunc_call_test__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "skel"))
 		return;
 
-	prog_fd = skel->progs.kfunc_call_test1.prog_fd;
-	err = bpf_prog_test_run_opts(prog_fd, &topts);
-	ASSERT_OK(err, "bpf_prog_test_run(test1)");
-	ASSERT_EQ(topts.retval, 12, "test1-retval");
+	prog = bpf_object__find_program_by_name(skel->obj, param->prog_name);
+	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+		goto cleanup;
 
-	prog_fd = skel->progs.kfunc_call_test2.prog_fd;
+	prog_fd = bpf_program__fd(prog);
 	err = bpf_prog_test_run_opts(prog_fd, &topts);
-	ASSERT_OK(err, "bpf_prog_test_run(test2)");
-	ASSERT_EQ(topts.retval, 3, "test2-retval");
+	if (!ASSERT_OK(err, param->prog_name))
+		goto cleanup;
+
+	if (!ASSERT_EQ(topts.retval, param->retval, "retval"))
+		goto cleanup;
+
+	/* second test with light skeletons */
+	lskel = kfunc_call_test_lskel__open_and_load();
+	if (!ASSERT_OK_PTR(lskel, "lskel"))
+		goto cleanup;
 
-	prog_fd = skel->progs.kfunc_call_test_ref_btf_id.prog_fd;
+	lskel_prog = (struct bpf_prog_desc *)((char *)lskel + param->lskel_prog_desc_offset);
+
+	prog_fd = lskel_prog->prog_fd;
 	err = bpf_prog_test_run_opts(prog_fd, &topts);
-	ASSERT_OK(err, "bpf_prog_test_run(test_ref_btf_id)");
-	ASSERT_EQ(topts.retval, 0, "test_ref_btf_id-retval");
+	if (!ASSERT_OK(err, param->prog_name))
+		goto cleanup;
+
+	ASSERT_EQ(topts.retval, param->retval, "retval");
+
+cleanup:
+	kfunc_call_test__destroy(skel);
+	if (lskel)
+		kfunc_call_test_lskel__destroy(lskel);
+}
+
+static void verify_fail(struct kfunc_test_params *param)
+{
+	LIBBPF_OPTS(bpf_object_open_opts, opts);
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	struct bpf_program *prog;
+	struct kfunc_call_fail *skel;
+	int prog_fd, err;
+	struct syscall_test_args args = {
+		.size = 10,
+	};
+
+	opts.kernel_log_buf = obj_log_buf;
+	opts.kernel_log_size = log_buf_sz;
+	opts.kernel_log_level = 1;
+
+	switch (param->test_type) {
+	case syscall_test:
+		topts.ctx_in = &args;
+		topts.ctx_size_in = sizeof(args);
+		/* fallthrough */
+	case syscall_null_ctx_test:
+		break;
+	case tc_test:
+		topts.data_in = &pkt_v4;
+		topts.data_size_in = sizeof(pkt_v4);
+		topts.repeat = 1;
+		break;
+	}
+
+	skel = kfunc_call_fail__open_opts(&opts);
+	if (!ASSERT_OK_PTR(skel, "kfunc_call_fail__open_opts"))
+		goto cleanup;
+
+	prog = bpf_object__find_program_by_name(skel->obj, param->prog_name);
+	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+		goto cleanup;
+
+	bpf_program__set_autoload(prog, true);
+
+	err = kfunc_call_fail__load(skel);
+	if (!param->retval) {
+		/* the verifier is supposed to complain and refuse to load */
+		if (!ASSERT_ERR(err, "unexpected load success"))
+			goto out_err;
+
+	} else {
+		/* the program is loaded but must dynamically fail */
+		if (!ASSERT_OK(err, "unexpected load error"))
+			goto out_err;
+
+		prog_fd = bpf_program__fd(prog);
+		err = bpf_prog_test_run_opts(prog_fd, &topts);
+		if (!ASSERT_EQ(err, param->retval, param->prog_name))
+			goto out_err;
+	}
+
+out_err:
+	if (!ASSERT_OK_PTR(strstr(obj_log_buf, param->expected_err_msg), "expected_err_msg")) {
+		fprintf(stderr, "Expected err_msg: %s\n", param->expected_err_msg);
+		fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
+	}
+
+cleanup:
+	kfunc_call_fail__destroy(skel);
+}
+
+static void test_main(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(kfunc_tests); i++) {
+		if (!test__start_subtest(kfunc_tests[i].prog_name))
+			continue;
 
-	kfunc_call_test_lskel__destroy(skel);
+		if (!kfunc_tests[i].expected_err_msg)
+			verify_success(&kfunc_tests[i]);
+		else
+			verify_fail(&kfunc_tests[i]);
+	}
 }
 
 static void test_subprog(void)
@@ -121,8 +307,7 @@ static void test_destructive(void)
 
 void test_kfunc_call(void)
 {
-	if (test__start_subtest("main"))
-		test_main();
+	test_main();
 
 	if (test__start_subtest("subprog"))
 		test_subprog();
diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
new file mode 100644
index 000000000000..c210657d4d0a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2022 Facebook
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include <test_progs.h>
+#include "test_kfunc_dynptr_param.skel.h"
+
+static size_t log_buf_sz = 1048576; /* 1 MB */
+static char obj_log_buf[1048576];
+
+static struct {
+	const char *prog_name;
+	const char *expected_verifier_err_msg;
+	int expected_runtime_err;
+} kfunc_dynptr_tests[] = {
+	{"dynptr_type_not_supp",
+	 "arg#0 pointer type STRUCT bpf_dynptr_kern points to unsupported dynamic pointer type", 0},
+	{"not_valid_dynptr",
+	 "arg#0 pointer type STRUCT bpf_dynptr_kern must be valid and initialized", 0},
+	{"not_ptr_to_stack", "arg#0 pointer type STRUCT bpf_dynptr_kern not to stack", 0},
+	{"dynptr_data_null", NULL, -EBADMSG},
+};
+
+static bool kfunc_not_supported;
+
+static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt,
+			   va_list args)
+{
+	if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n"))
+		return 0;
+
+	if (strcmp(va_arg(args, char *), "bpf_verify_pkcs7_signature"))
+		return 0;
+
+	kfunc_not_supported = true;
+	return 0;
+}
+
+static void verify_fail(const char *prog_name, const char *expected_err_msg)
+{
+	struct test_kfunc_dynptr_param *skel;
+	LIBBPF_OPTS(bpf_object_open_opts, opts);
+	libbpf_print_fn_t old_print_cb;
+	struct bpf_program *prog;
+	int err;
+
+	opts.kernel_log_buf = obj_log_buf;
+	opts.kernel_log_size = log_buf_sz;
+	opts.kernel_log_level = 1;
+
+	skel = test_kfunc_dynptr_param__open_opts(&opts);
+	if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open_opts"))
+		goto cleanup;
+
+	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+		goto cleanup;
+
+	bpf_program__set_autoload(prog, true);
+
+	bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize());
+
+	kfunc_not_supported = false;
+
+	old_print_cb = libbpf_set_print(libbpf_print_cb);
+	err = test_kfunc_dynptr_param__load(skel);
+	libbpf_set_print(old_print_cb);
+
+	if (err < 0 && kfunc_not_supported) {
+		fprintf(stderr,
+			"%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n",
+			__func__);
+		test__skip();
+		goto cleanup;
+	}
+
+	if (!ASSERT_ERR(err, "unexpected load success"))
+		goto cleanup;
+
+	if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
+		fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
+		fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
+	}
+
+cleanup:
+	test_kfunc_dynptr_param__destroy(skel);
+}
+
+static void verify_success(const char *prog_name, int expected_runtime_err)
+{
+	struct test_kfunc_dynptr_param *skel;
+	libbpf_print_fn_t old_print_cb;
+	struct bpf_program *prog;
+	struct bpf_link *link;
+	__u32 next_id;
+	int err;
+
+	skel = test_kfunc_dynptr_param__open();
+	if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open"))
+		return;
+
+	skel->bss->pid = getpid();
+
+	bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize());
+
+	kfunc_not_supported = false;
+
+	old_print_cb = libbpf_set_print(libbpf_print_cb);
+	err = test_kfunc_dynptr_param__load(skel);
+	libbpf_set_print(old_print_cb);
+
+	if (err < 0 && kfunc_not_supported) {
+		fprintf(stderr,
+			"%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n",
+			__func__);
+		test__skip();
+		goto cleanup;
+	}
+
+	if (!ASSERT_OK(err, "test_kfunc_dynptr_param__load"))
+		goto cleanup;
+
+	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+		goto cleanup;
+
+	link = bpf_program__attach(prog);
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
+		goto cleanup;
+
+	err = bpf_prog_get_next_id(0, &next_id);
+
+	bpf_link__destroy(link);
+
+	if (!ASSERT_OK(err, "bpf_prog_get_next_id"))
+		goto cleanup;
+
+	ASSERT_EQ(skel->bss->err, expected_runtime_err, "err");
+
+cleanup:
+	test_kfunc_dynptr_param__destroy(skel);
+}
+
+void test_kfunc_dynptr_param(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(kfunc_dynptr_tests); i++) {
+		if (!test__start_subtest(kfunc_dynptr_tests[i].prog_name))
+			continue;
+
+		if (kfunc_dynptr_tests[i].expected_verifier_err_msg)
+			verify_fail(kfunc_dynptr_tests[i].prog_name,
+				    kfunc_dynptr_tests[i].expected_verifier_err_msg);
+		else
+			verify_success(kfunc_dynptr_tests[i].prog_name,
+				       kfunc_dynptr_tests[i].expected_runtime_err);
+	}
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lookup_key.c b/tools/testing/selftests/bpf/prog_tests/lookup_key.c
new file mode 100644
index 000000000000..68025e88f352
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lookup_key.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include <linux/keyctl.h>
+#include <test_progs.h>
+
+#include "test_lookup_key.skel.h"
+
+#define KEY_LOOKUP_CREATE	0x01
+#define KEY_LOOKUP_PARTIAL	0x02
+
+static bool kfunc_not_supported;
+
+static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt,
+			   va_list args)
+{
+	char *func;
+
+	if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n"))
+		return 0;
+
+	func = va_arg(args, char *);
+
+	if (strcmp(func, "bpf_lookup_user_key") && strcmp(func, "bpf_key_put") &&
+	    strcmp(func, "bpf_lookup_system_key"))
+		return 0;
+
+	kfunc_not_supported = true;
+	return 0;
+}
+
+void test_lookup_key(void)
+{
+	libbpf_print_fn_t old_print_cb;
+	struct test_lookup_key *skel;
+	__u32 next_id;
+	int ret;
+
+	skel = test_lookup_key__open();
+	if (!ASSERT_OK_PTR(skel, "test_lookup_key__open"))
+		return;
+
+	old_print_cb = libbpf_set_print(libbpf_print_cb);
+	ret = test_lookup_key__load(skel);
+	libbpf_set_print(old_print_cb);
+
+	if (ret < 0 && kfunc_not_supported) {
+		printf("%s:SKIP:bpf_lookup_*_key(), bpf_key_put() kfuncs not supported\n",
+		       __func__);
+		test__skip();
+		goto close_prog;
+	}
+
+	if (!ASSERT_OK(ret, "test_lookup_key__load"))
+		goto close_prog;
+
+	ret = test_lookup_key__attach(skel);
+	if (!ASSERT_OK(ret, "test_lookup_key__attach"))
+		goto close_prog;
+
+	skel->bss->monitored_pid = getpid();
+	skel->bss->key_serial = KEY_SPEC_THREAD_KEYRING;
+
+	/* The thread-specific keyring does not exist, so this test fails. */
+	skel->bss->flags = 0;
+
+	ret = bpf_prog_get_next_id(0, &next_id);
+	if (!ASSERT_LT(ret, 0, "bpf_prog_get_next_id"))
+		goto close_prog;
+
+	/* Force creation of the thread-specific keyring, so this test succeeds. */
+	skel->bss->flags = KEY_LOOKUP_CREATE;
+
+	ret = bpf_prog_get_next_id(0, &next_id);
+	if (!ASSERT_OK(ret, "bpf_prog_get_next_id"))
+		goto close_prog;
+
+	/* Pass both lookup flags for parameter validation. */
+	skel->bss->flags = KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL;
+
+	ret = bpf_prog_get_next_id(0, &next_id);
+	if (!ASSERT_OK(ret, "bpf_prog_get_next_id"))
+		goto close_prog;
+
+	/* Pass invalid flags. */
+	skel->bss->flags = UINT64_MAX;
+
+	ret = bpf_prog_get_next_id(0, &next_id);
+	if (!ASSERT_LT(ret, 0, "bpf_prog_get_next_id"))
+		goto close_prog;
+
+	skel->bss->key_serial = 0;
+	skel->bss->key_id = 1;
+
+	ret = bpf_prog_get_next_id(0, &next_id);
+	if (!ASSERT_OK(ret, "bpf_prog_get_next_id"))
+		goto close_prog;
+
+	skel->bss->key_id = UINT32_MAX;
+
+	ret = bpf_prog_get_next_id(0, &next_id);
+	ASSERT_LT(ret, 0, "bpf_prog_get_next_id");
+
+close_prog:
+	skel->bss->monitored_pid = 0;
+	test_lookup_key__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index cec5c0882372..0aa088900699 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -27,21 +27,21 @@ static int connected_socket_v4(void)
 	int s, repair, err;
 
 	s = socket(AF_INET, SOCK_STREAM, 0);
-	if (CHECK_FAIL(s == -1))
+	if (!ASSERT_GE(s, 0, "socket"))
 		goto error;
 
 	repair = TCP_REPAIR_ON;
 	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
-	if (CHECK_FAIL(err))
+	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
 		goto error;
 
 	err = connect(s, (struct sockaddr *)&addr, len);
-	if (CHECK_FAIL(err))
+	if (!ASSERT_OK(err, "connect"))
 		goto error;
 
 	repair = TCP_REPAIR_OFF_NO_WP;
 	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
-	if (CHECK_FAIL(err))
+	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
 		goto error;
 
 	return s;
@@ -54,7 +54,7 @@ error:
 static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
 {
 	__u32 i, max_entries = bpf_map__max_entries(src);
-	int err, duration = 0, src_fd, dst_fd;
+	int err, src_fd, dst_fd;
 
 	src_fd = bpf_map__fd(src);
 	dst_fd = bpf_map__fd(dst);
@@ -65,20 +65,18 @@ static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
 		err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
 		if (err && errno == ENOENT) {
 			err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
-			CHECK(!err, "map_lookup_elem(dst)", "element %u not deleted\n", i);
-			CHECK(err && errno != ENOENT, "map_lookup_elem(dst)", "%s\n",
-			      strerror(errno));
+			ASSERT_ERR(err, "map_lookup_elem(dst)");
+			ASSERT_EQ(errno, ENOENT, "map_lookup_elem(dst)");
 			continue;
 		}
-		if (CHECK(err, "lookup_elem(src)", "%s\n", strerror(errno)))
+		if (!ASSERT_OK(err, "lookup_elem(src)"))
 			continue;
 
 		err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
-		if (CHECK(err, "lookup_elem(dst)", "%s\n", strerror(errno)))
+		if (!ASSERT_OK(err, "lookup_elem(dst)"))
 			continue;
 
-		CHECK(dst_cookie != src_cookie, "cookie mismatch",
-		      "%llu != %llu (pos %u)\n", dst_cookie, src_cookie, i);
+		ASSERT_EQ(dst_cookie, src_cookie, "cookie mismatch");
 	}
 }
 
@@ -89,20 +87,16 @@ static void test_sockmap_create_update_free(enum bpf_map_type map_type)
 	int s, map, err;
 
 	s = connected_socket_v4();
-	if (CHECK_FAIL(s < 0))
+	if (!ASSERT_GE(s, 0, "connected_socket_v4"))
 		return;
 
 	map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
-	if (CHECK_FAIL(map < 0)) {
-		perror("bpf_cmap_create");
+	if (!ASSERT_GE(map, 0, "bpf_map_create"))
 		goto out;
-	}
 
 	err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
-	if (CHECK_FAIL(err)) {
-		perror("bpf_map_update");
+	if (!ASSERT_OK(err, "bpf_map_update"))
 		goto out;
-	}
 
 out:
 	close(map);
@@ -115,32 +109,26 @@ static void test_skmsg_helpers(enum bpf_map_type map_type)
 	int err, map, verdict;
 
 	skel = test_skmsg_load_helpers__open_and_load();
-	if (CHECK_FAIL(!skel)) {
-		perror("test_skmsg_load_helpers__open_and_load");
+	if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
"test_skmsg_load_helpers__open_and_load")) return; - } verdict = bpf_program__fd(skel->progs.prog_msg_verdict); map = bpf_map__fd(skel->maps.sock_map); err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0); - if (CHECK_FAIL(err)) { - perror("bpf_prog_attach"); + if (!ASSERT_OK(err, "bpf_prog_attach")) goto out; - } err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT); - if (CHECK_FAIL(err)) { - perror("bpf_prog_detach2"); + if (!ASSERT_OK(err, "bpf_prog_detach2")) goto out; - } out: test_skmsg_load_helpers__destroy(skel); } static void test_sockmap_update(enum bpf_map_type map_type) { - int err, prog, src, duration = 0; + int err, prog, src; struct test_sockmap_update *skel; struct bpf_map *dst_map; const __u32 zero = 0; @@ -153,11 +141,11 @@ static void test_sockmap_update(enum bpf_map_type map_type) __s64 sk; sk = connected_socket_v4(); - if (CHECK(sk == -1, "connected_socket_v4", "cannot connect\n")) + if (!ASSERT_NEQ(sk, -1, "connected_socket_v4")) return; skel = test_sockmap_update__open_and_load(); - if (CHECK(!skel, "open_and_load", "cannot load skeleton\n")) + if (!ASSERT_OK_PTR(skel, "open_and_load")) goto close_sk; prog = bpf_program__fd(skel->progs.copy_sock_map); @@ -168,7 +156,7 @@ static void test_sockmap_update(enum bpf_map_type map_type) dst_map = skel->maps.dst_sock_hash; err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST); - if (CHECK(err, "update_elem(src)", "errno=%u\n", errno)) + if (!ASSERT_OK(err, "update_elem(src)")) goto out; err = bpf_prog_test_run_opts(prog, &topts); @@ -188,17 +176,16 @@ close_sk: static void test_sockmap_invalid_update(void) { struct test_sockmap_invalid_update *skel; - int duration = 0; skel = test_sockmap_invalid_update__open_and_load(); - if (CHECK(skel, "open_and_load", "verifier accepted map_update\n")) + if (!ASSERT_NULL(skel, "open_and_load")) test_sockmap_invalid_update__destroy(skel); } static void test_sockmap_copy(enum bpf_map_type map_type) { DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); - int err, len, src_fd, iter_fd, duration = 0; + int err, len, src_fd, iter_fd; union bpf_iter_link_info linfo = {}; __u32 i, num_sockets, num_elems; struct bpf_iter_sockmap *skel; @@ -208,7 +195,7 @@ static void test_sockmap_copy(enum bpf_map_type map_type) char buf[64]; skel = bpf_iter_sockmap__open_and_load(); - if (CHECK(!skel, "bpf_iter_sockmap__open_and_load", "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load")) return; if (map_type == BPF_MAP_TYPE_SOCKMAP) { @@ -222,7 +209,7 @@ static void test_sockmap_copy(enum bpf_map_type map_type) } sock_fd = calloc(num_sockets, sizeof(*sock_fd)); - if (CHECK(!sock_fd, "calloc(sock_fd)", "failed to allocate\n")) + if (!ASSERT_OK_PTR(sock_fd, "calloc(sock_fd)")) goto out; for (i = 0; i < num_sockets; i++) @@ -232,11 +219,11 @@ static void test_sockmap_copy(enum bpf_map_type map_type) for (i = 0; i < num_sockets; i++) { sock_fd[i] = connected_socket_v4(); - if (CHECK(sock_fd[i] == -1, "connected_socket_v4", "cannot connect\n")) + if (!ASSERT_NEQ(sock_fd[i], -1, "connected_socket_v4")) goto out; err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST); - if (CHECK(err, "map_update", "failed: %s\n", strerror(errno))) + if (!ASSERT_OK(err, "map_update")) goto out; } @@ -248,22 +235,20 @@ static void test_sockmap_copy(enum bpf_map_type map_type) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests 
 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
 		;
-	if (CHECK(len < 0, "read", "failed: %s\n", strerror(errno)))
+	if (!ASSERT_GE(len, 0, "read"))
 		goto close_iter;
 
 	/* test results */
-	if (CHECK(skel->bss->elems != num_elems, "elems", "got %u expected %u\n",
-		  skel->bss->elems, num_elems))
+	if (!ASSERT_EQ(skel->bss->elems, num_elems, "elems"))
 		goto close_iter;
 
-	if (CHECK(skel->bss->socks != num_sockets, "socks", "got %u expected %u\n",
-		  skel->bss->socks, num_sockets))
+	if (!ASSERT_EQ(skel->bss->socks, num_sockets, "socks"))
 		goto close_iter;
 
 	compare_cookies(src, skel->maps.dst);
@@ -288,28 +273,22 @@ static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
 	int err, map, verdict;
 
 	skel = test_sockmap_skb_verdict_attach__open_and_load();
-	if (CHECK_FAIL(!skel)) {
-		perror("test_sockmap_skb_verdict_attach__open_and_load");
+	if (!ASSERT_OK_PTR(skel, "open_and_load"))
 		return;
-	}
 
 	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
 	map = bpf_map__fd(skel->maps.sock_map);
 
 	err = bpf_prog_attach(verdict, map, first, 0);
-	if (CHECK_FAIL(err)) {
-		perror("bpf_prog_attach");
+	if (!ASSERT_OK(err, "bpf_prog_attach"))
 		goto out;
-	}
 
 	err = bpf_prog_attach(verdict, map, second, 0);
 	ASSERT_EQ(err, -EBUSY, "prog_attach_fail");
 
 	err = bpf_prog_detach2(verdict, map, first);
-	if (CHECK_FAIL(err)) {
-		perror("bpf_prog_detach2");
+	if (!ASSERT_OK(err, "bpf_prog_detach2"))
 		goto out;
-	}
 out:
 	test_sockmap_skb_verdict_attach__destroy(skel);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
index e172d89e92e1..2d0796314862 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
@@ -15,16 +15,12 @@ static int tcp_server(int family)
 	int err, s;
 
 	s = socket(family, SOCK_STREAM, 0);
-	if (CHECK_FAIL(s == -1)) {
-		perror("socket");
+	if (!ASSERT_GE(s, 0, "socket"))
 		return -1;
-	}
 
 	err = listen(s, SOMAXCONN);
-	if (CHECK_FAIL(err)) {
-		perror("listen");
+	if (!ASSERT_OK(err, "listen"))
 		return -1;
-	}
 
 	return s;
 }
@@ -48,44 +44,31 @@ static void test_sockmap_ktls_disconnect_after_delete(int family, int map)
 		return;
 
 	err = getsockname(srv, (struct sockaddr *)&addr, &len);
-	if (CHECK_FAIL(err)) {
-		perror("getsockopt");
+	if (!ASSERT_OK(err, "getsockopt"))
 		goto close_srv;
-	}
 
 	cli = socket(family, SOCK_STREAM, 0);
-	if (CHECK_FAIL(cli == -1)) {
-		perror("socket");
+	if (!ASSERT_GE(cli, 0, "socket"))
 		goto close_srv;
-	}
 
 	err = connect(cli, (struct sockaddr *)&addr, len);
-	if (CHECK_FAIL(err)) {
-		perror("connect");
+	if (!ASSERT_OK(err, "connect"))
 		goto close_cli;
-	}
 
 	err = bpf_map_update_elem(map, &zero, &cli, 0);
-	if (CHECK_FAIL(err)) {
-		perror("bpf_map_update_elem");
+	if (!ASSERT_OK(err, "bpf_map_update_elem"))
 		goto close_cli;
-	}
 
 	err = setsockopt(cli, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
-	if (CHECK_FAIL(err)) {
-		perror("setsockopt(TCP_ULP)");
+	if (!ASSERT_OK(err, "setsockopt(TCP_ULP)"))
 		goto close_cli;
-	}
 
 	err = bpf_map_delete_elem(map, &zero);
-	if (CHECK_FAIL(err)) {
-		perror("bpf_map_delete_elem");
+	if (!ASSERT_OK(err, "bpf_map_delete_elem"))
 		goto close_cli;
-	}
 
 	err = disconnect(cli);
-	if (CHECK_FAIL(err))
-		perror("disconnect");
+	ASSERT_OK(err, "disconnect");
 
 close_cli:
 	close(cli);
@@ -168,10 +151,8 @@ static void run_tests(int family, enum bpf_map_type map_type)
 	int map;
 
 	map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
-	if (CHECK_FAIL(map < 0)) {
-		perror("bpf_map_create");
+	if (!ASSERT_GE(map, 0, "bpf_map_create"))
"bpf_map_create")) return; - } if (test__start_subtest(fmt_test_name("disconnect_after_delete", family, map_type))) test_sockmap_ktls_disconnect_after_delete(family, map); diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt.c b/tools/testing/selftests/bpf/prog_tests/sockopt.c index cd09f4c7dd92..aa4debf62fc6 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt.c @@ -972,12 +972,12 @@ void test_sockopt(void) int cgroup_fd, i; cgroup_fd = test__join_cgroup("/sockopt"); - if (CHECK_FAIL(cgroup_fd < 0)) + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup")) return; for (i = 0; i < ARRAY_SIZE(tests); i++) { test__start_subtest(tests[i].descr); - CHECK_FAIL(run_test(cgroup_fd, &tests[i])); + ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr); } close(cgroup_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c index c5cb6e8374b6..60c17a8e2789 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c @@ -76,20 +76,16 @@ static void *server_thread(void *arg) pthread_cond_signal(&server_started); pthread_mutex_unlock(&server_started_mtx); - if (CHECK_FAIL(err < 0)) { - perror("Failed to listed on socket"); + if (!ASSERT_GE(err, 0, "listed on socket")) return NULL; - } err += verify_sockopt(fd, CUSTOM_INHERIT1, "listen", 1); err += verify_sockopt(fd, CUSTOM_INHERIT2, "listen", 1); err += verify_sockopt(fd, CUSTOM_LISTENER, "listen", 1); client_fd = accept(fd, (struct sockaddr *)&addr, &len); - if (CHECK_FAIL(client_fd < 0)) { - perror("Failed to accept client"); + if (!ASSERT_GE(client_fd, 0, "accept client")) return NULL; - } err += verify_sockopt(client_fd, CUSTOM_INHERIT1, "accept", 1); err += verify_sockopt(client_fd, CUSTOM_INHERIT2, "accept", 1); @@ -183,20 +179,20 @@ static void run_test(int cgroup_fd) goto close_bpf_object; err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt", "_getsockopt"); - if (CHECK_FAIL(err)) + if (!ASSERT_OK(err, "prog_attach _getsockopt")) goto close_bpf_object; err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt", "_setsockopt"); - if (CHECK_FAIL(err)) + if (!ASSERT_OK(err, "prog_attach _setsockopt")) goto close_bpf_object; server_fd = start_server(); - if (CHECK_FAIL(server_fd < 0)) + if (!ASSERT_GE(server_fd, 0, "start_server")) goto close_bpf_object; pthread_mutex_lock(&server_started_mtx); - if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread, - (void *)&server_fd))) { + if (!ASSERT_OK(pthread_create(&tid, NULL, server_thread, + (void *)&server_fd), "pthread_create")) { pthread_mutex_unlock(&server_started_mtx); goto close_server_fd; } @@ -204,17 +200,17 @@ static void run_test(int cgroup_fd) pthread_mutex_unlock(&server_started_mtx); client_fd = connect_to_server(server_fd); - if (CHECK_FAIL(client_fd < 0)) + if (!ASSERT_GE(client_fd, 0, "connect_to_server")) goto close_server_fd; - CHECK_FAIL(verify_sockopt(client_fd, CUSTOM_INHERIT1, "connect", 0)); - CHECK_FAIL(verify_sockopt(client_fd, CUSTOM_INHERIT2, "connect", 0)); - CHECK_FAIL(verify_sockopt(client_fd, CUSTOM_LISTENER, "connect", 0)); + ASSERT_OK(verify_sockopt(client_fd, CUSTOM_INHERIT1, "connect", 0), "verify_sockopt1"); + ASSERT_OK(verify_sockopt(client_fd, CUSTOM_INHERIT2, "connect", 0), "verify_sockopt2"); + ASSERT_OK(verify_sockopt(client_fd, CUSTOM_LISTENER, "connect", 0), "verify_sockopt ener"); pthread_join(tid, &server_err); err = (int)(long)server_err; - 
CHECK_FAIL(err); + ASSERT_OK(err, "pthread_join retval"); close(client_fd); @@ -229,7 +225,7 @@ void test_sockopt_inherit(void) int cgroup_fd; cgroup_fd = test__join_cgroup("/sockopt_inherit"); - if (CHECK_FAIL(cgroup_fd < 0)) + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup")) return; run_test(cgroup_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c index 28d592dc54a7..7f5659349011 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c @@ -303,11 +303,11 @@ void test_sockopt_multi(void) int err = -1; cg_parent = test__join_cgroup("/parent"); - if (CHECK_FAIL(cg_parent < 0)) + if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent")) goto out; cg_child = test__join_cgroup("/parent/child"); - if (CHECK_FAIL(cg_child < 0)) + if (!ASSERT_GE(cg_child, 0, "join_cgroup /parent/child")) goto out; obj = bpf_object__open_file("sockopt_multi.bpf.o", NULL); @@ -319,11 +319,11 @@ void test_sockopt_multi(void) goto out; sock_fd = socket(AF_INET, SOCK_STREAM, 0); - if (CHECK_FAIL(sock_fd < 0)) + if (!ASSERT_GE(sock_fd, 0, "socket")) goto out; - CHECK_FAIL(run_getsockopt_test(obj, cg_parent, cg_child, sock_fd)); - CHECK_FAIL(run_setsockopt_test(obj, cg_parent, cg_child, sock_fd)); + ASSERT_OK(run_getsockopt_test(obj, cg_parent, cg_child, sock_fd), "getsockopt_test"); + ASSERT_OK(run_setsockopt_test(obj, cg_parent, cg_child, sock_fd), "setsockopt_test"); out: close(sock_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c index 30a99d2ed5c6..60d952719d27 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c @@ -223,7 +223,7 @@ void test_sockopt_sk(void) int cgroup_fd; cgroup_fd = test__join_cgroup("/sockopt_sk"); - if (CHECK_FAIL(cgroup_fd < 0)) + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /sockopt_sk")) return; run_test(cgroup_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c index 032dbfb26256..e070bca2b764 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c @@ -6,11 +6,9 @@ void test_tcp_estats(void) const char *file = "./test_tcp_estats.bpf.o"; int err, prog_fd; struct bpf_object *obj; - __u32 duration = 0; err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); - CHECK(err, "", "err %d errno %d\n", err, errno); - if (err) + if (!ASSERT_OK(err, "")) return; bpf_object__close(obj); diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c index 1fa772079967..617bbce6ef8f 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c @@ -42,33 +42,10 @@ struct sk_fds { static int create_netns(void) { - if (CHECK(unshare(CLONE_NEWNET), "create netns", - "unshare(CLONE_NEWNET): %s (%d)", - strerror(errno), errno)) + if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) return -1; - if (CHECK(system("ip link set dev lo up"), "run ip cmd", - "failed to bring lo link up\n")) - return -1; - - return 0; -} - -static int write_sysctl(const char *sysctl, const char *value) -{ - int fd, err, len; - - fd = open(sysctl, O_WRONLY); - if (CHECK(fd == -1, "open sysctl", "open(%s): %s (%d)\n", - sysctl, strerror(errno), errno)) - return -1; - 
- len = strlen(value); - err = write(fd, value, len); - close(fd); - if (CHECK(err != len, "write sysctl", - "write(%s, %s): err:%d %s (%d)\n", - sysctl, value, err, strerror(errno), errno)) + if (!ASSERT_OK(system("ip link set dev lo up"), "run ip cmd")) return -1; return 0; @@ -100,16 +77,12 @@ static int sk_fds_shutdown(struct sk_fds *sk_fds) shutdown(sk_fds->active_fd, SHUT_WR); ret = read(sk_fds->passive_fd, &abyte, sizeof(abyte)); - if (CHECK(ret != 0, "read-after-shutdown(passive_fd):", - "ret:%d %s (%d)\n", - ret, strerror(errno), errno)) + if (!ASSERT_EQ(ret, 0, "read-after-shutdown(passive_fd):")) return -1; shutdown(sk_fds->passive_fd, SHUT_WR); ret = read(sk_fds->active_fd, &abyte, sizeof(abyte)); - if (CHECK(ret != 0, "read-after-shutdown(active_fd):", - "ret:%d %s (%d)\n", - ret, strerror(errno), errno)) + if (!ASSERT_EQ(ret, 0, "read-after-shutdown(active_fd):")) return -1; return 0; @@ -122,8 +95,7 @@ static int sk_fds_connect(struct sk_fds *sk_fds, bool fast_open) socklen_t len; sk_fds->srv_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0); - if (CHECK(sk_fds->srv_fd == -1, "start_server", "%s (%d)\n", - strerror(errno), errno)) + if (!ASSERT_NEQ(sk_fds->srv_fd, -1, "start_server")) goto error; if (fast_open) @@ -132,28 +104,25 @@ static int sk_fds_connect(struct sk_fds *sk_fds, bool fast_open) else sk_fds->active_fd = connect_to_fd(sk_fds->srv_fd, 0); - if (CHECK_FAIL(sk_fds->active_fd == -1)) { + if (!ASSERT_NEQ(sk_fds->active_fd, -1, "")) { close(sk_fds->srv_fd); goto error; } len = sizeof(addr6); - if (CHECK(getsockname(sk_fds->srv_fd, (struct sockaddr *)&addr6, - &len), "getsockname(srv_fd)", "%s (%d)\n", - strerror(errno), errno)) + if (!ASSERT_OK(getsockname(sk_fds->srv_fd, (struct sockaddr *)&addr6, + &len), "getsockname(srv_fd)")) goto error_close; sk_fds->passive_lport = ntohs(addr6.sin6_port); len = sizeof(addr6); - if (CHECK(getsockname(sk_fds->active_fd, (struct sockaddr *)&addr6, - &len), "getsockname(active_fd)", "%s (%d)\n", - strerror(errno), errno)) + if (!ASSERT_OK(getsockname(sk_fds->active_fd, (struct sockaddr *)&addr6, + &len), "getsockname(active_fd)")) goto error_close; sk_fds->active_lport = ntohs(addr6.sin6_port); sk_fds->passive_fd = accept(sk_fds->srv_fd, NULL, 0); - if (CHECK(sk_fds->passive_fd == -1, "accept(srv_fd)", "%s (%d)\n", - strerror(errno), errno)) + if (!ASSERT_NEQ(sk_fds->passive_fd, -1, "accept(srv_fd)")) goto error_close; if (fast_open) { @@ -161,8 +130,7 @@ static int sk_fds_connect(struct sk_fds *sk_fds, bool fast_open) int ret; ret = read(sk_fds->passive_fd, bytes_in, sizeof(bytes_in)); - if (CHECK(ret != sizeof(fast), "read fastopen syn data", - "expected=%lu actual=%d\n", sizeof(fast), ret)) { + if (!ASSERT_EQ(ret, sizeof(fast), "read fastopen syn data")) { close(sk_fds->passive_fd); goto error_close; } @@ -183,8 +151,7 @@ static int check_hdr_opt(const struct bpf_test_option *exp, const struct bpf_test_option *act, const char *hdr_desc) { - if (CHECK(memcmp(exp, act, sizeof(*exp)), - "expected-vs-actual", "unexpected %s\n", hdr_desc)) { + if (!ASSERT_OK(memcmp(exp, act, sizeof(*exp)), hdr_desc)) { print_option(exp, "expected: "); print_option(act, " actual: "); return -1; @@ -198,13 +165,11 @@ static int check_hdr_stg(const struct hdr_stg *exp, int fd, { struct hdr_stg act; - if (CHECK(bpf_map_lookup_elem(hdr_stg_map_fd, &fd, &act), - "map_lookup(hdr_stg_map_fd)", "%s %s (%d)\n", - stg_desc, strerror(errno), errno)) + if (!ASSERT_OK(bpf_map_lookup_elem(hdr_stg_map_fd, &fd, &act), + "map_lookup(hdr_stg_map_fd)")) 
return -1; - if (CHECK(memcmp(exp, &act, sizeof(*exp)), - "expected-vs-actual", "unexpected %s\n", stg_desc)) { + if (!ASSERT_OK(memcmp(exp, &act, sizeof(*exp)), stg_desc)) { print_hdr_stg(exp, "expected: "); print_hdr_stg(&act, " actual: "); return -1; @@ -248,9 +213,8 @@ static void check_hdr_and_close_fds(struct sk_fds *sk_fds) if (sk_fds_shutdown(sk_fds)) goto check_linum; - if (CHECK(expected_inherit_cb_flags != skel->bss->inherit_cb_flags, - "Unexpected inherit_cb_flags", "0x%x != 0x%x\n", - skel->bss->inherit_cb_flags, expected_inherit_cb_flags)) + if (!ASSERT_EQ(expected_inherit_cb_flags, skel->bss->inherit_cb_flags, + "inherit_cb_flags")) goto check_linum; if (check_hdr_stg(&exp_passive_hdr_stg, sk_fds->passive_fd, @@ -277,7 +241,7 @@ "active_fin_in"); check_linum: - CHECK_FAIL(check_error_linum(sk_fds)); + ASSERT_FALSE(check_error_linum(sk_fds), "check_error_linum"); sk_fds_close(sk_fds); } @@ -517,26 +481,20 @@ static void misc(void) /* MSG_EOR to ensure skb will not be combined */ ret = send(sk_fds.active_fd, send_msg, sizeof(send_msg), MSG_EOR); - if (CHECK(ret != sizeof(send_msg), "send(msg)", "ret:%d\n", - ret)) + if (!ASSERT_EQ(ret, sizeof(send_msg), "send(msg)")) goto check_linum; ret = read(sk_fds.passive_fd, recv_msg, sizeof(recv_msg)); - if (CHECK(ret != sizeof(send_msg), "read(msg)", "ret:%d\n", - ret)) + if (!ASSERT_EQ(ret, sizeof(send_msg), "read(msg)")) goto check_linum; } if (sk_fds_shutdown(&sk_fds)) goto check_linum; - CHECK(misc_skel->bss->nr_syn != 1, "unexpected nr_syn", - "expected (1) != actual (%u)\n", - misc_skel->bss->nr_syn); + ASSERT_EQ(misc_skel->bss->nr_syn, 1, "unexpected nr_syn"); - CHECK(misc_skel->bss->nr_data != nr_data, "unexpected nr_data", - "expected (%u) != actual (%u)\n", - nr_data, misc_skel->bss->nr_data); + ASSERT_EQ(misc_skel->bss->nr_data, nr_data, "unexpected nr_data"); /* The last ACK may have been delayed, so it is either 1 or 2. 
*/ CHECK(misc_skel->bss->nr_pure_ack != 1 && @@ -545,12 +503,10 @@ "expected (1 or 2) != actual (%u)\n", misc_skel->bss->nr_pure_ack); - CHECK(misc_skel->bss->nr_fin != 1, "unexpected nr_fin", - "expected (1) != actual (%u)\n", - misc_skel->bss->nr_fin); + ASSERT_EQ(misc_skel->bss->nr_fin, 1, "unexpected nr_fin"); check_linum: - CHECK_FAIL(check_error_linum(&sk_fds)); + ASSERT_FALSE(check_error_linum(&sk_fds), "check_error_linum"); sk_fds_close(&sk_fds); bpf_link__destroy(link); } @@ -575,15 +531,15 @@ void test_tcp_hdr_options(void) int i; skel = test_tcp_hdr_options__open_and_load(); - if (CHECK(!skel, "open and load skel", "failed")) + if (!ASSERT_OK_PTR(skel, "open and load skel")) return; misc_skel = test_misc_tcp_hdr_options__open_and_load(); - if (CHECK(!misc_skel, "open and load misc test skel", "failed")) + if (!ASSERT_OK_PTR(misc_skel, "open and load misc test skel")) goto skel_destroy; cg_fd = test__join_cgroup(CG_NAME); - if (CHECK_FAIL(cg_fd < 0)) + if (!ASSERT_GE(cg_fd, 0, "join_cgroup")) goto skel_destroy; for (i = 0; i < ARRAY_SIZE(tests); i++) { diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c index 96ff2c20af81..8fe84da1b9b4 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -16,8 +16,7 @@ static void send_byte(int fd) { char b = 0x55; - if (CHECK_FAIL(write(fd, &b, sizeof(b)) != 1)) - perror("Failed to send single byte"); + ASSERT_EQ(write(fd, &b, sizeof(b)), 1, "send single byte"); } static int wait_for_ack(int fd, int retries) @@ -51,10 +50,8 @@ static int verify_sk(int map_fd, int client_fd, const char *msg, __u32 invoked, int err = 0; struct tcp_rtt_storage val; - if (CHECK_FAIL(bpf_map_lookup_elem(map_fd, &client_fd, &val) < 0)) { - perror("Failed to read socket storage"); + if (!ASSERT_GE(bpf_map_lookup_elem(map_fd, &client_fd, &val), 0, "read socket storage")) return -1; - } if (val.invoked != invoked) { log_err("%s: unexpected bpf_tcp_sock.invoked %d != %d", @@ -151,14 +148,14 @@ void test_tcp_rtt(void) int server_fd, cgroup_fd; cgroup_fd = test__join_cgroup("/tcp_rtt"); - if (CHECK_FAIL(cgroup_fd < 0)) + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /tcp_rtt")) return; server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); - if (CHECK_FAIL(server_fd < 0)) + if (!ASSERT_GE(server_fd, 0, "start_server")) goto close_cgroup_fd; - CHECK_FAIL(run_test(cgroup_fd, server_fd)); + ASSERT_OK(run_test(cgroup_fd, server_fd), "run_test"); close(server_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c b/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c index 87923d2865b7..7e8fe1bad03f 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c +++ b/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c @@ -8,8 +8,6 @@ #define LO_ADDR6 "::1" #define CG_NAME "/tcpbpf-user-test" -static __u32 duration; - static void verify_result(struct tcpbpf_globals *result) { __u32 expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) | @@ -22,9 +20,7 @@ static void verify_result(struct tcpbpf_globals *result) (1 << BPF_SOCK_OPS_TCP_LISTEN_CB)); /* check global map */ - CHECK(expected_events != result->event_map, "event_map", - "unexpected event_map: actual 0x%08x != expected 0x%08x\n", - result->event_map, expected_events); + ASSERT_EQ(expected_events, result->event_map, "event_map"); ASSERT_EQ(result->bytes_received, 501, "bytes_received"); ASSERT_EQ(result->bytes_acked, 1002, "bytes_acked"); @@ -56,18 +52,15 @@ 
static void run_test(struct tcpbpf_globals *result) int i, rv; listen_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0); - if (CHECK(listen_fd == -1, "start_server", "listen_fd:%d errno:%d\n", - listen_fd, errno)) + if (!ASSERT_NEQ(listen_fd, -1, "start_server")) goto done; cli_fd = connect_to_fd(listen_fd, 0); - if (CHECK(cli_fd == -1, "connect_to_fd(listen_fd)", - "cli_fd:%d errno:%d\n", cli_fd, errno)) + if (!ASSERT_NEQ(cli_fd, -1, "connect_to_fd(listen_fd)")) goto done; accept_fd = accept(listen_fd, NULL, NULL); - if (CHECK(accept_fd == -1, "accept(listen_fd)", - "accept_fd:%d errno:%d\n", accept_fd, errno)) + if (!ASSERT_NEQ(accept_fd, -1, "accept(listen_fd)")) goto done; /* Send 1000B of '+'s from cli_fd -> accept_fd */ @@ -75,11 +68,11 @@ static void run_test(struct tcpbpf_globals *result) buf[i] = '+'; rv = send(cli_fd, buf, 1000, 0); - if (CHECK(rv != 1000, "send(cli_fd)", "rv:%d errno:%d\n", rv, errno)) + if (!ASSERT_EQ(rv, 1000, "send(cli_fd)")) goto done; rv = recv(accept_fd, buf, 1000, 0); - if (CHECK(rv != 1000, "recv(accept_fd)", "rv:%d errno:%d\n", rv, errno)) + if (!ASSERT_EQ(rv, 1000, "recv(accept_fd)")) goto done; /* Send 500B of '.'s from accept_fd ->cli_fd */ @@ -87,11 +80,11 @@ static void run_test(struct tcpbpf_globals *result) buf[i] = '.'; rv = send(accept_fd, buf, 500, 0); - if (CHECK(rv != 500, "send(accept_fd)", "rv:%d errno:%d\n", rv, errno)) + if (!ASSERT_EQ(rv, 500, "send(accept_fd)")) goto done; rv = recv(cli_fd, buf, 500, 0); - if (CHECK(rv != 500, "recv(cli_fd)", "rv:%d errno:%d\n", rv, errno)) + if (!ASSERT_EQ(rv, 500, "recv(cli_fd)")) goto done; /* @@ -100,12 +93,12 @@ static void run_test(struct tcpbpf_globals *result) */ shutdown(accept_fd, SHUT_WR); err = recv(cli_fd, buf, 1, 0); - if (CHECK(err, "recv(cli_fd) for fin", "err:%d errno:%d\n", err, errno)) + if (!ASSERT_OK(err, "recv(cli_fd) for fin")) goto done; shutdown(cli_fd, SHUT_WR); err = recv(accept_fd, buf, 1, 0); - CHECK(err, "recv(accept_fd) for fin", "err:%d errno:%d\n", err, errno); + ASSERT_OK(err, "recv(accept_fd) for fin"); done: if (accept_fd != -1) close(accept_fd); @@ -124,12 +117,11 @@ void test_tcpbpf_user(void) int cg_fd = -1; skel = test_tcpbpf_kern__open_and_load(); - if (CHECK(!skel, "open and load skel", "failed")) + if (!ASSERT_OK_PTR(skel, "open and load skel")) return; cg_fd = test__join_cgroup(CG_NAME); - if (CHECK(cg_fd < 0, "test__join_cgroup(" CG_NAME ")", - "cg_fd:%d errno:%d", cg_fd, errno)) + if (!ASSERT_GE(cg_fd, 0, "test__join_cgroup(" CG_NAME ")")) goto err; skel->links.bpf_testcb = bpf_program__attach_cgroup(skel->progs.bpf_testcb, cg_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c new file mode 100644 index 000000000000..d5022b91d1e4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#include <test_progs.h> +#include "tracing_struct.skel.h" + +static void test_fentry(void) +{ + struct tracing_struct *skel; + int err; + + skel = tracing_struct__open_and_load(); + if (!ASSERT_OK_PTR(skel, "tracing_struct__open_and_load")) + return; + + err = tracing_struct__attach(skel); + if (!ASSERT_OK(err, "tracing_struct__attach")) + goto destroy_skel; + + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); + + ASSERT_EQ(skel->bss->t1_a_a, 2, "t1:a.a"); + ASSERT_EQ(skel->bss->t1_a_b, 3, "t1:a.b"); + ASSERT_EQ(skel->bss->t1_b, 1, "t1:b"); + ASSERT_EQ(skel->bss->t1_c, 4, "t1:c"); + + ASSERT_EQ(skel->bss->t1_nregs, 4, "t1 nregs"); + ASSERT_EQ(skel->bss->t1_reg0, 2, "t1 reg0"); + ASSERT_EQ(skel->bss->t1_reg1, 3, "t1 reg1"); + ASSERT_EQ(skel->bss->t1_reg2, 1, "t1 reg2"); + ASSERT_EQ(skel->bss->t1_reg3, 4, "t1 reg3"); + ASSERT_EQ(skel->bss->t1_ret, 10, "t1 ret"); + + ASSERT_EQ(skel->bss->t2_a, 1, "t2:a"); + ASSERT_EQ(skel->bss->t2_b_a, 2, "t2:b.a"); + ASSERT_EQ(skel->bss->t2_b_b, 3, "t2:b.b"); + ASSERT_EQ(skel->bss->t2_c, 4, "t2:c"); + ASSERT_EQ(skel->bss->t2_ret, 10, "t2 ret"); + + ASSERT_EQ(skel->bss->t3_a, 1, "t3:a"); + ASSERT_EQ(skel->bss->t3_b, 4, "t3:b"); + ASSERT_EQ(skel->bss->t3_c_a, 2, "t3:c.a"); + ASSERT_EQ(skel->bss->t3_c_b, 3, "t3:c.b"); + ASSERT_EQ(skel->bss->t3_ret, 10, "t3 ret"); + + ASSERT_EQ(skel->bss->t4_a_a, 10, "t4:a.a"); + ASSERT_EQ(skel->bss->t4_b, 1, "t4:b"); + ASSERT_EQ(skel->bss->t4_c, 2, "t4:c"); + ASSERT_EQ(skel->bss->t4_d, 3, "t4:d"); + ASSERT_EQ(skel->bss->t4_e_a, 2, "t4:e.a"); + ASSERT_EQ(skel->bss->t4_e_b, 3, "t4:e.b"); + ASSERT_EQ(skel->bss->t4_ret, 21, "t4 ret"); + + ASSERT_EQ(skel->bss->t5_ret, 1, "t5 ret"); + + tracing_struct__detach(skel); +destroy_skel: + tracing_struct__destroy(skel); +} + +void test_tracing_struct(void) +{ + test_fentry(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/udp_limit.c b/tools/testing/selftests/bpf/prog_tests/udp_limit.c index 56c9d6bd38a3..2643d896ddae 100644 --- a/tools/testing/selftests/bpf/prog_tests/udp_limit.c +++ b/tools/testing/selftests/bpf/prog_tests/udp_limit.c @@ -5,8 +5,6 @@ #include <sys/types.h> #include <sys/socket.h> -static int duration; - void test_udp_limit(void) { struct udp_limit *skel; @@ -14,11 +12,11 @@ int cgroup_fd; cgroup_fd = test__join_cgroup("/udp_limit"); - if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno)) + if (!ASSERT_GE(cgroup_fd, 0, "cg-join")) return; skel = udp_limit__open_and_load(); - if (CHECK(!skel, "skel-load", "errno %d", errno)) + if (!ASSERT_OK_PTR(skel, "skel-load")) goto close_cgroup_fd; skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd); @@ -32,11 +30,11 @@ void test_udp_limit(void) * verify that. */ fd1 = socket(AF_INET, SOCK_DGRAM, 0); - if (CHECK(fd1 < 0, "fd1", "errno %d", errno)) + if (!ASSERT_GE(fd1, 0, "socket(fd1)")) goto close_skeleton; fd2 = socket(AF_INET, SOCK_DGRAM, 0); - if (CHECK(fd2 >= 0, "fd2", "errno %d", errno)) + if (!ASSERT_LT(fd2, 0, "socket(fd2)")) goto close_skeleton; /* We can reopen again after close. 
*/ @@ -44,7 +42,7 @@ void test_udp_limit(void) fd1 = -1; fd1 = socket(AF_INET, SOCK_DGRAM, 0); - if (CHECK(fd1 < 0, "fd1-again", "errno %d", errno)) + if (!ASSERT_GE(fd1, 0, "socket(fd1-again)")) goto close_skeleton; /* Make sure the program was invoked the expected @@ -54,13 +52,11 @@ void test_udp_limit(void) * - close fd1 - BPF_CGROUP_INET_SOCK_RELEASE * - open fd1 again - BPF_CGROUP_INET_SOCK_CREATE */ - if (CHECK(skel->bss->invocations != 4, "bss-invocations", - "invocations=%d", skel->bss->invocations)) + if (!ASSERT_EQ(skel->bss->invocations, 4, "bss-invocations")) goto close_skeleton; /* We should still have a single socket in use */ - if (CHECK(skel->bss->in_use != 1, "bss-in_use", - "in_use=%d", skel->bss->in_use)) + if (!ASSERT_EQ(skel->bss->in_use, 1, "bss-in_use")) goto close_skeleton; close_skeleton: diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c new file mode 100644 index 000000000000..02b18d018b36 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c @@ -0,0 +1,754 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#define _GNU_SOURCE +#include <linux/compiler.h> +#include <linux/ring_buffer.h> +#include <pthread.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/mman.h> +#include <sys/syscall.h> +#include <sys/sysinfo.h> +#include <test_progs.h> +#include <uapi/linux/bpf.h> +#include <unistd.h> + +#include "user_ringbuf_fail.skel.h" +#include "user_ringbuf_success.skel.h" + +#include "../progs/test_user_ringbuf.h" + +static size_t log_buf_sz = 1 << 20; /* 1 MB */ +static char obj_log_buf[1048576]; +static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ; +static const long c_ringbuf_size = 1 << 12; /* 1 small page */ +static const long c_max_entries = c_ringbuf_size / c_sample_size; + +static void drain_current_samples(void) +{ + syscall(__NR_getpgid); +} + +static int write_samples(struct user_ring_buffer *ringbuf, uint32_t num_samples) +{ + int i, err = 0; + + /* Write some number of samples to the ring buffer. */ + for (i = 0; i < num_samples; i++) { + struct sample *entry; + int read; + + entry = user_ring_buffer__reserve(ringbuf, sizeof(*entry)); + if (!entry) { + err = -errno; + goto done; + } + + entry->pid = getpid(); + entry->seq = i; + entry->value = i * i; + + read = snprintf(entry->comm, sizeof(entry->comm), "%u", i); + if (read <= 0) { + /* Assert on the error path to avoid spamming logs with + * mostly success messages. 
+ */ + ASSERT_GT(read, 0, "snprintf_comm"); + err = read; + user_ring_buffer__discard(ringbuf, entry); + goto done; + } + + user_ring_buffer__submit(ringbuf, entry); + } + +done: + drain_current_samples(); + + return err; +} + +static struct user_ringbuf_success *open_load_ringbuf_skel(void) +{ + struct user_ringbuf_success *skel; + int err; + + skel = user_ringbuf_success__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return NULL; + + err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size); + if (!ASSERT_OK(err, "set_max_entries")) + goto cleanup; + + err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size); + if (!ASSERT_OK(err, "set_max_entries")) + goto cleanup; + + err = user_ringbuf_success__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + return skel; + +cleanup: + user_ringbuf_success__destroy(skel); + return NULL; +} + +static void test_user_ringbuf_mappings(void) +{ + int err, rb_fd; + int page_size = getpagesize(); + void *mmap_ptr; + struct user_ringbuf_success *skel; + + skel = open_load_ringbuf_skel(); + if (!skel) + return; + + rb_fd = bpf_map__fd(skel->maps.user_ringbuf); + /* cons_pos can be mapped R/O, can't add +X with mprotect. */ + mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, 0); + ASSERT_OK_PTR(mmap_ptr, "ro_cons_pos"); + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_cons_pos_protect"); + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect"); + ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "wr_prod_pos"); + err = -errno; + ASSERT_ERR(err, "wr_prod_pos_err"); + ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro_cons"); + + /* prod_pos can be mapped RW, can't add +X with mprotect. */ + mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, + rb_fd, page_size); + ASSERT_OK_PTR(mmap_ptr, "rw_prod_pos"); + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_prod_pos_protect"); + err = -errno; + ASSERT_ERR(err, "wr_prod_pos_err"); + ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_prod"); + + /* data pages can be mapped RW, can't add +X with mprotect. 
*/ + mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, + 2 * page_size); + ASSERT_OK_PTR(mmap_ptr, "rw_data"); + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_data_protect"); + err = -errno; + ASSERT_ERR(err, "exec_data_err"); + ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_data"); + + user_ringbuf_success__destroy(skel); +} + +static int load_skel_create_ringbufs(struct user_ringbuf_success **skel_out, + struct ring_buffer **kern_ringbuf_out, + ring_buffer_sample_fn callback, + struct user_ring_buffer **user_ringbuf_out) +{ + struct user_ringbuf_success *skel; + struct ring_buffer *kern_ringbuf = NULL; + struct user_ring_buffer *user_ringbuf = NULL; + int err = -ENOMEM, rb_fd; + + skel = open_load_ringbuf_skel(); + if (!skel) + return err; + + /* only trigger BPF program for current process */ + skel->bss->pid = getpid(); + + if (kern_ringbuf_out) { + rb_fd = bpf_map__fd(skel->maps.kernel_ringbuf); + kern_ringbuf = ring_buffer__new(rb_fd, callback, skel, NULL); + if (!ASSERT_OK_PTR(kern_ringbuf, "kern_ringbuf_create")) + goto cleanup; + + *kern_ringbuf_out = kern_ringbuf; + } + + if (user_ringbuf_out) { + rb_fd = bpf_map__fd(skel->maps.user_ringbuf); + user_ringbuf = user_ring_buffer__new(rb_fd, NULL); + if (!ASSERT_OK_PTR(user_ringbuf, "user_ringbuf_create")) + goto cleanup; + + *user_ringbuf_out = user_ringbuf; + ASSERT_EQ(skel->bss->read, 0, "no_reads_after_load"); + } + + err = user_ringbuf_success__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + *skel_out = skel; + return 0; + +cleanup: + if (kern_ringbuf_out) + *kern_ringbuf_out = NULL; + if (user_ringbuf_out) + *user_ringbuf_out = NULL; + ring_buffer__free(kern_ringbuf); + user_ring_buffer__free(user_ringbuf); + user_ringbuf_success__destroy(skel); + return err; +} + +static int load_skel_create_user_ringbuf(struct user_ringbuf_success **skel_out, + struct user_ring_buffer **ringbuf_out) +{ + return load_skel_create_ringbufs(skel_out, NULL, NULL, ringbuf_out); +} + +static void manually_write_test_invalid_sample(struct user_ringbuf_success *skel, + __u32 size, __u64 producer_pos, int err) +{ + void *data_ptr; + __u64 *producer_pos_ptr; + int rb_fd, page_size = getpagesize(); + + rb_fd = bpf_map__fd(skel->maps.user_ringbuf); + + ASSERT_EQ(skel->bss->read, 0, "num_samples_before_bad_sample"); + + /* Map the producer_pos as RW. */ + producer_pos_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, + MAP_SHARED, rb_fd, page_size); + ASSERT_OK_PTR(producer_pos_ptr, "producer_pos_ptr"); + + /* Map the data pages as RW. */ + data_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size); + ASSERT_OK_PTR(data_ptr, "rw_data"); + + memset(data_ptr, 0, BPF_RINGBUF_HDR_SZ); + *(__u32 *)data_ptr = size; + + /* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in the kernel. 
*/ + smp_store_release(producer_pos_ptr, producer_pos + BPF_RINGBUF_HDR_SZ); + + drain_current_samples(); + ASSERT_EQ(skel->bss->read, 0, "num_samples_after_bad_sample"); + ASSERT_EQ(skel->bss->err, err, "err_after_bad_sample"); + + ASSERT_OK(munmap(producer_pos_ptr, page_size), "unmap_producer_pos"); + ASSERT_OK(munmap(data_ptr, page_size), "unmap_data_ptr"); +} + +static void test_user_ringbuf_post_misaligned(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err; + __u32 size = (1 << 5) + 7; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (!ASSERT_OK(err, "misaligned_skel")) + return; + + manually_write_test_invalid_sample(skel, size, size, -EINVAL); + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_post_producer_wrong_offset(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err; + __u32 size = (1 << 5); + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (!ASSERT_OK(err, "wrong_offset_skel")) + return; + + manually_write_test_invalid_sample(skel, size, size - 8, -EINVAL); + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_post_larger_than_ringbuf_sz(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err; + __u32 size = c_ringbuf_size; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (!ASSERT_OK(err, "huge_sample_skel")) + return; + + manually_write_test_invalid_sample(skel, size, size, -E2BIG); + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_basic(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (!ASSERT_OK(err, "ringbuf_basic_skel")) + return; + + ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before"); + + err = write_samples(ringbuf, 2); + if (!ASSERT_OK(err, "write_samples")) + goto cleanup; + + ASSERT_EQ(skel->bss->read, 2, "num_samples_read_after"); + +cleanup: + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_sample_full_ring_buffer(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err; + void *sample; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (!ASSERT_OK(err, "ringbuf_full_sample_skel")) + return; + + sample = user_ring_buffer__reserve(ringbuf, c_ringbuf_size - BPF_RINGBUF_HDR_SZ); + if (!ASSERT_OK_PTR(sample, "full_sample")) + goto cleanup; + + user_ring_buffer__submit(ringbuf, sample); + ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before"); + drain_current_samples(); + ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after"); + +cleanup: + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_post_alignment_autoadjust(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + struct sample *sample; + int err; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (!ASSERT_OK(err, "ringbuf_align_autoadjust_skel")) + return; + + /* libbpf should automatically round any sample up to an 8-byte alignment. 
*/ + sample = user_ring_buffer__reserve(ringbuf, sizeof(*sample) + 1); + ASSERT_OK_PTR(sample, "reserve_autoaligned"); + user_ring_buffer__submit(ringbuf, sample); + + ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before"); + drain_current_samples(); + ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after"); + + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_overfill(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (err) + return; + + err = write_samples(ringbuf, c_max_entries * 5); + ASSERT_ERR(err, "write_samples"); + ASSERT_EQ(skel->bss->read, c_max_entries, "max_entries"); + + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_discards_properly_ignored(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err, num_discarded = 0; + __u64 *token; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (err) + return; + + ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before"); + + while (1) { + /* Write samples until the buffer is full. */ + token = user_ring_buffer__reserve(ringbuf, sizeof(*token)); + if (!token) + break; + + user_ring_buffer__discard(ringbuf, token); + num_discarded++; + } + + if (!ASSERT_GE(num_discarded, 0, "num_discarded")) + goto cleanup; + + /* Should not read any samples, as they are all discarded. */ + ASSERT_EQ(skel->bss->read, 0, "num_pre_kick"); + drain_current_samples(); + ASSERT_EQ(skel->bss->read, 0, "num_post_kick"); + + /* Now that the ring buffer has been drained, we should be able to + * reserve another token. + */ + token = user_ring_buffer__reserve(ringbuf, sizeof(*token)); + + if (!ASSERT_OK_PTR(token, "new_token")) + goto cleanup; + + user_ring_buffer__discard(ringbuf, token); +cleanup: + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_loop(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + uint32_t total_samples = 8192; + uint32_t remaining_samples = total_samples; + int err; + + BUILD_BUG_ON(total_samples <= c_max_entries); + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (err) + return; + + do { + uint32_t curr_samples; + + curr_samples = remaining_samples > c_max_entries + ? c_max_entries : remaining_samples; + err = write_samples(ringbuf, curr_samples); + if (err != 0) { + /* Assert inside of if statement to avoid flooding logs + * on the success path. + */ + ASSERT_OK(err, "write_samples"); + goto cleanup; + } + + remaining_samples -= curr_samples; + ASSERT_EQ(skel->bss->read, total_samples - remaining_samples, + "current_batched_entries"); + } while (remaining_samples > 0); + ASSERT_EQ(skel->bss->read, total_samples, "total_batched_entries"); + +cleanup: + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static int send_test_message(struct user_ring_buffer *ringbuf, + enum test_msg_op op, s64 operand_64, + s32 operand_32) +{ + struct test_msg *msg; + + msg = user_ring_buffer__reserve(ringbuf, sizeof(*msg)); + if (!msg) { + /* Assert on the error path to avoid spamming logs with mostly + * success messages. 
+ */ + ASSERT_OK_PTR(msg, "reserve_msg"); + return -ENOMEM; + } + + msg->msg_op = op; + + switch (op) { + case TEST_MSG_OP_INC64: + case TEST_MSG_OP_MUL64: + msg->operand_64 = operand_64; + break; + case TEST_MSG_OP_INC32: + case TEST_MSG_OP_MUL32: + msg->operand_32 = operand_32; + break; + default: + PRINT_FAIL("Invalid operand %d\n", op); + user_ring_buffer__discard(ringbuf, msg); + return -EINVAL; + } + + user_ring_buffer__submit(ringbuf, msg); + + return 0; +} + +static void kick_kernel_read_messages(void) +{ + syscall(__NR_prctl); +} + +static int handle_kernel_msg(void *ctx, void *data, size_t len) +{ + struct user_ringbuf_success *skel = ctx; + struct test_msg *msg = data; + + switch (msg->msg_op) { + case TEST_MSG_OP_INC64: + skel->bss->user_mutated += msg->operand_64; + return 0; + case TEST_MSG_OP_INC32: + skel->bss->user_mutated += msg->operand_32; + return 0; + case TEST_MSG_OP_MUL64: + skel->bss->user_mutated *= msg->operand_64; + return 0; + case TEST_MSG_OP_MUL32: + skel->bss->user_mutated *= msg->operand_32; + return 0; + default: + fprintf(stderr, "Invalid operand %d\n", msg->msg_op); + return -EINVAL; + } +} + +static void drain_kernel_messages_buffer(struct ring_buffer *kern_ringbuf, + struct user_ringbuf_success *skel) +{ + int cnt; + + cnt = ring_buffer__consume(kern_ringbuf); + ASSERT_EQ(cnt, 8, "consume_kern_ringbuf"); + ASSERT_OK(skel->bss->err, "consume_kern_ringbuf_err"); +} + +static void test_user_ringbuf_msg_protocol(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *user_ringbuf; + struct ring_buffer *kern_ringbuf; + int err, i; + __u64 expected_kern = 0; + + err = load_skel_create_ringbufs(&skel, &kern_ringbuf, handle_kernel_msg, &user_ringbuf); + if (!ASSERT_OK(err, "create_ringbufs")) + return; + + for (i = 0; i < 64; i++) { + enum test_msg_op op = i % TEST_MSG_OP_NUM_OPS; + __u64 operand_64 = TEST_OP_64; + __u32 operand_32 = TEST_OP_32; + + err = send_test_message(user_ringbuf, op, operand_64, operand_32); + if (err) { + /* Only assert on a failure to avoid spamming success logs. */ + ASSERT_OK(err, "send_test_message"); + goto cleanup; + } + + switch (op) { + case TEST_MSG_OP_INC64: + expected_kern += operand_64; + break; + case TEST_MSG_OP_INC32: + expected_kern += operand_32; + break; + case TEST_MSG_OP_MUL64: + expected_kern *= operand_64; + break; + case TEST_MSG_OP_MUL32: + expected_kern *= operand_32; + break; + default: + PRINT_FAIL("Unexpected op %d\n", op); + goto cleanup; + } + + if (i % 8 == 0) { + kick_kernel_read_messages(); + ASSERT_EQ(skel->bss->kern_mutated, expected_kern, "expected_kern"); + ASSERT_EQ(skel->bss->err, 0, "bpf_prog_err"); + drain_kernel_messages_buffer(kern_ringbuf, skel); + } + } + +cleanup: + ring_buffer__free(kern_ringbuf); + user_ring_buffer__free(user_ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void *kick_kernel_cb(void *arg) +{ + /* Kick the kernel, causing it to drain the ring buffer and then wake + * up the test thread waiting on epoll. 
*/ + syscall(__NR_getrlimit); + + return NULL; +} + +static int spawn_kick_thread_for_poll(void) +{ + pthread_t thread; + + return pthread_create(&thread, NULL, kick_kernel_cb, NULL); +} + +static void test_user_ringbuf_blocking_reserve(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err, num_written = 0; + __u64 *token; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (err) + return; + + ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before"); + + while (1) { + /* Write samples until the buffer is full. */ + token = user_ring_buffer__reserve(ringbuf, sizeof(*token)); + if (!token) + break; + + *token = 0xdeadbeef; + + user_ring_buffer__submit(ringbuf, token); + num_written++; + } + + if (!ASSERT_GE(num_written, 0, "num_written")) + goto cleanup; + + /* Should not have read any samples until the kernel is kicked. */ + ASSERT_EQ(skel->bss->read, 0, "num_pre_kick"); + + /* We correctly time out after 1 second, without a sample. */ + token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 1000); + if (!ASSERT_EQ(token, NULL, "pre_kick_timeout_token")) + goto cleanup; + + err = spawn_kick_thread_for_poll(); + if (!ASSERT_EQ(err, 0, "deferred_kick_thread")) + goto cleanup; + + /* After spawning another thread that asynchronously kicks the kernel to + * drain the messages, we're able to block and successfully get a + * sample once we receive an event notification. + */ + token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 10000); + + if (!ASSERT_OK_PTR(token, "block_token")) + goto cleanup; + + ASSERT_GT(skel->bss->read, 0, "num_post_kill"); + ASSERT_LE(skel->bss->read, num_written, "num_post_kill"); + ASSERT_EQ(skel->bss->err, 0, "err_post_poll"); + user_ring_buffer__discard(ringbuf, token); + +cleanup: + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static struct { + const char *prog_name; + const char *expected_err_msg; +} failure_tests[] = { + /* failure cases */ + {"user_ringbuf_callback_bad_access1", "negative offset dynptr_ptr ptr"}, + {"user_ringbuf_callback_bad_access2", "dereference of modified dynptr_ptr ptr"}, + {"user_ringbuf_callback_write_forbidden", "invalid mem access 'dynptr_ptr'"}, + {"user_ringbuf_callback_null_context_write", "invalid mem access 'scalar'"}, + {"user_ringbuf_callback_null_context_read", "invalid mem access 'scalar'"}, + {"user_ringbuf_callback_discard_dynptr", "arg 1 is an unacquired reference"}, + {"user_ringbuf_callback_submit_dynptr", "arg 1 is an unacquired reference"}, + {"user_ringbuf_callback_invalid_return", "At callback return the register R0 has value"}, +}; + +#define SUCCESS_TEST(_func) { _func, #_func } + +static struct { + void (*test_callback)(void); + const char *test_name; +} success_tests[] = { + SUCCESS_TEST(test_user_ringbuf_mappings), + SUCCESS_TEST(test_user_ringbuf_post_misaligned), + SUCCESS_TEST(test_user_ringbuf_post_producer_wrong_offset), + SUCCESS_TEST(test_user_ringbuf_post_larger_than_ringbuf_sz), + SUCCESS_TEST(test_user_ringbuf_basic), + SUCCESS_TEST(test_user_ringbuf_sample_full_ring_buffer), + SUCCESS_TEST(test_user_ringbuf_post_alignment_autoadjust), + SUCCESS_TEST(test_user_ringbuf_overfill), + SUCCESS_TEST(test_user_ringbuf_discards_properly_ignored), + SUCCESS_TEST(test_user_ringbuf_loop), + SUCCESS_TEST(test_user_ringbuf_msg_protocol), + SUCCESS_TEST(test_user_ringbuf_blocking_reserve), +}; + +static void verify_fail(const char *prog_name, const char *expected_err_msg) +{ + 
LIBBPF_OPTS(bpf_object_open_opts, opts); + struct bpf_program *prog; + struct user_ringbuf_fail *skel; + int err; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 1; + + skel = user_ringbuf_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "user_ringbuf_fail__open_opts")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + bpf_program__set_autoload(prog, true); + + bpf_map__set_max_entries(skel->maps.user_ringbuf, getpagesize()); + + err = user_ringbuf_fail__load(skel); + if (!ASSERT_ERR(err, "unexpected load success")) + goto cleanup; + + if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { + fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); + fprintf(stderr, "Verifier output: %s\n", obj_log_buf); + } + +cleanup: + user_ringbuf_fail__destroy(skel); +} + +void test_user_ringbuf(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(success_tests); i++) { + if (!test__start_subtest(success_tests[i].test_name)) + continue; + + success_tests[i].test_callback(); + } + + for (i = 0; i < ARRAY_SIZE(failure_tests); i++) { + if (!test__start_subtest(failure_tests[i].prog_name)) + continue; + + verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c new file mode 100644 index 000000000000..579d6ee83ce0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c @@ -0,0 +1,399 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH + * + * Author: Roberto Sassu <roberto.sassu@huawei.com> + */ + +#include <stdio.h> +#include <errno.h> +#include <stdlib.h> +#include <unistd.h> +#include <endian.h> +#include <limits.h> +#include <sys/stat.h> +#include <sys/wait.h> +#include <sys/mman.h> +#include <linux/keyctl.h> +#include <test_progs.h> + +#include "test_verify_pkcs7_sig.skel.h" + +#define MAX_DATA_SIZE (1024 * 1024) +#define MAX_SIG_SIZE 1024 + +#define VERIFY_USE_SECONDARY_KEYRING (1UL) +#define VERIFY_USE_PLATFORM_KEYRING (2UL) + +/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */ +#define MODULE_SIG_STRING "~Module signature appended~\n" + +/* + * Module signature information block. 
+ * + * The constituents of the signature section are, in order: + * + * - Signer's name + * - Key identifier + * - Signature data + * - Information block + */ +struct module_signature { + __u8 algo; /* Public-key crypto algorithm [0] */ + __u8 hash; /* Digest algorithm [0] */ + __u8 id_type; /* Key identifier type [PKEY_ID_PKCS7] */ + __u8 signer_len; /* Length of signer's name [0] */ + __u8 key_id_len; /* Length of key identifier [0] */ + __u8 __pad[3]; + __be32 sig_len; /* Length of signature data */ +}; + +struct data { + __u8 data[MAX_DATA_SIZE]; + __u32 data_len; + __u8 sig[MAX_SIG_SIZE]; + __u32 sig_len; +}; + +static bool kfunc_not_supported; + +static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, + va_list args) +{ + if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n")) + return 0; + + if (strcmp(va_arg(args, char *), "bpf_verify_pkcs7_signature")) + return 0; + + kfunc_not_supported = true; + return 0; +} + +static int _run_setup_process(const char *setup_dir, const char *cmd) +{ + int child_pid, child_status; + + child_pid = fork(); + if (child_pid == 0) { + execlp("./verify_sig_setup.sh", "./verify_sig_setup.sh", cmd, + setup_dir, NULL); + exit(errno); + + } else if (child_pid > 0) { + waitpid(child_pid, &child_status, 0); + return WEXITSTATUS(child_status); + } + + return -EINVAL; +} + +static int populate_data_item_str(const char *tmp_dir, struct data *data_item) +{ + struct stat st; + char data_template[] = "/tmp/dataXXXXXX"; + char path[PATH_MAX]; + int ret, fd, child_status, child_pid; + + data_item->data_len = 4; + memcpy(data_item->data, "test", data_item->data_len); + + fd = mkstemp(data_template); + if (fd == -1) + return -errno; + + ret = write(fd, data_item->data, data_item->data_len); + + close(fd); + + if (ret != data_item->data_len) { + ret = -EIO; + goto out; + } + + child_pid = fork(); + + if (child_pid == -1) { + ret = -errno; + goto out; + } + + if (child_pid == 0) { + snprintf(path, sizeof(path), "%s/signing_key.pem", tmp_dir); + + return execlp("./sign-file", "./sign-file", "-d", "sha256", + path, path, data_template, NULL); + } + + waitpid(child_pid, &child_status, 0); + + ret = WEXITSTATUS(child_status); + if (ret) + goto out; + + snprintf(path, sizeof(path), "%s.p7s", data_template); + + ret = stat(path, &st); + if (ret == -1) { + ret = -errno; + goto out; + } + + if (st.st_size > sizeof(data_item->sig)) { + ret = -EINVAL; + goto out_sig; + } + + data_item->sig_len = st.st_size; + + fd = open(path, O_RDONLY); + if (fd == -1) { + ret = -errno; + goto out_sig; + } + + ret = read(fd, data_item->sig, data_item->sig_len); + + close(fd); + + if (ret != data_item->sig_len) { + ret = -EIO; + goto out_sig; + } + + ret = 0; +out_sig: + unlink(path); +out: + unlink(data_template); + return ret; +} + +static int populate_data_item_mod(struct data *data_item) +{ + char mod_path[PATH_MAX], *mod_path_ptr; + struct stat st; + void *mod; + FILE *fp; + struct module_signature ms; + int ret, fd, modlen, marker_len, sig_len; + + data_item->data_len = 0; + + if (stat("/lib/modules", &st) == -1) + return 0; + + /* Requires CONFIG_TCP_CONG_BIC=m. 
*/ + fp = popen("find /lib/modules/$(uname -r) -name tcp_bic.ko", "r"); + if (!fp) + return 0; + + mod_path_ptr = fgets(mod_path, sizeof(mod_path), fp); + pclose(fp); + + if (!mod_path_ptr) + return 0; + + mod_path_ptr = strchr(mod_path, '\n'); + if (!mod_path_ptr) + return 0; + + *mod_path_ptr = '\0'; + + if (stat(mod_path, &st) == -1) + return 0; + + modlen = st.st_size; + marker_len = sizeof(MODULE_SIG_STRING) - 1; + + fd = open(mod_path, O_RDONLY); + if (fd == -1) + return -errno; + + mod = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + + close(fd); + + if (mod == MAP_FAILED) + return -errno; + + if (strncmp(mod + modlen - marker_len, MODULE_SIG_STRING, marker_len)) { + ret = -EINVAL; + goto out; + } + + modlen -= marker_len; + + memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms)); + + sig_len = __be32_to_cpu(ms.sig_len); + modlen -= sig_len + sizeof(ms); + + if (modlen > sizeof(data_item->data)) { + ret = -E2BIG; + goto out; + } + + memcpy(data_item->data, mod, modlen); + data_item->data_len = modlen; + + if (sig_len > sizeof(data_item->sig)) { + ret = -E2BIG; + goto out; + } + + memcpy(data_item->sig, mod + modlen, sig_len); + data_item->sig_len = sig_len; + ret = 0; +out: + munmap(mod, st.st_size); + return ret; +} + +void test_verify_pkcs7_sig(void) +{ + libbpf_print_fn_t old_print_cb; + char tmp_dir_template[] = "/tmp/verify_sigXXXXXX"; + char *tmp_dir; + struct test_verify_pkcs7_sig *skel = NULL; + struct bpf_map *map; + struct data data; + int ret, zero = 0; + + /* Trigger creation of session keyring. */ + syscall(__NR_request_key, "keyring", "_uid.0", NULL, + KEY_SPEC_SESSION_KEYRING); + + tmp_dir = mkdtemp(tmp_dir_template); + if (!ASSERT_OK_PTR(tmp_dir, "mkdtemp")) + return; + + ret = _run_setup_process(tmp_dir, "setup"); + if (!ASSERT_OK(ret, "_run_setup_process")) + goto close_prog; + + skel = test_verify_pkcs7_sig__open(); + if (!ASSERT_OK_PTR(skel, "test_verify_pkcs7_sig__open")) + goto close_prog; + + old_print_cb = libbpf_set_print(libbpf_print_cb); + ret = test_verify_pkcs7_sig__load(skel); + libbpf_set_print(old_print_cb); + + if (ret < 0 && kfunc_not_supported) { + printf( + "%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n", + __func__); + test__skip(); + goto close_prog; + } + + if (!ASSERT_OK(ret, "test_verify_pkcs7_sig__load")) + goto close_prog; + + ret = test_verify_pkcs7_sig__attach(skel); + if (!ASSERT_OK(ret, "test_verify_pkcs7_sig__attach")) + goto close_prog; + + map = bpf_object__find_map_by_name(skel->obj, "data_input"); + if (!ASSERT_OK_PTR(map, "data_input not found")) + goto close_prog; + + skel->bss->monitored_pid = getpid(); + + /* Test without data and signature. */ + skel->bss->user_keyring_serial = KEY_SPEC_SESSION_KEYRING; + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY); + if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input")) + goto close_prog; + + /* Test successful signature verification with session keyring. */ + ret = populate_data_item_str(tmp_dir, &data); + if (!ASSERT_OK(ret, "populate_data_item_str")) + goto close_prog; + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY); + if (!ASSERT_OK(ret, "bpf_map_update_elem data_input")) + goto close_prog; + + /* Test successful signature verification with testing keyring. 
*/ + skel->bss->user_keyring_serial = syscall(__NR_request_key, "keyring", + "ebpf_testing_keyring", NULL, + KEY_SPEC_SESSION_KEYRING); + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY); + if (!ASSERT_OK(ret, "bpf_map_update_elem data_input")) + goto close_prog; + + /* + * Ensure key_task_permission() is called and rejects the keyring + * (no Search permission). + */ + syscall(__NR_keyctl, KEYCTL_SETPERM, skel->bss->user_keyring_serial, + 0x37373737); + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY); + if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input")) + goto close_prog; + + syscall(__NR_keyctl, KEYCTL_SETPERM, skel->bss->user_keyring_serial, + 0x3f3f3f3f); + + /* + * Ensure key_validate() is called and rejects the keyring (key expired) + */ + syscall(__NR_keyctl, KEYCTL_SET_TIMEOUT, + skel->bss->user_keyring_serial, 1); + sleep(1); + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY); + if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input")) + goto close_prog; + + skel->bss->user_keyring_serial = KEY_SPEC_SESSION_KEYRING; + + /* Test with corrupted data (signature verification should fail). */ + data.data[0] = 'a'; + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY); + if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input")) + goto close_prog; + + ret = populate_data_item_mod(&data); + if (!ASSERT_OK(ret, "populate_data_item_mod")) + goto close_prog; + + /* Test signature verification with system keyrings. */ + if (data.data_len) { + skel->bss->user_keyring_serial = 0; + skel->bss->system_keyring_id = 0; + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, + BPF_ANY); + if (!ASSERT_OK(ret, "bpf_map_update_elem data_input")) + goto close_prog; + + skel->bss->system_keyring_id = VERIFY_USE_SECONDARY_KEYRING; + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, + BPF_ANY); + if (!ASSERT_OK(ret, "bpf_map_update_elem data_input")) + goto close_prog; + + skel->bss->system_keyring_id = VERIFY_USE_PLATFORM_KEYRING; + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, + BPF_ANY); + ASSERT_LT(ret, 0, "bpf_map_update_elem data_input"); + } + +close_prog: + _run_setup_process(tmp_dir, "cleanup"); + + if (!skel) + return; + + skel->bss->monitored_pid = 0; + test_verify_pkcs7_sig__destroy(skel); +} |
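
Note on the conversion pattern used throughout the hunks above: CHECK() takes a failure condition plus a printf-style message and is truthy when the test failed, while the ASSERT_*() macros from test_progs.h take the observed value and its expected relation, log on failure, and evaluate to true on success. That polarity flip is why nearly every converted call site reads "if (!ASSERT_OK(err, "tag")) goto out;". Below is a minimal standalone sketch of the idiom; the macros are simplified stand-ins for the real test_progs.h implementations (which also record errno and per-subtest error counts), and setup()/get_fd() are hypothetical helpers used only for illustration.

#include <stdio.h>

/* Simplified stand-ins for the test_progs.h macros: each one logs on
 * failure and evaluates to true on success (GNU statement expressions,
 * as used by the selftest harness itself).
 */
#define ASSERT_OK(res, name) ({						\
	long ___res = (res);						\
	int ___ok = ___res == 0;					\
	if (!___ok)							\
		fprintf(stderr, "FAIL:%s unexpected error: %ld\n",	\
			(name), ___res);				\
	___ok;								\
})

#define ASSERT_GE(actual, expected, name) ({				\
	long ___act = (actual);						\
	long ___exp = (expected);					\
	int ___ok = ___act >= ___exp;					\
	if (!___ok)							\
		fprintf(stderr, "FAIL:%s actual %ld < expected %ld\n",	\
			(name), ___act, ___exp);			\
	___ok;								\
})

/* Hypothetical helpers standing in for a setup step and an fd source. */
static int setup(void) { return 0; }
static int get_fd(void) { return 3; }

int main(void)
{
	int fd, err;

	/* Old style:  if (CHECK_FAIL(fd < 0)) { perror("get_fd"); return 1; }
	 * New style:  the assert both logs the failure and yields the branch
	 * condition, so no separate perror() or format string is needed.
	 */
	fd = get_fd();
	if (!ASSERT_GE(fd, 0, "get_fd"))
		return 1;

	err = setup();
	if (!ASSERT_OK(err, "setup"))
		return 1;

	printf("all checks passed\n");
	return 0;
}

Because each macro returns its own success value, the converted call sites can drop the duplicated condition and message arguments that CHECK()/CHECK_FAIL() carried, which is the bulk of the line-count reduction in this series.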