Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
34 files changed, 2849 insertions, 197 deletions
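Several files in this series also migrate open-coded CHECK()/CHECK_FAIL() checks to the ASSERT_*() helpers from test_progs.h (align.c below is one example). A minimal sketch of that pattern, using a hypothetical skeleton name:

	#include <test_progs.h>
	#include "example.skel.h"	/* hypothetical skeleton */

	void test_example(void)
	{
		struct example *skel;

		skel = example__open_and_load();
		if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
			return;
		/* Unlike CHECK_FAIL(), ASSERT_OK() also logs the check
		 * name and the unexpected value when it fails.
		 */
		ASSERT_OK(example__attach(skel), "skel_attach");
		example__destroy(skel);
	}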
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 970f09156eb4..4666f88f2bb4 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -2,7 +2,7 @@ #include <test_progs.h> #define MAX_INSNS 512 -#define MAX_MATCHES 16 +#define MAX_MATCHES 24 struct bpf_reg_match { unsigned int line; @@ -267,6 +267,7 @@ static struct bpf_align_test tests[] = { */ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), @@ -280,6 +281,7 @@ static struct bpf_align_test tests[] = { BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), @@ -311,44 +313,52 @@ static struct bpf_align_test tests[] = { {15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, {15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, /* Variable offset is added to R5 packet pointer, - * resulting in auxiliary alignment of 4. + * resulting in auxiliary alignment of 4. To avoid BPF + * verifier's precision backtracking logging + * interfering we also have a no-op R4 = R5 + * instruction to validate R5 state. We also check + * that R4 is what it should be in such case. */ - {17, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + {18, "R4_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + {18, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5, resulting in * reg->off of 14. */ - {18, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + {19, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off * (14) which is 16. Then the variable offset is 4-byte * aligned, so the total offset is 4-byte aligned and * meets the load's requirements. */ - {23, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, - {23, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, + {24, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, + {24, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5 packet pointer, * resulting in reg->off value of 14. */ - {25, "R5_w=pkt(off=14,r=8"}, + {26, "R5_w=pkt(off=14,r=8"}, /* Variable offset is added to R5, resulting in a - * variable offset of (4n). + * variable offset of (4n). See comment for insn #18 + * for R4 = R5 trick. */ - {26, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + {28, "R4_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + {28, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* Constant is added to R5 again, setting reg->off to 18. */ - {27, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + {29, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* And once more we add a variable; resulting var_off * is still (4n), fixed offset is not changed. * Also, we create a new reg->id. 
*/ - {28, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"}, + {31, "R4_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"}, + {31, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (18) * which is 20. Then the variable offset is (4n), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {33, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, - {33, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, + {35, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, + {35, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, }, }, { @@ -681,6 +691,6 @@ void test_align(void) if (!test__start_subtest(test->descr)) continue; - CHECK_FAIL(do_test_single(test)); + ASSERT_OK(do_test_single(test), test->descr); } } diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 3369c5ec3a17..6f8ed61fc4b4 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -3,6 +3,7 @@ #include <test_progs.h> #include <unistd.h> #include <sys/syscall.h> +#include <task_local_storage_helpers.h> #include "bpf_iter_ipv6_route.skel.h" #include "bpf_iter_netlink.skel.h" #include "bpf_iter_bpf_map.skel.h" @@ -175,11 +176,6 @@ static void test_bpf_map(void) bpf_iter_bpf_map__destroy(skel); } -static int pidfd_open(pid_t pid, unsigned int flags) -{ - return syscall(SYS_pidfd_open, pid, flags); -} - static void check_bpf_link_info(const struct bpf_program *prog) { LIBBPF_OPTS(bpf_iter_attach_opts, opts); @@ -295,8 +291,8 @@ static void test_task_pidfd(void) union bpf_iter_link_info linfo; int pidfd; - pidfd = pidfd_open(getpid(), 0); - if (!ASSERT_GT(pidfd, 0, "pidfd_open")) + pidfd = sys_pidfd_open(getpid(), 0); + if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open")) return; memset(&linfo, 0, sizeof(linfo)); @@ -945,10 +941,10 @@ static void test_bpf_array_map(void) { __u64 val, expected_val = 0, res_first_val, first_val = 0; DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); - __u32 expected_key = 0, res_first_key; + __u32 key, expected_key = 0, res_first_key; + int err, i, map_fd, hash_fd, iter_fd; struct bpf_iter_bpf_array_map *skel; union bpf_iter_link_info linfo; - int err, i, map_fd, iter_fd; struct bpf_link *link; char buf[64] = {}; int len, start; @@ -1005,12 +1001,20 @@ static void test_bpf_array_map(void) if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) goto close_iter; + hash_fd = bpf_map__fd(skel->maps.hashmap1); for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { err = bpf_map_lookup_elem(map_fd, &i, &val); - if (!ASSERT_OK(err, "map_lookup")) - goto out; - if (!ASSERT_EQ(i, val, "invalid_val")) - goto out; + if (!ASSERT_OK(err, "map_lookup arraymap1")) + goto close_iter; + if (!ASSERT_EQ(i, val, "invalid_val arraymap1")) + goto close_iter; + + val = i + 4; + err = bpf_map_lookup_elem(hash_fd, &val, &key); + if (!ASSERT_OK(err, "map_lookup hashmap1")) + goto close_iter; + if (!ASSERT_EQ(key, val - 4, "invalid_val hashmap1")) + goto close_iter; } close_iter: @@ -1498,7 +1502,6 @@ static noinline int trigger_func(int arg) static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc) { struct bpf_iter_vma_offset *skel; - struct bpf_link *link; char buf[16] = {}; int iter_fd, len; int pgsz, shift; @@ -1513,11 +1516,11 @@ static void test_task_vma_offset_common(struct 
bpf_iter_attach_opts *opts, bool ; skel->bss->page_shift = shift; - link = bpf_program__attach_iter(skel->progs.get_vma_offset, opts); - if (!ASSERT_OK_PTR(link, "attach_iter")) - return; + skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts); + if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter")) + goto exit; - iter_fd = bpf_iter_create(bpf_link__fd(link)); + iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset)); if (!ASSERT_GT(iter_fd, 0, "create_iter")) goto exit; @@ -1535,7 +1538,7 @@ static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool close(iter_fd); exit: - bpf_link__destroy(link); + bpf_iter_vma_offset__destroy(skel); } static void test_task_vma_offset(void) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c index 8a838ea8bdf3..c8ba4009e4ab 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c @@ -49,14 +49,14 @@ out: static void test_bpf_nf_ct(int mode) { - const char *iptables = "iptables -t raw %s PREROUTING -j CONNMARK --set-mark 42/0"; + const char *iptables = "iptables-legacy -t raw %s PREROUTING -j CONNMARK --set-mark 42/0"; int srv_fd = -1, client_fd = -1, srv_client_fd = -1; struct sockaddr_in peer_addr = {}; struct test_bpf_nf *skel; int prog_fd, err; socklen_t len; u16 srv_port; - char cmd[64]; + char cmd[128]; LIBBPF_OPTS(bpf_test_run_opts, topts, .data_in = &pkt_v4, .data_size_in = sizeof(pkt_v4), @@ -69,7 +69,7 @@ static void test_bpf_nf_ct(int mode) /* Enable connection tracking */ snprintf(cmd, sizeof(cmd), iptables, "-A"); - if (!ASSERT_OK(system(cmd), "iptables")) + if (!ASSERT_OK(system(cmd), cmd)) goto end; srv_port = (mode == TEST_XDP) ? 
5005 : 5006; diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 24dd6214394e..de1b5b9eb93a 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -3949,6 +3949,20 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid return type", }, { + .descr = "decl_tag test #17, func proto, argument", + .raw_types = { + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), 4), (-1), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0), /* [2] */ + BTF_FUNC_PROTO_ENC(0, 1), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_VAR_ENC(NAME_TBD, 2, 0), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag1\0var"), + .btf_load_err = true, + .err_str = "Invalid arg#1", +}, +{ .descr = "type_tag test #1", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ @@ -7133,7 +7147,7 @@ static struct btf_dedup_test dedup_tests[] = { BTF_ENUM_ENC(NAME_NTH(4), 456), /* [4] fwd enum 'e2' after full enum */ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4), - /* [5] incompatible fwd enum with different size */ + /* [5] fwd enum with different size, size does not matter for fwd */ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1), /* [6] incompatible full enum with different value */ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), @@ -7150,9 +7164,7 @@ static struct btf_dedup_test dedup_tests[] = { /* [2] full enum 'e2' */ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), BTF_ENUM_ENC(NAME_NTH(4), 456), - /* [3] incompatible fwd enum with different size */ - BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1), - /* [4] incompatible full enum with different value */ + /* [3] incompatible full enum with different value */ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), BTF_ENUM_ENC(NAME_NTH(2), 321), BTF_END_RAW, @@ -7611,7 +7623,263 @@ static struct btf_dedup_test dedup_tests[] = { BTF_STR_SEC("\0e1\0e1_val"), }, }, - +{ + .descr = "dedup: enum of different size: no dedup", + .input = { + .raw_types = { + /* [1] enum 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), + BTF_ENUM_ENC(NAME_NTH(2), 1), + /* [2] enum 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 2), + BTF_ENUM_ENC(NAME_NTH(2), 1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val"), + }, + .expect = { + .raw_types = { + /* [1] enum 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), + BTF_ENUM_ENC(NAME_NTH(2), 1), + /* [2] enum 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 2), + BTF_ENUM_ENC(NAME_NTH(2), 1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val"), + }, +}, +{ + .descr = "dedup: enum fwd to enum64", + .input = { + .raw_types = { + /* [1] enum64 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8), + BTF_ENUM64_ENC(NAME_NTH(2), 1, 0), + /* [2] enum 'e1' fwd */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4), + /* [3] typedef enum 'e1' td */ + BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 2), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val\0td"), + }, + .expect = { + .raw_types = { + /* [1] enum64 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8), + BTF_ENUM64_ENC(NAME_NTH(2), 1, 0), + /* [2] typedef enum 'e1' td */ + BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 1), + BTF_END_RAW, + }, + 
BTF_STR_SEC("\0e1\0e1_val\0td"), + }, +}, +{ + .descr = "dedup: enum64 fwd to enum", + .input = { + .raw_types = { + /* [1] enum 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), + BTF_ENUM_ENC(NAME_NTH(2), 1), + /* [2] enum64 'e1' fwd */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8), + /* [3] typedef enum 'e1' td */ + BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 2), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val\0td"), + }, + .expect = { + .raw_types = { + /* [1] enum 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), + BTF_ENUM_ENC(NAME_NTH(2), 1), + /* [2] typedef enum 'e1' td */ + BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val\0td"), + }, +}, +{ + .descr = "dedup: standalone fwd declaration struct", + /* + * Verify that CU1:foo and CU2:foo would be unified and that + * typedef/ptr would be updated to point to CU1:foo. + * + * // CU 1: + * struct foo { int x; }; + * + * // CU 2: + * struct foo; + * typedef struct foo *foo_ptr; + */ + .input = { + .raw_types = { + /* CU 1 */ + BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */ + /* CU 2 */ + BTF_FWD_ENC(NAME_NTH(1), 0), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0foo\0x\0foo_ptr"), + }, + .expect = { + .raw_types = { + BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */ + BTF_PTR_ENC(1), /* [3] */ + BTF_TYPEDEF_ENC(NAME_NTH(3), 3), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0foo\0x\0foo_ptr"), + }, +}, +{ + .descr = "dedup: standalone fwd declaration union", + /* + * Verify that CU1:foo and CU2:foo would be unified and that + * typedef/ptr would be updated to point to CU1:foo. + * Same as "dedup: standalone fwd declaration struct" but for unions. + * + * // CU 1: + * union foo { int x; }; + * + * // CU 2: + * union foo; + * typedef union foo *foo_ptr; + */ + .input = { + .raw_types = { + /* CU 1 */ + BTF_UNION_ENC(NAME_NTH(1), 1, 4), /* [1] */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */ + /* CU 2 */ + BTF_FWD_ENC(NAME_TBD, 1), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0foo\0x\0foo_ptr"), + }, + .expect = { + .raw_types = { + BTF_UNION_ENC(NAME_NTH(1), 1, 4), /* [1] */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */ + BTF_PTR_ENC(1), /* [3] */ + BTF_TYPEDEF_ENC(NAME_NTH(3), 3), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0foo\0x\0foo_ptr"), + }, +}, +{ + .descr = "dedup: standalone fwd declaration wrong kind", + /* + * Negative test for btf_dedup_resolve_fwds: + * - CU1:foo is a struct, C2:foo is a union, thus CU2:foo is not deduped; + * - typedef/ptr should remain unchanged as well. 
+ * + * // CU 1: + * struct foo { int x; }; + * + * // CU 2: + * union foo; + * typedef union foo *foo_ptr; + */ + .input = { + .raw_types = { + /* CU 1 */ + BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */ + /* CU 2 */ + BTF_FWD_ENC(NAME_NTH(3), 1), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0foo\0x\0foo_ptr"), + }, + .expect = { + .raw_types = { + /* CU 1 */ + BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */ + /* CU 2 */ + BTF_FWD_ENC(NAME_NTH(3), 1), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + BTF_TYPEDEF_ENC(NAME_NTH(3), 4), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0foo\0x\0foo_ptr"), + }, +}, +{ + .descr = "dedup: standalone fwd declaration name conflict", + /* + * Negative test for btf_dedup_resolve_fwds: + * - two candidates for CU2:foo dedup, thus it is unchanged; + * - typedef/ptr should remain unchanged as well. + * + * // CU 1: + * struct foo { int x; }; + * + * // CU 2: + * struct foo; + * typedef struct foo *foo_ptr; + * + * // CU 3: + * struct foo { int x; int y; }; + */ + .input = { + .raw_types = { + /* CU 1 */ + BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */ + /* CU 2 */ + BTF_FWD_ENC(NAME_NTH(1), 0), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + BTF_TYPEDEF_ENC(NAME_NTH(4), 4), /* [5] */ + /* CU 3 */ + BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [6] */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), + BTF_MEMBER_ENC(NAME_NTH(3), 2, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0foo\0x\0y\0foo_ptr"), + }, + .expect = { + .raw_types = { + /* CU 1 */ + BTF_STRUCT_ENC(NAME_NTH(1), 1, 4), /* [1] */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */ + /* CU 2 */ + BTF_FWD_ENC(NAME_NTH(1), 0), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + BTF_TYPEDEF_ENC(NAME_NTH(4), 4), /* [5] */ + /* CU 3 */ + BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [6] */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), + BTF_MEMBER_ENC(NAME_NTH(3), 2, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0foo\0x\0y\0foo_ptr"), + }, +}, }; static int btf_type_size(const struct btf_type *t) diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c index 90aac437576d..d9024c7a892a 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c @@ -143,6 +143,10 @@ static void test_split_fwd_resolve() { btf__add_struct(btf1, "s2", 4); /* [5] struct s2 { */ btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ /* } */ + /* keep this not a part of type the graph to test btf_dedup_resolve_fwds */ + btf__add_struct(btf1, "s3", 4); /* [6] struct s3 { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ VALIDATE_RAW_BTF( btf1, @@ -153,20 +157,24 @@ static void test_split_fwd_resolve() { "\t'f1' type_id=2 bits_offset=0\n" "\t'f2' type_id=3 bits_offset=64", "[5] STRUCT 's2' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[6] STRUCT 's3' size=4 vlen=1\n" "\t'f1' type_id=1 bits_offset=0"); btf2 = btf__new_empty_split(btf1); if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) goto cleanup; - btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [6] int */ - btf__add_ptr(btf2, 10); /* [7] ptr to struct s1 */ - btf__add_fwd(btf2, "s2", 
BTF_FWD_STRUCT); /* [8] fwd for struct s2 */ - btf__add_ptr(btf2, 8); /* [9] ptr to fwd struct s2 */ - btf__add_struct(btf2, "s1", 16); /* [10] struct s1 { */ - btf__add_field(btf2, "f1", 7, 0, 0); /* struct s1 *f1; */ - btf__add_field(btf2, "f2", 9, 64, 0); /* struct s2 *f2; */ + btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [7] int */ + btf__add_ptr(btf2, 11); /* [8] ptr to struct s1 */ + btf__add_fwd(btf2, "s2", BTF_FWD_STRUCT); /* [9] fwd for struct s2 */ + btf__add_ptr(btf2, 9); /* [10] ptr to fwd struct s2 */ + btf__add_struct(btf2, "s1", 16); /* [11] struct s1 { */ + btf__add_field(btf2, "f1", 8, 0, 0); /* struct s1 *f1; */ + btf__add_field(btf2, "f2", 10, 64, 0); /* struct s2 *f2; */ /* } */ + btf__add_fwd(btf2, "s3", BTF_FWD_STRUCT); /* [12] fwd for struct s3 */ + btf__add_ptr(btf2, 12); /* [13] ptr to struct s1 */ VALIDATE_RAW_BTF( btf2, @@ -178,13 +186,17 @@ static void test_split_fwd_resolve() { "\t'f2' type_id=3 bits_offset=64", "[5] STRUCT 's2' size=4 vlen=1\n" "\t'f1' type_id=1 bits_offset=0", - "[6] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", - "[7] PTR '(anon)' type_id=10", - "[8] FWD 's2' fwd_kind=struct", - "[9] PTR '(anon)' type_id=8", - "[10] STRUCT 's1' size=16 vlen=2\n" - "\t'f1' type_id=7 bits_offset=0\n" - "\t'f2' type_id=9 bits_offset=64"); + "[6] STRUCT 's3' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[7] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[8] PTR '(anon)' type_id=11", + "[9] FWD 's2' fwd_kind=struct", + "[10] PTR '(anon)' type_id=9", + "[11] STRUCT 's1' size=16 vlen=2\n" + "\t'f1' type_id=8 bits_offset=0\n" + "\t'f2' type_id=10 bits_offset=64", + "[12] FWD 's3' fwd_kind=struct", + "[13] PTR '(anon)' type_id=12"); err = btf__dedup(btf2, NULL); if (!ASSERT_OK(err, "btf_dedup")) @@ -199,7 +211,10 @@ static void test_split_fwd_resolve() { "\t'f1' type_id=2 bits_offset=0\n" "\t'f2' type_id=3 bits_offset=64", "[5] STRUCT 's2' size=4 vlen=1\n" - "\t'f1' type_id=1 bits_offset=0"); + "\t'f1' type_id=1 bits_offset=0", + "[6] STRUCT 's3' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[7] PTR '(anon)' type_id=6"); cleanup: btf__free(btf2); diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 24da335482d4..0ba2e8b9c6ac 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -791,11 +791,11 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d, TEST_BTF_DUMP_DATA_OVER(btf, d, "struct", str, struct bpf_sock_ops, sizeof(struct bpf_sock_ops) - 1, "(struct bpf_sock_ops){\n\t.op = (__u32)1,\n", - { .op = 1, .skb_tcp_flags = 2}); + { .op = 1, .skb_hwtstamp = 2}); TEST_BTF_DUMP_DATA_OVER(btf, d, "struct", str, struct bpf_sock_ops, sizeof(struct bpf_sock_ops) - 1, "(struct bpf_sock_ops){\n\t.op = (__u32)1,\n", - { .op = 1, .skb_tcp_flags = 0}); + { .op = 1, .skb_hwtstamp = 0}); } static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d, diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c index c4a2adb38da1..e02feb5fae97 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c @@ -189,6 +189,80 @@ static void test_walk_self_only(struct cgroup_iter *skel) BPF_CGROUP_ITER_SELF_ONLY, "self_only"); } +static void test_walk_dead_self_only(struct cgroup_iter *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + 
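The dead-cgroup subtest below follows the usual bpf_iter read pattern: attach the iter program, turn the resulting link into an iterator fd, then drain it with read() until it returns 0. A minimal sketch of that loop, assuming an already attached struct bpf_link *link:

	int iter_fd = bpf_iter_create(bpf_link__fd(link));
	char buf[128];
	int len;

	if (iter_fd < 0)
		return;
	/* read() drives the iterator; 0 means iteration finished */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	close(iter_fd);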
char expected_output[128], buf[128]; + const char *cgrp_name = "/dead"; + union bpf_iter_link_info linfo; + int len, cgrp_fd, iter_fd; + struct bpf_link *link; + size_t left; + char *p; + + cgrp_fd = create_and_get_cgroup(cgrp_name); + if (!ASSERT_GE(cgrp_fd, 0, "create cgrp")) + return; + + /* The cgroup will be dead during read() iteration, so it only has + * epilogue in the output + */ + snprintf(expected_output, sizeof(expected_output), EPILOGUE); + + memset(&linfo, 0, sizeof(linfo)); + linfo.cgroup.cgroup_fd = cgrp_fd; + linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto close_cgrp; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "iter_create")) + goto free_link; + + /* Close link fd and cgroup fd */ + bpf_link__destroy(link); + close(cgrp_fd); + + /* Remove cgroup to mark it as dead */ + remove_cgroup(cgrp_name); + + /* Two kern_sync_rcu() and usleep() pairs are used to wait for the + * releases of cgroup css, and the last kern_sync_rcu() and usleep() + * pair is used to wait for the free of cgroup itself. + */ + kern_sync_rcu(); + usleep(8000); + kern_sync_rcu(); + usleep(8000); + kern_sync_rcu(); + usleep(1000); + + memset(buf, 0, sizeof(buf)); + left = ARRAY_SIZE(buf); + p = buf; + while ((len = read(iter_fd, p, left)) > 0) { + p += len; + left -= len; + } + + ASSERT_STREQ(buf, expected_output, "dead cgroup output"); + + /* read() after iter finishes should be ok. */ + if (len == 0) + ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read"); + + close(iter_fd); + return; +free_link: + bpf_link__destroy(link); +close_cgrp: + close(cgrp_fd); +} + void test_cgroup_iter(void) { struct cgroup_iter *skel = NULL; @@ -217,6 +291,8 @@ void test_cgroup_iter(void) test_early_termination(skel); if (test__start_subtest("cgroup_iter__self_only")) test_walk_self_only(skel); + if (test__start_subtest("cgroup_iter__dead_self_only")) + test_walk_dead_self_only(skel); out: cgroup_iter__destroy(skel); cleanup_cgroups(); diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c new file mode 100644 index 000000000000..973f0c5af965 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#define _GNU_SOURCE +#include <cgroup_helpers.h> +#include <test_progs.h> + +#include "cgrp_kfunc_failure.skel.h" +#include "cgrp_kfunc_success.skel.h" + +static size_t log_buf_sz = 1 << 20; /* 1 MB */ +static char obj_log_buf[1048576]; + +static struct cgrp_kfunc_success *open_load_cgrp_kfunc_skel(void) +{ + struct cgrp_kfunc_success *skel; + int err; + + skel = cgrp_kfunc_success__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return NULL; + + skel->bss->pid = getpid(); + + err = cgrp_kfunc_success__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + return skel; + +cleanup: + cgrp_kfunc_success__destroy(skel); + return NULL; +} + +static int mkdir_rm_test_dir(void) +{ + int fd; + const char *cgrp_path = "cgrp_kfunc"; + + fd = create_and_get_cgroup(cgrp_path); + if (!ASSERT_GT(fd, 0, "mkdir_cgrp_fd")) + return -1; + + close(fd); + remove_cgroup(cgrp_path); + + return 0; +} + +static void run_success_test(const char *prog_name) +{ + struct cgrp_kfunc_success *skel; + struct bpf_program *prog; + struct bpf_link *link = NULL; + + skel = open_load_cgrp_kfunc_skel(); + if (!ASSERT_OK_PTR(skel, "open_load_skel")) + return; + + if (!ASSERT_OK(skel->bss->err, "pre_mkdir_err")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "attached_link")) + goto cleanup; + + ASSERT_EQ(skel->bss->invocations, 0, "pre_rmdir_count"); + if (!ASSERT_OK(mkdir_rm_test_dir(), "cgrp_mkdir")) + goto cleanup; + + ASSERT_EQ(skel->bss->invocations, 1, "post_rmdir_count"); + ASSERT_OK(skel->bss->err, "post_rmdir_err"); + +cleanup: + bpf_link__destroy(link); + cgrp_kfunc_success__destroy(skel); +} + +static const char * const success_tests[] = { + "test_cgrp_acquire_release_argument", + "test_cgrp_acquire_leave_in_map", + "test_cgrp_xchg_release", + "test_cgrp_get_release", + "test_cgrp_get_ancestors", +}; + +static struct { + const char *prog_name; + const char *expected_err_msg; +} failure_tests[] = { + {"cgrp_kfunc_acquire_untrusted", "R1 must be referenced or trusted"}, + {"cgrp_kfunc_acquire_fp", "arg#0 pointer type STRUCT cgroup must point"}, + {"cgrp_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"}, + {"cgrp_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"}, + {"cgrp_kfunc_acquire_null", "arg#0 pointer type STRUCT cgroup must point"}, + {"cgrp_kfunc_acquire_unreleased", "Unreleased reference"}, + {"cgrp_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"}, + {"cgrp_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"}, + {"cgrp_kfunc_get_null", "arg#0 expected pointer to map value"}, + {"cgrp_kfunc_xchg_unreleased", "Unreleased reference"}, + {"cgrp_kfunc_get_unreleased", "Unreleased reference"}, + {"cgrp_kfunc_release_untrusted", "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket"}, + {"cgrp_kfunc_release_fp", "arg#0 pointer type STRUCT cgroup must point"}, + {"cgrp_kfunc_release_null", "arg#0 is ptr_or_null_ expected ptr_ or socket"}, + {"cgrp_kfunc_release_unacquired", "release kernel function bpf_cgroup_release expects"}, +}; + +static void verify_fail(const char *prog_name, const char *expected_err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + struct cgrp_kfunc_failure *skel; + int err, i; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 1; + + skel = 
cgrp_kfunc_failure__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "cgrp_kfunc_failure__open_opts")) + goto cleanup; + + for (i = 0; i < ARRAY_SIZE(failure_tests); i++) { + struct bpf_program *prog; + const char *curr_name = failure_tests[i].prog_name; + + prog = bpf_object__find_program_by_name(skel->obj, curr_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name)); + } + + err = cgrp_kfunc_failure__load(skel); + if (!ASSERT_ERR(err, "unexpected load success")) + goto cleanup; + + if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { + fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); + fprintf(stderr, "Verifier output: %s\n", obj_log_buf); + } + +cleanup: + cgrp_kfunc_failure__destroy(skel); +} + +void test_cgrp_kfunc(void) +{ + int i, err; + + err = setup_cgroup_environment(); + if (!ASSERT_OK(err, "cgrp_env_setup")) + goto cleanup; + + for (i = 0; i < ARRAY_SIZE(success_tests); i++) { + if (!test__start_subtest(success_tests[i])) + continue; + + run_success_test(success_tests[i]); + } + + for (i = 0; i < ARRAY_SIZE(failure_tests); i++) { + if (!test__start_subtest(failure_tests[i].prog_name)) + continue; + + verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg); + } + +cleanup: + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c new file mode 100644 index 000000000000..1c30412ba132 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates.*/ + +#define _GNU_SOURCE +#include <unistd.h> +#include <sys/syscall.h> +#include <sys/types.h> +#include <test_progs.h> +#include "cgrp_ls_tp_btf.skel.h" +#include "cgrp_ls_recursion.skel.h" +#include "cgrp_ls_attach_cgroup.skel.h" +#include "cgrp_ls_negative.skel.h" +#include "network_helpers.h" + +struct socket_cookie { + __u64 cookie_key; + __u32 cookie_value; +}; + +static void test_tp_btf(int cgroup_fd) +{ + struct cgrp_ls_tp_btf *skel; + long val1 = 1, val2 = 0; + int err; + + skel = cgrp_ls_tp_btf__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + /* populate a value in map_b */ + err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY); + if (!ASSERT_OK(err, "map_update_elem")) + goto out; + + /* check value */ + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val2); + if (!ASSERT_OK(err, "map_lookup_elem")) + goto out; + if (!ASSERT_EQ(val2, 1, "map_lookup_elem, invalid val")) + goto out; + + /* delete value */ + err = bpf_map_delete_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd); + if (!ASSERT_OK(err, "map_delete_elem")) + goto out; + + skel->bss->target_pid = syscall(SYS_gettid); + + err = cgrp_ls_tp_btf__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + syscall(SYS_gettid); + syscall(SYS_gettid); + + skel->bss->target_pid = 0; + + /* 3x syscalls: 1x attach and 2x gettid */ + ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt"); + ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt"); + ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt"); +out: + cgrp_ls_tp_btf__destroy(skel); +} + +static void test_attach_cgroup(int cgroup_fd) +{ + int server_fd = 0, client_fd = 0, err = 0; + socklen_t addr_len = sizeof(struct sockaddr_in6); + struct 
cgrp_ls_attach_cgroup *skel; + __u32 cookie_expected_value; + struct sockaddr_in6 addr; + struct socket_cookie val; + + skel = cgrp_ls_attach_cgroup__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->links.set_cookie = bpf_program__attach_cgroup( + skel->progs.set_cookie, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.set_cookie, "prog_attach")) + goto out; + + skel->links.update_cookie_sockops = bpf_program__attach_cgroup( + skel->progs.update_cookie_sockops, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.update_cookie_sockops, "prog_attach")) + goto out; + + skel->links.update_cookie_tracing = bpf_program__attach( + skel->progs.update_cookie_tracing); + if (!ASSERT_OK_PTR(skel->links.update_cookie_tracing, "prog_attach")) + goto out; + + server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); + if (!ASSERT_GE(server_fd, 0, "start_server")) + goto out; + + client_fd = connect_to_fd(server_fd, 0); + if (!ASSERT_GE(client_fd, 0, "connect_to_fd")) + goto close_server_fd; + + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.socket_cookies), + &cgroup_fd, &val); + if (!ASSERT_OK(err, "map_lookup(socket_cookies)")) + goto close_client_fd; + + err = getsockname(client_fd, (struct sockaddr *)&addr, &addr_len); + if (!ASSERT_OK(err, "getsockname")) + goto close_client_fd; + + cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF; + ASSERT_EQ(val.cookie_value, cookie_expected_value, "cookie_value"); + +close_client_fd: + close(client_fd); +close_server_fd: + close(server_fd); +out: + cgrp_ls_attach_cgroup__destroy(skel); +} + +static void test_recursion(int cgroup_fd) +{ + struct cgrp_ls_recursion *skel; + int err; + + skel = cgrp_ls_recursion__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + err = cgrp_ls_recursion__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + /* trigger sys_enter, make sure it does not cause deadlock */ + syscall(SYS_gettid); + +out: + cgrp_ls_recursion__destroy(skel); +} + +static void test_negative(void) +{ + struct cgrp_ls_negative *skel; + + skel = cgrp_ls_negative__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "skel_open_and_load")) { + cgrp_ls_negative__destroy(skel); + return; + } +} + +void test_cgrp_local_storage(void) +{ + int cgroup_fd; + + cgroup_fd = test__join_cgroup("/cgrp_local_storage"); + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /cgrp_local_storage")) + return; + + if (test__start_subtest("tp_btf")) + test_tp_btf(cgroup_fd); + if (test__start_subtest("attach_cgroup")) + test_attach_cgroup(cgroup_fd); + if (test__start_subtest("recursion")) + test_recursion(cgroup_fd); + if (test__start_subtest("negative")) + test_negative(); + + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c index 8fc4e6c02bfd..b0c06f821cb8 100644 --- a/tools/testing/selftests/bpf/prog_tests/dynptr.c +++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c @@ -17,7 +17,7 @@ static struct { {"ringbuf_missing_release2", "Unreleased reference id=2"}, {"ringbuf_missing_release_callback", "Unreleased reference id"}, {"use_after_invalid", "Expected an initialized dynptr as arg #3"}, - {"ringbuf_invalid_api", "type=mem expected=alloc_mem"}, + {"ringbuf_invalid_api", "type=mem expected=ringbuf_mem"}, {"add_dynptr_to_map1", "invalid indirect read from stack"}, {"add_dynptr_to_map2", "invalid indirect read from stack"}, {"data_slice_out_of_bounds_ringbuf", "value is outside of the allowed memory range"}, diff --git 
a/tools/testing/selftests/bpf/prog_tests/empty_skb.c b/tools/testing/selftests/bpf/prog_tests/empty_skb.c new file mode 100644 index 000000000000..0613f3bb8b5e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/empty_skb.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include <net/if.h> +#include "empty_skb.skel.h" + +#define SYS(cmd) ({ \ + if (!ASSERT_OK(system(cmd), (cmd))) \ + goto out; \ +}) + +void serial_test_empty_skb(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, tattr); + struct empty_skb *bpf_obj = NULL; + struct nstoken *tok = NULL; + struct bpf_program *prog; + char eth_hlen_pp[15]; + char eth_hlen[14]; + int veth_ifindex; + int ipip_ifindex; + int err; + int i; + + struct { + const char *msg; + const void *data_in; + __u32 data_size_in; + int *ifindex; + int err; + int ret; + bool success_on_tc; + } tests[] = { + /* Empty packets are always rejected. */ + + { + /* BPF_PROG_RUN ETH_HLEN size check */ + .msg = "veth empty ingress packet", + .data_in = NULL, + .data_size_in = 0, + .ifindex = &veth_ifindex, + .err = -EINVAL, + }, + { + /* BPF_PROG_RUN ETH_HLEN size check */ + .msg = "ipip empty ingress packet", + .data_in = NULL, + .data_size_in = 0, + .ifindex = &ipip_ifindex, + .err = -EINVAL, + }, + + /* ETH_HLEN-sized packets: + * - can not be redirected at LWT_XMIT + * - can be redirected at TC to non-tunneling dest + */ + + { + /* __bpf_redirect_common */ + .msg = "veth ETH_HLEN packet ingress", + .data_in = eth_hlen, + .data_size_in = sizeof(eth_hlen), + .ifindex = &veth_ifindex, + .ret = -ERANGE, + .success_on_tc = true, + }, + { + /* __bpf_redirect_no_mac + * + * lwt: skb->len=0 <= skb_network_offset=0 + * tc: skb->len=14 <= skb_network_offset=14 + */ + .msg = "ipip ETH_HLEN packet ingress", + .data_in = eth_hlen, + .data_size_in = sizeof(eth_hlen), + .ifindex = &ipip_ifindex, + .ret = -ERANGE, + }, + + /* ETH_HLEN+1-sized packet should be redirected. 
*/ + + { + .msg = "veth ETH_HLEN+1 packet ingress", + .data_in = eth_hlen_pp, + .data_size_in = sizeof(eth_hlen_pp), + .ifindex = &veth_ifindex, + }, + { + .msg = "ipip ETH_HLEN+1 packet ingress", + .data_in = eth_hlen_pp, + .data_size_in = sizeof(eth_hlen_pp), + .ifindex = &ipip_ifindex, + }, + }; + + SYS("ip netns add empty_skb"); + tok = open_netns("empty_skb"); + SYS("ip link add veth0 type veth peer veth1"); + SYS("ip link set dev veth0 up"); + SYS("ip link set dev veth1 up"); + SYS("ip addr add 10.0.0.1/8 dev veth0"); + SYS("ip addr add 10.0.0.2/8 dev veth1"); + veth_ifindex = if_nametoindex("veth0"); + + SYS("ip link add ipip0 type ipip local 10.0.0.1 remote 10.0.0.2"); + SYS("ip link set ipip0 up"); + SYS("ip addr add 192.168.1.1/16 dev ipip0"); + ipip_ifindex = if_nametoindex("ipip0"); + + bpf_obj = empty_skb__open_and_load(); + if (!ASSERT_OK_PTR(bpf_obj, "open skeleton")) + goto out; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + bpf_object__for_each_program(prog, bpf_obj->obj) { + char buf[128]; + bool at_tc = !strncmp(bpf_program__section_name(prog), "tc", 2); + + tattr.data_in = tests[i].data_in; + tattr.data_size_in = tests[i].data_size_in; + + tattr.data_size_out = 0; + bpf_obj->bss->ifindex = *tests[i].ifindex; + bpf_obj->bss->ret = 0; + err = bpf_prog_test_run_opts(bpf_program__fd(prog), &tattr); + sprintf(buf, "err: %s [%s]", tests[i].msg, bpf_program__name(prog)); + + if (at_tc && tests[i].success_on_tc) + ASSERT_GE(err, 0, buf); + else + ASSERT_EQ(err, tests[i].err, buf); + sprintf(buf, "ret: %s [%s]", tests[i].msg, bpf_program__name(prog)); + if (at_tc && tests[i].success_on_tc) + ASSERT_GE(bpf_obj->bss->ret, 0, buf); + else + ASSERT_EQ(bpf_obj->bss->ret, tests[i].ret, buf); + } + } + +out: + if (bpf_obj) + empty_skb__destroy(bpf_obj); + if (tok) + close_netns(tok); + system("ip netns del empty_skb"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/hashmap.c b/tools/testing/selftests/bpf/prog_tests/hashmap.c index 4747ab18f97f..d358a223fd2d 100644 --- a/tools/testing/selftests/bpf/prog_tests/hashmap.c +++ b/tools/testing/selftests/bpf/prog_tests/hashmap.c @@ -7,17 +7,18 @@ */ #include "test_progs.h" #include "bpf/hashmap.h" +#include <stddef.h> static int duration = 0; -static size_t hash_fn(const void *k, void *ctx) +static size_t hash_fn(long k, void *ctx) { - return (long)k; + return k; } -static bool equal_fn(const void *a, const void *b, void *ctx) +static bool equal_fn(long a, long b, void *ctx) { - return (long)a == (long)b; + return a == b; } static inline size_t next_pow_2(size_t n) @@ -52,8 +53,8 @@ static void test_hashmap_generic(void) return; for (i = 0; i < ELEM_CNT; i++) { - const void *oldk, *k = (const void *)(long)i; - void *oldv, *v = (void *)(long)(1024 + i); + long oldk, k = i; + long oldv, v = 1024 + i; err = hashmap__update(map, k, v, &oldk, &oldv); if (CHECK(err != -ENOENT, "hashmap__update", @@ -64,20 +65,18 @@ static void test_hashmap_generic(void) err = hashmap__add(map, k, v); } else { err = hashmap__set(map, k, v, &oldk, &oldv); - if (CHECK(oldk != NULL || oldv != NULL, "check_kv", - "unexpected k/v: %p=%p\n", oldk, oldv)) + if (CHECK(oldk != 0 || oldv != 0, "check_kv", + "unexpected k/v: %ld=%ld\n", oldk, oldv)) goto cleanup; } - if (CHECK(err, "elem_add", "failed to add k/v %ld = %ld: %d\n", - (long)k, (long)v, err)) + if (CHECK(err, "elem_add", "failed to add k/v %ld = %ld: %d\n", k, v, err)) goto cleanup; if (CHECK(!hashmap__find(map, k, &oldv), "elem_find", - "failed to find key %ld\n", (long)k)) + "failed to find key %ld\n", 
k)) goto cleanup; - if (CHECK(oldv != v, "elem_val", - "found value is wrong: %ld\n", (long)oldv)) + if (CHECK(oldv != v, "elem_val", "found value is wrong: %ld\n", oldv)) goto cleanup; } @@ -91,8 +90,8 @@ static void test_hashmap_generic(void) found_msk = 0; hashmap__for_each_entry(map, entry, bkt) { - long k = (long)entry->key; - long v = (long)entry->value; + long k = entry->key; + long v = entry->value; found_msk |= 1ULL << k; if (CHECK(v - k != 1024, "check_kv", @@ -104,8 +103,8 @@ static void test_hashmap_generic(void) goto cleanup; for (i = 0; i < ELEM_CNT; i++) { - const void *oldk, *k = (const void *)(long)i; - void *oldv, *v = (void *)(long)(256 + i); + long oldk, k = i; + long oldv, v = 256 + i; err = hashmap__add(map, k, v); if (CHECK(err != -EEXIST, "hashmap__add", @@ -119,13 +118,13 @@ static void test_hashmap_generic(void) if (CHECK(err, "elem_upd", "failed to update k/v %ld = %ld: %d\n", - (long)k, (long)v, err)) + k, v, err)) goto cleanup; if (CHECK(!hashmap__find(map, k, &oldv), "elem_find", - "failed to find key %ld\n", (long)k)) + "failed to find key %ld\n", k)) goto cleanup; if (CHECK(oldv != v, "elem_val", - "found value is wrong: %ld\n", (long)oldv)) + "found value is wrong: %ld\n", oldv)) goto cleanup; } @@ -139,8 +138,8 @@ static void test_hashmap_generic(void) found_msk = 0; hashmap__for_each_entry_safe(map, entry, tmp, bkt) { - long k = (long)entry->key; - long v = (long)entry->value; + long k = entry->key; + long v = entry->value; found_msk |= 1ULL << k; if (CHECK(v - k != 256, "elem_check", @@ -152,7 +151,7 @@ static void test_hashmap_generic(void) goto cleanup; found_cnt = 0; - hashmap__for_each_key_entry(map, entry, (void *)0) { + hashmap__for_each_key_entry(map, entry, 0) { found_cnt++; } if (CHECK(!found_cnt, "found_cnt", @@ -161,27 +160,25 @@ static void test_hashmap_generic(void) found_msk = 0; found_cnt = 0; - hashmap__for_each_key_entry_safe(map, entry, tmp, (void *)0) { - const void *oldk, *k; - void *oldv, *v; + hashmap__for_each_key_entry_safe(map, entry, tmp, 0) { + long oldk, k; + long oldv, v; k = entry->key; v = entry->value; found_cnt++; - found_msk |= 1ULL << (long)k; + found_msk |= 1ULL << k; if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del", - "failed to delete k/v %ld = %ld\n", - (long)k, (long)v)) + "failed to delete k/v %ld = %ld\n", k, v)) goto cleanup; if (CHECK(oldk != k || oldv != v, "check_old", "invalid deleted k/v: expected %ld = %ld, got %ld = %ld\n", - (long)k, (long)v, (long)oldk, (long)oldv)) + k, v, oldk, oldv)) goto cleanup; if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del", - "unexpectedly deleted k/v %ld = %ld\n", - (long)oldk, (long)oldv)) + "unexpectedly deleted k/v %ld = %ld\n", oldk, oldv)) goto cleanup; } @@ -198,26 +195,24 @@ static void test_hashmap_generic(void) goto cleanup; hashmap__for_each_entry_safe(map, entry, tmp, bkt) { - const void *oldk, *k; - void *oldv, *v; + long oldk, k; + long oldv, v; k = entry->key; v = entry->value; found_cnt++; - found_msk |= 1ULL << (long)k; + found_msk |= 1ULL << k; if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del", - "failed to delete k/v %ld = %ld\n", - (long)k, (long)v)) + "failed to delete k/v %ld = %ld\n", k, v)) goto cleanup; if (CHECK(oldk != k || oldv != v, "elem_check", "invalid old k/v: expect %ld = %ld, got %ld = %ld\n", - (long)k, (long)v, (long)oldk, (long)oldv)) + k, v, oldk, oldv)) goto cleanup; if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del", - "unexpectedly deleted k/v %ld = %ld\n", - (long)k, (long)v)) + "unexpectedly 
deleted k/v %ld = %ld\n", k, v)) goto cleanup; } @@ -235,7 +230,7 @@ static void test_hashmap_generic(void) hashmap__for_each_entry(map, entry, bkt) { CHECK(false, "elem_exists", "unexpected map entries left: %ld = %ld\n", - (long)entry->key, (long)entry->value); + entry->key, entry->value); goto cleanup; } @@ -243,22 +238,107 @@ static void test_hashmap_generic(void) hashmap__for_each_entry(map, entry, bkt) { CHECK(false, "elem_exists", "unexpected map entries left: %ld = %ld\n", - (long)entry->key, (long)entry->value); + entry->key, entry->value); + goto cleanup; + } + +cleanup: + hashmap__free(map); +} + +static size_t str_hash_fn(long a, void *ctx) +{ + return str_hash((char *)a); +} + +static bool str_equal_fn(long a, long b, void *ctx) +{ + return strcmp((char *)a, (char *)b) == 0; +} + +/* Verify that hashmap interface works with pointer keys and values */ +static void test_hashmap_ptr_iface(void) +{ + const char *key, *value, *old_key, *old_value; + struct hashmap_entry *cur; + struct hashmap *map; + int err, i, bkt; + + map = hashmap__new(str_hash_fn, str_equal_fn, NULL); + if (CHECK(!map, "hashmap__new", "can't allocate hashmap\n")) goto cleanup; + +#define CHECK_STR(fn, var, expected) \ + CHECK(strcmp(var, (expected)), (fn), \ + "wrong value of " #var ": '%s' instead of '%s'\n", var, (expected)) + + err = hashmap__insert(map, "a", "apricot", HASHMAP_ADD, NULL, NULL); + if (CHECK(err, "hashmap__insert", "unexpected error: %d\n", err)) + goto cleanup; + + err = hashmap__insert(map, "a", "apple", HASHMAP_SET, &old_key, &old_value); + if (CHECK(err, "hashmap__insert", "unexpected error: %d\n", err)) + goto cleanup; + CHECK_STR("hashmap__update", old_key, "a"); + CHECK_STR("hashmap__update", old_value, "apricot"); + + err = hashmap__add(map, "b", "banana"); + if (CHECK(err, "hashmap__add", "unexpected error: %d\n", err)) + goto cleanup; + + err = hashmap__set(map, "b", "breadfruit", &old_key, &old_value); + if (CHECK(err, "hashmap__set", "unexpected error: %d\n", err)) + goto cleanup; + CHECK_STR("hashmap__set", old_key, "b"); + CHECK_STR("hashmap__set", old_value, "banana"); + + err = hashmap__update(map, "b", "blueberry", &old_key, &old_value); + if (CHECK(err, "hashmap__update", "unexpected error: %d\n", err)) + goto cleanup; + CHECK_STR("hashmap__update", old_key, "b"); + CHECK_STR("hashmap__update", old_value, "breadfruit"); + + err = hashmap__append(map, "c", "cherry"); + if (CHECK(err, "hashmap__append", "unexpected error: %d\n", err)) + goto cleanup; + + if (CHECK(!hashmap__delete(map, "c", &old_key, &old_value), + "hashmap__delete", "expected to have entry for 'c'\n")) + goto cleanup; + CHECK_STR("hashmap__delete", old_key, "c"); + CHECK_STR("hashmap__delete", old_value, "cherry"); + + CHECK(!hashmap__find(map, "b", &value), "hashmap__find", "can't find value for 'b'\n"); + CHECK_STR("hashmap__find", value, "blueberry"); + + if (CHECK(!hashmap__delete(map, "b", NULL, NULL), + "hashmap__delete", "expected to have entry for 'b'\n")) + goto cleanup; + + i = 0; + hashmap__for_each_entry(map, cur, bkt) { + if (CHECK(i != 0, "hashmap__for_each_entry", "too many entries")) + goto cleanup; + key = cur->pkey; + value = cur->pvalue; + CHECK_STR("entry", key, "a"); + CHECK_STR("entry", value, "apple"); + i++; } +#undef CHECK_STR cleanup: hashmap__free(map); } -static size_t collision_hash_fn(const void *k, void *ctx) +static size_t collision_hash_fn(long k, void *ctx) { return 0; } static void test_hashmap_multimap(void) { - void *k1 = (void *)0, *k2 = (void *)1; + long k1 = 0, k2 = 1; 
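The hashmap tests above exercise libbpf's reworked internal hashmap, whose callbacks and entries now take plain long keys and values instead of const void *, so integer keys need no casts. A minimal sketch of the new calling convention (hashmap_long_demo is a hypothetical helper):

	#include <stdio.h>
	#include "bpf/hashmap.h"

	static size_t id_hash(long key, void *ctx)
	{
		return key;
	}

	static bool id_equal(long a, long b, void *ctx)
	{
		return a == b;
	}

	static void hashmap_long_demo(void)
	{
		struct hashmap *map = hashmap__new(id_hash, id_equal, NULL);
		long val;

		if (!map)
			return;
		hashmap__add(map, 1, 1025);	/* no (void *) casts needed */
		if (hashmap__find(map, 1, &val))
			printf("found: %ld\n", val);	/* prints 1025 */
		hashmap__free(map);
	}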
struct hashmap_entry *entry; struct hashmap *map; long found_msk; @@ -273,23 +353,23 @@ static void test_hashmap_multimap(void) * [0] -> 1, 2, 4; * [1] -> 8, 16, 32; */ - err = hashmap__append(map, k1, (void *)1); + err = hashmap__append(map, k1, 1); if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) goto cleanup; - err = hashmap__append(map, k1, (void *)2); + err = hashmap__append(map, k1, 2); if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) goto cleanup; - err = hashmap__append(map, k1, (void *)4); + err = hashmap__append(map, k1, 4); if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) goto cleanup; - err = hashmap__append(map, k2, (void *)8); + err = hashmap__append(map, k2, 8); if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) goto cleanup; - err = hashmap__append(map, k2, (void *)16); + err = hashmap__append(map, k2, 16); if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) goto cleanup; - err = hashmap__append(map, k2, (void *)32); + err = hashmap__append(map, k2, 32); if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) goto cleanup; @@ -300,7 +380,7 @@ static void test_hashmap_multimap(void) /* verify global iteration still works and sees all values */ found_msk = 0; hashmap__for_each_entry(map, entry, bkt) { - found_msk |= (long)entry->value; + found_msk |= entry->value; } if (CHECK(found_msk != (1 << 6) - 1, "found_msk", "not all keys iterated: %lx\n", found_msk)) @@ -309,7 +389,7 @@ static void test_hashmap_multimap(void) /* iterate values for key 1 */ found_msk = 0; hashmap__for_each_key_entry(map, entry, k1) { - found_msk |= (long)entry->value; + found_msk |= entry->value; } if (CHECK(found_msk != (1 | 2 | 4), "found_msk", "invalid k1 values: %lx\n", found_msk)) @@ -318,7 +398,7 @@ static void test_hashmap_multimap(void) /* iterate values for key 2 */ found_msk = 0; hashmap__for_each_key_entry(map, entry, k2) { - found_msk |= (long)entry->value; + found_msk |= entry->value; } if (CHECK(found_msk != (8 | 16 | 32), "found_msk", "invalid k2 values: %lx\n", found_msk)) @@ -333,7 +413,7 @@ static void test_hashmap_empty() struct hashmap_entry *entry; int bkt; struct hashmap *map; - void *k = (void *)0; + long k = 0; /* force collisions */ map = hashmap__new(hash_fn, equal_fn, NULL); @@ -374,4 +454,6 @@ void test_hashmap() test_hashmap_multimap(); if (test__start_subtest("empty")) test_hashmap_empty(); + if (test__start_subtest("ptr_iface")) + test_hashmap_ptr_iface(); } diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c index c210657d4d0a..55d641c1f126 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c @@ -22,7 +22,7 @@ static struct { "arg#0 pointer type STRUCT bpf_dynptr_kern points to unsupported dynamic pointer type", 0}, {"not_valid_dynptr", "arg#0 pointer type STRUCT bpf_dynptr_kern must be valid and initialized", 0}, - {"not_ptr_to_stack", "arg#0 pointer type STRUCT bpf_dynptr_kern not to stack", 0}, + {"not_ptr_to_stack", "arg#0 expected pointer to stack", 0}, {"dynptr_data_null", NULL, -EBADMSG}, }; diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index a4b4133d39e9..c6f37e825f11 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -312,12 +312,12 @@ static inline __u64 
get_time_ns(void) return (__u64) t.tv_sec * 1000000000 + t.tv_nsec; } -static size_t symbol_hash(const void *key, void *ctx __maybe_unused) +static size_t symbol_hash(long key, void *ctx __maybe_unused) { return str_hash((const char *) key); } -static bool symbol_equal(const void *key1, const void *key2, void *ctx __maybe_unused) +static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused) { return strcmp((const char *) key1, (const char *) key2) == 0; } @@ -325,7 +325,7 @@ static bool symbol_equal(const void *key1, const void *key2, void *ctx __maybe_u static int get_syms(char ***symsp, size_t *cntp) { size_t cap = 0, cnt = 0, i; - char *name, **syms = NULL; + char *name = NULL, **syms = NULL; struct hashmap *map; char buf[256]; FILE *f; @@ -352,6 +352,8 @@ static int get_syms(char ***symsp, size_t *cntp) /* skip modules */ if (strchr(buf, '[')) continue; + + free(name); if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1) continue; /* @@ -371,32 +373,32 @@ static int get_syms(char ***symsp, size_t *cntp) if (!strncmp(name, "__ftrace_invalid_address__", sizeof("__ftrace_invalid_address__") - 1)) continue; - err = hashmap__add(map, name, NULL); - if (err) { - free(name); - if (err == -EEXIST) - continue; + + err = hashmap__add(map, name, 0); + if (err == -EEXIST) + continue; + if (err) goto error; - } + err = libbpf_ensure_mem((void **) &syms, &cap, sizeof(*syms), cnt + 1); - if (err) { - free(name); + if (err) goto error; - } - syms[cnt] = name; - cnt++; + + syms[cnt++] = name; + name = NULL; } *symsp = syms; *cntp = cnt; error: + free(name); fclose(f); hashmap__free(map); if (err) { for (i = 0; i < cnt; i++) - free(syms[cnt]); + free(syms[i]); free(syms); } return err; diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c new file mode 100644 index 000000000000..1fbe7e4ac00a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "kprobe_multi.skel.h" +#include "trace_helpers.h" +#include "bpf/libbpf_internal.h" + +static void kprobe_multi_testmod_check(struct kprobe_multi *skel) +{ + ASSERT_EQ(skel->bss->kprobe_testmod_test1_result, 1, "kprobe_test1_result"); + ASSERT_EQ(skel->bss->kprobe_testmod_test2_result, 1, "kprobe_test2_result"); + ASSERT_EQ(skel->bss->kprobe_testmod_test3_result, 1, "kprobe_test3_result"); + + ASSERT_EQ(skel->bss->kretprobe_testmod_test1_result, 1, "kretprobe_test1_result"); + ASSERT_EQ(skel->bss->kretprobe_testmod_test2_result, 1, "kretprobe_test2_result"); + ASSERT_EQ(skel->bss->kretprobe_testmod_test3_result, 1, "kretprobe_test3_result"); +} + +static void test_testmod_attach_api(struct bpf_kprobe_multi_opts *opts) +{ + struct kprobe_multi *skel = NULL; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + return; + + skel->bss->pid = getpid(); + + skel->links.test_kprobe_testmod = bpf_program__attach_kprobe_multi_opts( + skel->progs.test_kprobe_testmod, + NULL, opts); + if (!skel->links.test_kprobe_testmod) + goto cleanup; + + opts->retprobe = true; + skel->links.test_kretprobe_testmod = bpf_program__attach_kprobe_multi_opts( + skel->progs.test_kretprobe_testmod, + NULL, opts); + if (!skel->links.test_kretprobe_testmod) + goto cleanup; + + ASSERT_OK(trigger_module_test_read(1), "trigger_read"); + kprobe_multi_testmod_check(skel); + +cleanup: + kprobe_multi__destroy(skel); +} + +static void 
test_testmod_attach_api_addrs(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + unsigned long long addrs[3]; + + addrs[0] = ksym_get_addr("bpf_testmod_fentry_test1"); + ASSERT_NEQ(addrs[0], 0, "ksym_get_addr"); + addrs[1] = ksym_get_addr("bpf_testmod_fentry_test2"); + ASSERT_NEQ(addrs[1], 0, "ksym_get_addr"); + addrs[2] = ksym_get_addr("bpf_testmod_fentry_test3"); + ASSERT_NEQ(addrs[2], 0, "ksym_get_addr"); + + opts.addrs = (const unsigned long *) addrs; + opts.cnt = ARRAY_SIZE(addrs); + + test_testmod_attach_api(&opts); +} + +static void test_testmod_attach_api_syms(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + const char *syms[3] = { + "bpf_testmod_fentry_test1", + "bpf_testmod_fentry_test2", + "bpf_testmod_fentry_test3", + }; + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + test_testmod_attach_api(&opts); +} + +void serial_test_kprobe_multi_testmod_test(void) +{ + if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh")) + return; + + if (test__start_subtest("testmod_attach_api_syms")) + test_testmod_attach_api_syms(); + if (test__start_subtest("testmod_attach_api_addrs")) + test_testmod_attach_api_addrs(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c new file mode 100644 index 000000000000..25e5dfa9c315 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH + * + * Author: Roberto Sassu <roberto.sassu@huawei.com> + */ + +#include <test_progs.h> + +#include "test_libbpf_get_fd_by_id_opts.skel.h" + +void test_libbpf_get_fd_by_id_opts(void) +{ + struct test_libbpf_get_fd_by_id_opts *skel; + struct bpf_map_info info_m = {}; + __u32 len = sizeof(info_m), value; + int ret, zero = 0, fd = -1; + LIBBPF_OPTS(bpf_get_fd_by_id_opts, fd_opts_rdonly, + .open_flags = BPF_F_RDONLY, + ); + + skel = test_libbpf_get_fd_by_id_opts__open_and_load(); + if (!ASSERT_OK_PTR(skel, + "test_libbpf_get_fd_by_id_opts__open_and_load")) + return; + + ret = test_libbpf_get_fd_by_id_opts__attach(skel); + if (!ASSERT_OK(ret, "test_libbpf_get_fd_by_id_opts__attach")) + goto close_prog; + + ret = bpf_obj_get_info_by_fd(bpf_map__fd(skel->maps.data_input), + &info_m, &len); + if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd")) + goto close_prog; + + fd = bpf_map_get_fd_by_id(info_m.id); + if (!ASSERT_LT(fd, 0, "bpf_map_get_fd_by_id")) + goto close_prog; + + fd = bpf_map_get_fd_by_id_opts(info_m.id, NULL); + if (!ASSERT_LT(fd, 0, "bpf_map_get_fd_by_id_opts")) + goto close_prog; + + fd = bpf_map_get_fd_by_id_opts(info_m.id, &fd_opts_rdonly); + if (!ASSERT_GE(fd, 0, "bpf_map_get_fd_by_id_opts")) + goto close_prog; + + /* Map lookup should work with read-only fd. */ + ret = bpf_map_lookup_elem(fd, &zero, &value); + if (!ASSERT_OK(ret, "bpf_map_lookup_elem")) + goto close_prog; + + if (!ASSERT_EQ(value, 0, "map value mismatch")) + goto close_prog; + + /* Map update should not work with read-only fd. */ + ret = bpf_map_update_elem(fd, &zero, &len, BPF_ANY); + if (!ASSERT_LT(ret, 0, "bpf_map_update_elem")) + goto close_prog; + + /* Map update should work with read-write fd. */ + ret = bpf_map_update_elem(bpf_map__fd(skel->maps.data_input), &zero, + &len, BPF_ANY); + if (!ASSERT_OK(ret, "bpf_map_update_elem")) + goto close_prog; + + /* Prog get fd with opts set should not work (no kernel support). 
*/ + ret = bpf_prog_get_fd_by_id_opts(0, &fd_opts_rdonly); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_prog_get_fd_by_id_opts")) + goto close_prog; + + /* Link get fd with opts set should not work (no kernel support). */ + ret = bpf_link_get_fd_by_id_opts(0, &fd_opts_rdonly); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_link_get_fd_by_id_opts")) + goto close_prog; + + /* BTF get fd with opts set should not work (no kernel support). */ + ret = bpf_btf_get_fd_by_id_opts(0, &fd_opts_rdonly); + ASSERT_EQ(ret, -EINVAL, "bpf_btf_get_fd_by_id_opts"); + +close_prog: + if (fd >= 0) + close(fd); + + test_libbpf_get_fd_by_id_opts__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c index 93e9cddaadcf..efb8bd43653c 100644 --- a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c +++ b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c @@ -139,6 +139,14 @@ static void test_libbpf_bpf_map_type_str(void) snprintf(buf, sizeof(buf), "BPF_MAP_TYPE_%s", map_type_str); uppercase(buf); + /* Special case for map_type_name BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED + * where it and BPF_MAP_TYPE_CGROUP_STORAGE have the same enum value + * (map_type). For this enum value, libbpf_bpf_map_type_str() picks + * BPF_MAP_TYPE_CGROUP_STORAGE. + */ + if (strcmp(map_type_name, "BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED") == 0) + continue; + ASSERT_STREQ(buf, map_type_name, "exp_str_value"); } diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c new file mode 100644 index 000000000000..9a7d4c47af63 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -0,0 +1,740 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <bpf/btf.h> +#include <test_btf.h> +#include <linux/btf.h> +#include <test_progs.h> +#include <network_helpers.h> + +#include "linked_list.skel.h" +#include "linked_list_fail.skel.h" + +static char log_buf[1024 * 1024]; + +static struct { + const char *prog_name; + const char *err_msg; +} linked_list_fail_tests[] = { +#define TEST(test, off) \ + { #test "_missing_lock_push_front", \ + "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \ + { #test "_missing_lock_push_back", \ + "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \ + { #test "_missing_lock_pop_front", \ + "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \ + { #test "_missing_lock_pop_back", \ + "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, + TEST(kptr, 32) + TEST(global, 16) + TEST(map, 0) + TEST(inner_map, 0) +#undef TEST +#define TEST(test, op) \ + { #test "_kptr_incorrect_lock_" #op, \ + "held lock and object are not in the same allocation\n" \ + "bpf_spin_lock at off=32 must be held for bpf_list_head" }, \ + { #test "_global_incorrect_lock_" #op, \ + "held lock and object are not in the same allocation\n" \ + "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \ + { #test "_map_incorrect_lock_" #op, \ + "held lock and object are not in the same allocation\n" \ + "bpf_spin_lock at off=0 must be held for bpf_list_head" }, \ + { #test "_inner_map_incorrect_lock_" #op, \ + "held lock and object are not in the same allocation\n" \ + "bpf_spin_lock at off=0 must be held for bpf_list_head" }, + TEST(kptr, push_front) + TEST(kptr, push_back) + TEST(kptr, pop_front) + TEST(kptr, pop_back) + TEST(global, push_front) + TEST(global, push_back) + TEST(global, pop_front) + TEST(global, pop_back) + TEST(map, push_front) + TEST(map, 
push_back) + TEST(map, pop_front) + TEST(map, pop_back) + TEST(inner_map, push_front) + TEST(inner_map, push_back) + TEST(inner_map, pop_front) + TEST(inner_map, pop_back) +#undef TEST + { "map_compat_kprobe", "tracing progs cannot use bpf_list_head yet" }, + { "map_compat_kretprobe", "tracing progs cannot use bpf_list_head yet" }, + { "map_compat_tp", "tracing progs cannot use bpf_list_head yet" }, + { "map_compat_perf", "tracing progs cannot use bpf_list_head yet" }, + { "map_compat_raw_tp", "tracing progs cannot use bpf_list_head yet" }, + { "map_compat_raw_tp_w", "tracing progs cannot use bpf_list_head yet" }, + { "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" }, + { "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" }, + { "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" }, + { "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" }, + { "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" }, + { "obj_new_acq", "Unreleased reference id=" }, + { "use_after_drop", "invalid mem access 'scalar'" }, + { "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" }, + { "direct_read_lock", "direct access to bpf_spin_lock is disallowed" }, + { "direct_write_lock", "direct access to bpf_spin_lock is disallowed" }, + { "direct_read_head", "direct access to bpf_list_head is disallowed" }, + { "direct_write_head", "direct access to bpf_list_head is disallowed" }, + { "direct_read_node", "direct access to bpf_list_node is disallowed" }, + { "direct_write_node", "direct access to bpf_list_node is disallowed" }, + { "write_after_push_front", "only read is supported" }, + { "write_after_push_back", "only read is supported" }, + { "use_after_unlock_push_front", "invalid mem access 'scalar'" }, + { "use_after_unlock_push_back", "invalid mem access 'scalar'" }, + { "double_push_front", "arg#1 expected pointer to allocated object" }, + { "double_push_back", "arg#1 expected pointer to allocated object" }, + { "no_node_value_type", "bpf_list_node not found at offset=0" }, + { "incorrect_value_type", + "operation on bpf_list_head expects arg#1 bpf_list_node at offset=0 in struct foo, " + "but arg is at offset=0 in struct bar" }, + { "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, + { "incorrect_node_off1", "bpf_list_node not found at offset=1" }, + { "incorrect_node_off2", "arg#1 offset=40, but expected bpf_list_node at offset=0 in struct foo" }, + { "no_head_type", "bpf_list_head not found at offset=0" }, + { "incorrect_head_var_off1", "R1 doesn't have constant offset" }, + { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, + { "incorrect_head_off1", "bpf_list_head not found at offset=17" }, + { "incorrect_head_off2", "bpf_list_head not found at offset=1" }, + { "pop_front_off", + "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) " + "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n" + "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" }, + { "pop_back_off", + "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) " + "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n" + "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" }, +}; + +static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf, + .kernel_log_size = 
sizeof(log_buf), + .kernel_log_level = 1); + struct linked_list_fail *skel; + struct bpf_program *prog; + int ret; + + skel = linked_list_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "linked_list_fail__open_opts")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto end; + + bpf_program__set_autoload(prog, true); + + ret = linked_list_fail__load(skel); + if (!ASSERT_ERR(ret, "linked_list_fail__load must fail")) + goto end; + + if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) { + fprintf(stderr, "Expected: %s\n", err_msg); + fprintf(stderr, "Verifier: %s\n", log_buf); + } + +end: + linked_list_fail__destroy(skel); +} + +static void clear_fields(struct bpf_map *map) +{ + char buf[24]; + int key = 0; + + memset(buf, 0xff, sizeof(buf)); + ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0), "check_and_free_fields"); +} + +enum { + TEST_ALL, + PUSH_POP, + PUSH_POP_MULT, + LIST_IN_LIST, +}; + +static void test_linked_list_success(int mode, bool leave_in_map) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct linked_list *skel; + int ret; + + skel = linked_list__open_and_load(); + if (!ASSERT_OK_PTR(skel, "linked_list__open_and_load")) + return; + + if (mode == LIST_IN_LIST) + goto lil; + if (mode == PUSH_POP_MULT) + goto ppm; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop), &opts); + ASSERT_OK(ret, "map_list_push_pop"); + ASSERT_OK(opts.retval, "map_list_push_pop retval"); + if (!leave_in_map) + clear_fields(skel->maps.array_map); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop), &opts); + ASSERT_OK(ret, "inner_map_list_push_pop"); + ASSERT_OK(opts.retval, "inner_map_list_push_pop retval"); + if (!leave_in_map) + clear_fields(skel->maps.inner_map); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop), &opts); + ASSERT_OK(ret, "global_list_push_pop"); + ASSERT_OK(opts.retval, "global_list_push_pop retval"); + if (!leave_in_map) + clear_fields(skel->maps.bss_A); + + if (mode == PUSH_POP) + goto end; + +ppm: + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop_multiple), &opts); + ASSERT_OK(ret, "map_list_push_pop_multiple"); + ASSERT_OK(opts.retval, "map_list_push_pop_multiple retval"); + if (!leave_in_map) + clear_fields(skel->maps.array_map); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop_multiple), &opts); + ASSERT_OK(ret, "inner_map_list_push_pop_multiple"); + ASSERT_OK(opts.retval, "inner_map_list_push_pop_multiple retval"); + if (!leave_in_map) + clear_fields(skel->maps.inner_map); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_multiple), &opts); + ASSERT_OK(ret, "global_list_push_pop_multiple"); + ASSERT_OK(opts.retval, "global_list_push_pop_multiple retval"); + if (!leave_in_map) + clear_fields(skel->maps.bss_A); + + if (mode == PUSH_POP_MULT) + goto end; + +lil: + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_in_list), &opts); + ASSERT_OK(ret, "map_list_in_list"); + ASSERT_OK(opts.retval, "map_list_in_list retval"); + if (!leave_in_map) + clear_fields(skel->maps.array_map); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_in_list), &opts); + ASSERT_OK(ret, "inner_map_list_in_list"); + ASSERT_OK(opts.retval, 
"inner_map_list_in_list retval"); + if (!leave_in_map) + clear_fields(skel->maps.inner_map); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_in_list), &opts); + ASSERT_OK(ret, "global_list_in_list"); + ASSERT_OK(opts.retval, "global_list_in_list retval"); + if (!leave_in_map) + clear_fields(skel->maps.bss_A); +end: + linked_list__destroy(skel); +} + +#define SPIN_LOCK 2 +#define LIST_HEAD 3 +#define LIST_NODE 4 + +static struct btf *init_btf(void) +{ + int id, lid, hid, nid; + struct btf *btf; + + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "btf__new_empty")) + return NULL; + id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED); + if (!ASSERT_EQ(id, 1, "btf__add_int")) + goto end; + lid = btf__add_struct(btf, "bpf_spin_lock", 4); + if (!ASSERT_EQ(lid, SPIN_LOCK, "btf__add_struct bpf_spin_lock")) + goto end; + hid = btf__add_struct(btf, "bpf_list_head", 16); + if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head")) + goto end; + nid = btf__add_struct(btf, "bpf_list_node", 16); + if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node")) + goto end; + return btf; +end: + btf__free(btf); + return NULL; +} + +static void test_btf(void) +{ + struct btf *btf = NULL; + int id, err; + + while (test__start_subtest("btf: too many locks")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 24); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0); + if (!ASSERT_OK(err, "btf__add_struct foo::a")) + break; + err = btf__add_field(btf, "b", SPIN_LOCK, 32, 0); + if (!ASSERT_OK(err, "btf__add_struct foo::a")) + break; + err = btf__add_field(btf, "c", LIST_HEAD, 64, 0); + if (!ASSERT_OK(err, "btf__add_struct foo::a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -E2BIG, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: missing lock")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 16); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_struct foo::a")) + break; + id = btf__add_decl_tag(btf, "contains:baz:a", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:baz:a")) + break; + id = btf__add_struct(btf, "baz", 16); + if (!ASSERT_EQ(id, 7, "btf__add_struct baz")) + break; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field baz::a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -EINVAL, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: bad offset")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 36); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::c")) + break; + id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -EEXIST, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: missing contains:")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, 
"init_btf")) + break; + id = btf__add_struct(btf, "foo", 24); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_HEAD, 64, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -EINVAL, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: missing struct")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 24); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_HEAD, 64, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = btf__add_decl_tag(btf, "contains:bar:bar", 5, 1); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:bar")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -ENOENT, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: missing node")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 24); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_HEAD, 64, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = btf__add_decl_tag(btf, "contains:foo:c", 5, 1); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:c")) + break; + + err = btf__load_into_kernel(btf); + btf__free(btf); + ASSERT_EQ(err, -ENOENT, "check btf"); + break; + } + + while (test__start_subtest("btf: node incorrect type")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 20); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a")) + break; + id = btf__add_struct(btf, "bar", 4); + if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -EINVAL, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: multiple bpf_list_node with name b")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 52); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 256, 0); + if (!ASSERT_OK(err, "btf__add_field foo::c")) + break; + err = btf__add_field(btf, "d", SPIN_LOCK, 384, 0); + if (!ASSERT_OK(err, "btf__add_field foo::d")) + break; + id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -EINVAL, "check 
btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: owning | owned AA cycle")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 36); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field foo::c")) + break; + id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -ELOOP, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: owning | owned ABA cycle")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 36); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field foo::c")) + break; + id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b")) + break; + id = btf__add_struct(btf, "bar", 36); + if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field bar::c")) + break; + id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0); + if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:foo:b")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -ELOOP, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: owning -> owned")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 20); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a")) + break; + id = btf__add_struct(btf, "bar", 16); + if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, 0, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: owning -> owning | owned -> owned")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 20); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = btf__add_decl_tag(btf, "contains:bar:b", 
5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b")) + break; + id = btf__add_struct(btf, "bar", 36); + if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field bar::c")) + break; + id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0); + if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a")) + break; + id = btf__add_struct(btf, "baz", 16); + if (!ASSERT_EQ(id, 9, "btf__add_struct baz")) + break; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field baz:a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, 0, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: owning | owned -> owning | owned -> owned")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 36); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field foo::c")) + break; + id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b")) + break; + id = btf__add_struct(btf, "bar", 36); + if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar:a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar:b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field bar:c")) + break; + id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0); + if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a")) + break; + id = btf__add_struct(btf, "baz", 16); + if (!ASSERT_EQ(id, 9, "btf__add_struct baz")) + break; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field baz:a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -ELOOP, "check btf"); + btf__free(btf); + break; + } + + while (test__start_subtest("btf: owning -> owning | owned -> owning | owned -> owned")) { + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + break; + id = btf__add_struct(btf, "foo", 20); + if (!ASSERT_EQ(id, 5, "btf__add_struct foo")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + break; + err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + break; + id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0); + if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b")) + break; + id = btf__add_struct(btf, "bar", 36); + if (!ASSERT_EQ(id, 7, "btf__add_struct bar")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field bar::c")) + break; + id = btf__add_decl_tag(btf, 
"contains:baz:b", 7, 0); + if (!ASSERT_EQ(id, 8, "btf__add_decl_tag")) + break; + id = btf__add_struct(btf, "baz", 36); + if (!ASSERT_EQ(id, 9, "btf__add_struct baz")) + break; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + break; + err = btf__add_field(btf, "b", LIST_NODE, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar::b")) + break; + err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0); + if (!ASSERT_OK(err, "btf__add_field bar::c")) + break; + id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0); + if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a")) + break; + id = btf__add_struct(btf, "bam", 16); + if (!ASSERT_EQ(id, 11, "btf__add_struct bam")) + break; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bam::a")) + break; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, -ELOOP, "check btf"); + btf__free(btf); + break; + } +} + +void test_linked_list(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(linked_list_fail_tests); i++) { + if (!test__start_subtest(linked_list_fail_tests[i].prog_name)) + continue; + test_linked_list_fail_prog(linked_list_fail_tests[i].prog_name, + linked_list_fail_tests[i].err_msg); + } + test_btf(); + test_linked_list_success(PUSH_POP, false); + test_linked_list_success(PUSH_POP, true); + test_linked_list_success(PUSH_POP_MULT, false); + test_linked_list_success(PUSH_POP_MULT, true); + test_linked_list_success(LIST_IN_LIST, false); + test_linked_list_success(LIST_IN_LIST, true); + test_linked_list_success(TEST_ALL, false); +} diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c index 1102e4f42d2d..f117bfef68a1 100644 --- a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c +++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c @@ -173,10 +173,12 @@ static void test_lsm_cgroup_functional(void) ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count"); ASSERT_EQ(query_prog_cnt(cgroup_fd2, NULL), 1, "total prog count"); - /* AF_UNIX is prohibited. */ - fd = socket(AF_UNIX, SOCK_STREAM, 0); - ASSERT_LT(fd, 0, "socket(AF_UNIX)"); + if (!(skel->kconfig->CONFIG_SECURITY_APPARMOR + || skel->kconfig->CONFIG_SECURITY_SELINUX + || skel->kconfig->CONFIG_SECURITY_SMACK)) + /* AF_UNIX is prohibited. */ + ASSERT_LT(fd, 0, "socket(AF_UNIX)"); close(fd); /* AF_INET6 gets default policy (sk_priority). 
*/ @@ -233,11 +235,18 @@ static void test_lsm_cgroup_functional(void) /* AF_INET6+SOCK_STREAM * AF_PACKET+SOCK_RAW + * AF_UNIX+SOCK_RAW if already have non-bpf lsms installed * listen_fd * client_fd * accepted_fd */ - ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2"); + if (skel->kconfig->CONFIG_SECURITY_APPARMOR + || skel->kconfig->CONFIG_SECURITY_SELINUX + || skel->kconfig->CONFIG_SECURITY_SMACK) + /* AF_UNIX+SOCK_RAW if already have non-bpf lsms installed */ + ASSERT_EQ(skel->bss->called_socket_post_create2, 6, "called_create2"); + else + ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2"); /* start_server * bind(ETH_P_ALL) diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c index fdcea7a61491..0d66b1524208 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_kptr.c +++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c @@ -105,7 +105,7 @@ static void test_map_kptr_success(bool test_run) ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval"); if (test_run) - return; + goto exit; ret = bpf_map__update_elem(skel->maps.array_map, &key, sizeof(key), buf, sizeof(buf), 0); @@ -132,6 +132,7 @@ static void test_map_kptr_success(bool test_run) ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0); ASSERT_OK(ret, "lru_hash_map delete"); +exit: map_kptr__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c index 6d0e50dcf47c..7fc01ff490db 100644 --- a/tools/testing/selftests/bpf/prog_tests/module_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c @@ -103,6 +103,13 @@ void test_module_attach(void) ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module"); bpf_link__destroy(link); + link = bpf_program__attach(skel->progs.kprobe_multi); + if (!ASSERT_OK_PTR(link, "attach_kprobe_multi")) + goto cleanup; + + ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module"); + bpf_link__destroy(link); + cleanup: test_module_attach__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c new file mode 100644 index 000000000000..447d8560ecb6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. 
and affiliates.*/ + +#define _GNU_SOURCE +#include <unistd.h> +#include <sys/syscall.h> +#include <sys/types.h> +#include <test_progs.h> +#include <bpf/btf.h> +#include "rcu_read_lock.skel.h" +#include "cgroup_helpers.h" + +static unsigned long long cgroup_id; + +static void test_success(void) +{ + struct rcu_read_lock *skel; + int err; + + skel = rcu_read_lock__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->bss->target_pid = syscall(SYS_gettid); + + bpf_program__set_autoload(skel->progs.get_cgroup_id, true); + bpf_program__set_autoload(skel->progs.task_succ, true); + bpf_program__set_autoload(skel->progs.no_lock, true); + bpf_program__set_autoload(skel->progs.two_regions, true); + bpf_program__set_autoload(skel->progs.non_sleepable_1, true); + bpf_program__set_autoload(skel->progs.non_sleepable_2, true); + err = rcu_read_lock__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto out; + + err = rcu_read_lock__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + syscall(SYS_getpgid); + + ASSERT_EQ(skel->bss->task_storage_val, 2, "task_storage_val"); + ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id"); +out: + rcu_read_lock__destroy(skel); +} + +static void test_rcuptr_acquire(void) +{ + struct rcu_read_lock *skel; + int err; + + skel = rcu_read_lock__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->bss->target_pid = syscall(SYS_gettid); + + bpf_program__set_autoload(skel->progs.task_acquire, true); + err = rcu_read_lock__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto out; + + err = rcu_read_lock__attach(skel); + ASSERT_OK(err, "skel_attach"); +out: + rcu_read_lock__destroy(skel); +} + +static const char * const inproper_region_tests[] = { + "miss_lock", + "miss_unlock", + "non_sleepable_rcu_mismatch", + "inproper_sleepable_helper", + "inproper_sleepable_kfunc", + "nested_rcu_region", +}; + +static void test_inproper_region(void) +{ + struct rcu_read_lock *skel; + struct bpf_program *prog; + int i, err; + + for (i = 0; i < ARRAY_SIZE(inproper_region_tests); i++) { + skel = rcu_read_lock__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, inproper_region_tests[i]); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto out; + bpf_program__set_autoload(prog, true); + err = rcu_read_lock__load(skel); + ASSERT_ERR(err, "skel_load"); +out: + rcu_read_lock__destroy(skel); + } +} + +static const char * const rcuptr_misuse_tests[] = { + "task_untrusted_non_rcuptr", + "task_untrusted_rcuptr", + "cross_rcu_region", +}; + +static void test_rcuptr_misuse(void) +{ + struct rcu_read_lock *skel; + struct bpf_program *prog; + int i, err; + + for (i = 0; i < ARRAY_SIZE(rcuptr_misuse_tests); i++) { + skel = rcu_read_lock__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, rcuptr_misuse_tests[i]); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto out; + bpf_program__set_autoload(prog, true); + err = rcu_read_lock__load(skel); + ASSERT_ERR(err, "skel_load"); +out: + rcu_read_lock__destroy(skel); + } +} + +void test_rcu_read_lock(void) +{ + struct btf *vmlinux_btf; + int cgroup_fd; + + vmlinux_btf = btf__load_vmlinux_btf(); + if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF")) + return; + if (btf__find_by_name_kind(vmlinux_btf, "rcu", BTF_KIND_TYPE_TAG) < 0) { + test__skip(); + goto out; + } + + cgroup_fd = test__join_cgroup("/rcu_read_lock"); + if 
(!ASSERT_GE(cgroup_fd, 0, "join_cgroup /rcu_read_lock")) + goto out; + + cgroup_id = get_cgroup_id("/rcu_read_lock"); + if (test__start_subtest("success")) + test_success(); + if (test__start_subtest("rcuptr_acquire")) + test_rcuptr_acquire(); + if (test__start_subtest("negative_tests_inproper_region")) + test_inproper_region(); + if (test__start_subtest("negative_tests_rcuptr_misuse")) + test_rcuptr_misuse(); + close(cgroup_fd); +out: + btf__free(vmlinux_btf); +} diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c index 9a80fe8a6427..ac104dc652e3 100644 --- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c @@ -13,6 +13,7 @@ #include <linux/perf_event.h> #include <linux/ring_buffer.h> #include "test_ringbuf.lskel.h" +#include "test_ringbuf_map_key.lskel.h" #define EDONE 7777 @@ -58,6 +59,7 @@ static int process_sample(void *ctx, void *data, size_t len) } } +static struct test_ringbuf_map_key_lskel *skel_map_key; static struct test_ringbuf_lskel *skel; static struct ring_buffer *ringbuf; @@ -81,7 +83,7 @@ static void *poll_thread(void *input) return (void *)(long)ring_buffer__poll(ringbuf, timeout); } -void test_ringbuf(void) +static void ringbuf_subtest(void) { const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample); pthread_t thread; @@ -297,3 +299,65 @@ cleanup: ring_buffer__free(ringbuf); test_ringbuf_lskel__destroy(skel); } + +static int process_map_key_sample(void *ctx, void *data, size_t len) +{ + struct sample *s; + int err, val; + + s = data; + switch (s->seq) { + case 1: + ASSERT_EQ(s->value, 42, "sample_value"); + err = bpf_map_lookup_elem(skel_map_key->maps.hash_map.map_fd, + s, &val); + ASSERT_OK(err, "hash_map bpf_map_lookup_elem"); + ASSERT_EQ(val, 1, "hash_map val"); + return -EDONE; + default: + return 0; + } +} + +static void ringbuf_map_key_subtest(void) +{ + int err; + + skel_map_key = test_ringbuf_map_key_lskel__open(); + if (!ASSERT_OK_PTR(skel_map_key, "test_ringbuf_map_key_lskel__open")) + return; + + skel_map_key->maps.ringbuf.max_entries = getpagesize(); + skel_map_key->bss->pid = getpid(); + + err = test_ringbuf_map_key_lskel__load(skel_map_key); + if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__load")) + goto cleanup; + + ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd, + process_map_key_sample, NULL, NULL); + if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new")) + goto cleanup; + + err = test_ringbuf_map_key_lskel__attach(skel_map_key); + if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__attach")) + goto cleanup_ringbuf; + + syscall(__NR_getpgid); + ASSERT_EQ(skel_map_key->bss->seq, 1, "skel_map_key->bss->seq"); + err = ring_buffer__poll(ringbuf, -1); + ASSERT_EQ(err, -EDONE, "ring_buffer__poll"); + +cleanup_ringbuf: + ring_buffer__free(ringbuf); +cleanup: + test_ringbuf_map_key_lskel__destroy(skel_map_key); +} + +void test_ringbuf(void) +{ + if (test__start_subtest("ringbuf")) + ringbuf_subtest(); + if (test__start_subtest("ringbuf_map_key")) + ringbuf_map_key_subtest(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c index 99dac5292b41..bc6817aee9aa 100644 --- a/tools/testing/selftests/bpf/prog_tests/skeleton.c +++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019 Facebook */ #include <test_progs.h> +#include <sys/mman.h> struct s { int a; @@ -22,7 +23,8 @@ void test_skeleton(void) struct test_skeleton__kconfig *kcfg; 
const void *elf_bytes; size_t elf_bytes_sz = 0; - int i; + void *m; + int i, fd; skel = test_skeleton__open(); if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) @@ -124,6 +126,13 @@ void test_skeleton(void) ASSERT_EQ(bss->huge_arr[ARRAY_SIZE(bss->huge_arr) - 1], 123, "huge_arr"); + fd = bpf_map__fd(skel->maps.data_non_mmapable); + m = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0); + if (!ASSERT_EQ(m, MAP_FAILED, "unexpected_mmap_success")) + munmap(m, getpagesize()); + + ASSERT_EQ(bpf_map__map_flags(skel->maps.data_non_mmapable), 0, "non_mmap_flags"); + elf_bytes = test_skeleton__elf_bytes(&elf_bytes_sz); ASSERT_OK_PTR(elf_bytes, "elf_bytes"); ASSERT_GE(elf_bytes_sz, 0, "elf_bytes_sz"); diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c new file mode 100644 index 000000000000..d9270bd3d920 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +#include "test_spin_lock.skel.h" +#include "test_spin_lock_fail.skel.h" + +static char log_buf[1024 * 1024]; + +static struct { + const char *prog_name; + const char *err_msg; +} spin_lock_fail_tests[] = { + { "lock_id_kptr_preserve", + "5: (bf) r1 = r0 ; R0_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) " + "R1_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n" + "R1 type=ptr_ expected=percpu_ptr_" }, + { "lock_id_global_zero", + "; R1_w=map_value(off=0,ks=4,vs=4,imm=0)\n2: (85) call bpf_this_cpu_ptr#154\n" + "R1 type=map_value expected=percpu_ptr_" }, + { "lock_id_mapval_preserve", + "8: (bf) r1 = r0 ; R0_w=map_value(id=1,off=0,ks=4,vs=8,imm=0) " + "R1_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)\n9: (85) call bpf_this_cpu_ptr#154\n" + "R1 type=map_value expected=percpu_ptr_" }, + { "lock_id_innermapval_preserve", + "13: (bf) r1 = r0 ; R0=map_value(id=2,off=0,ks=4,vs=8,imm=0) " + "R1_w=map_value(id=2,off=0,ks=4,vs=8,imm=0)\n14: (85) call bpf_this_cpu_ptr#154\n" + "R1 type=map_value expected=percpu_ptr_" }, + { "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_kptr_global", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_kptr_mapval", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_kptr_innermapval", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_global_global", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_global_kptr", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_global_mapval", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_global_innermapval", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_mapval_mapval", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_mapval_kptr", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_mapval_global", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_mapval_innermapval", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_innermapval_innermapval1", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_innermapval_innermapval2", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_innermapval_kptr", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_innermapval_global", "bpf_spin_unlock of different lock" }, + { "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" }, +}; + +static void test_spin_lock_fail_prog(const char *prog_name, const char *err_msg) +{ 
+ LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf, + .kernel_log_size = sizeof(log_buf), + .kernel_log_level = 1); + struct test_spin_lock_fail *skel; + struct bpf_program *prog; + int ret; + + skel = test_spin_lock_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "test_spin_lock_fail__open_opts")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto end; + + bpf_program__set_autoload(prog, true); + + ret = test_spin_lock_fail__load(skel); + if (!ASSERT_ERR(ret, "test_spin_lock_fail__load must fail")) + goto end; + + /* Skip check if JIT does not support kfuncs */ + if (strstr(log_buf, "JIT does not support calling kernel function")) { + test__skip(); + goto end; + } + + if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) { + fprintf(stderr, "Expected: %s\n", err_msg); + fprintf(stderr, "Verifier: %s\n", log_buf); + } + +end: + test_spin_lock_fail__destroy(skel); +} + +static void *spin_lock_thread(void *arg) +{ + int err, prog_fd = *(u32 *) arg; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 10000, + ); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_OK(topts.retval, "test_run retval"); + pthread_exit(arg); +} + +void test_spin_lock_success(void) +{ + struct test_spin_lock *skel; + pthread_t thread_id[4]; + int prog_fd, i; + void *ret; + + skel = test_spin_lock__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_spin_lock__open_and_load")) + return; + prog_fd = bpf_program__fd(skel->progs.bpf_spin_lock_test); + for (i = 0; i < 4; i++) { + int err; + + err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd); + if (!ASSERT_OK(err, "pthread_create")) + goto end; + } + + for (i = 0; i < 4; i++) { + if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join")) + goto end; + if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd")) + goto end; + } +end: + test_spin_lock__destroy(skel); +} + +void test_spin_lock(void) +{ + int i; + + test_spin_lock_success(); + + for (i = 0; i < ARRAY_SIZE(spin_lock_fail_tests); i++) { + if (!test__start_subtest(spin_lock_fail_tests[i].prog_name)) + continue; + test_spin_lock_fail_prog(spin_lock_fail_tests[i].prog_name, + spin_lock_fail_tests[i].err_msg); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c deleted file mode 100644 index 15eb1372d771..000000000000 --- a/tools/testing/selftests/bpf/prog_tests/spinlock.c +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <test_progs.h> -#include <network_helpers.h> - -static void *spin_lock_thread(void *arg) -{ - int err, prog_fd = *(u32 *) arg; - LIBBPF_OPTS(bpf_test_run_opts, topts, - .data_in = &pkt_v4, - .data_size_in = sizeof(pkt_v4), - .repeat = 10000, - ); - - err = bpf_prog_test_run_opts(prog_fd, &topts); - ASSERT_OK(err, "test_run"); - ASSERT_OK(topts.retval, "test_run retval"); - pthread_exit(arg); -} - -void test_spinlock(void) -{ - const char *file = "./test_spin_lock.bpf.o"; - pthread_t thread_id[4]; - struct bpf_object *obj = NULL; - int prog_fd; - int err = 0, i; - void *ret; - - err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); - if (CHECK_FAIL(err)) { - printf("test_spin_lock:bpf_prog_test_load errno %d\n", errno); - goto close_prog; - } - for (i = 0; i < 4; i++) - if (CHECK_FAIL(pthread_create(&thread_id[i], NULL, - &spin_lock_thread, 
&prog_fd))) - goto close_prog; - - for (i = 0; i < 4; i++) - if (CHECK_FAIL(pthread_join(thread_id[i], &ret) || - ret != (void *)&prog_fd)) - goto close_prog; -close_prog: - bpf_object__close(obj); -} diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c new file mode 100644 index 000000000000..ffd8ef4303c8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#define _GNU_SOURCE +#include <sys/wait.h> +#include <test_progs.h> +#include <unistd.h> + +#include "task_kfunc_failure.skel.h" +#include "task_kfunc_success.skel.h" + +static size_t log_buf_sz = 1 << 20; /* 1 MB */ +static char obj_log_buf[1048576]; + +static struct task_kfunc_success *open_load_task_kfunc_skel(void) +{ + struct task_kfunc_success *skel; + int err; + + skel = task_kfunc_success__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return NULL; + + skel->bss->pid = getpid(); + + err = task_kfunc_success__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + return skel; + +cleanup: + task_kfunc_success__destroy(skel); + return NULL; +} + +static void run_success_test(const char *prog_name) +{ + struct task_kfunc_success *skel; + int status; + pid_t child_pid; + struct bpf_program *prog; + struct bpf_link *link = NULL; + + skel = open_load_task_kfunc_skel(); + if (!ASSERT_OK_PTR(skel, "open_load_skel")) + return; + + if (!ASSERT_OK(skel->bss->err, "pre_spawn_err")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "attached_link")) + goto cleanup; + + child_pid = fork(); + if (!ASSERT_GT(child_pid, -1, "child_pid")) + goto cleanup; + if (child_pid == 0) + _exit(0); + waitpid(child_pid, &status, 0); + + ASSERT_OK(skel->bss->err, "post_wait_err"); + +cleanup: + bpf_link__destroy(link); + task_kfunc_success__destroy(skel); +} + +static const char * const success_tests[] = { + "test_task_acquire_release_argument", + "test_task_acquire_release_current", + "test_task_acquire_leave_in_map", + "test_task_xchg_release", + "test_task_get_release", + "test_task_current_acquire_release", + "test_task_from_pid_arg", + "test_task_from_pid_current", + "test_task_from_pid_invalid", +}; + +static struct { + const char *prog_name; + const char *expected_err_msg; +} failure_tests[] = { + {"task_kfunc_acquire_untrusted", "R1 must be referenced or trusted"}, + {"task_kfunc_acquire_fp", "arg#0 pointer type STRUCT task_struct must point"}, + {"task_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"}, + {"task_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"}, + {"task_kfunc_acquire_null", "arg#0 pointer type STRUCT task_struct must point"}, + {"task_kfunc_acquire_unreleased", "Unreleased reference"}, + {"task_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"}, + {"task_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"}, + {"task_kfunc_get_null", "arg#0 expected pointer to map value"}, + {"task_kfunc_xchg_unreleased", "Unreleased reference"}, + {"task_kfunc_get_unreleased", "Unreleased reference"}, + {"task_kfunc_release_untrusted", "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket"}, + {"task_kfunc_release_fp", "arg#0 pointer type STRUCT task_struct must point"}, + 
{"task_kfunc_release_null", "arg#0 is ptr_or_null_ expected ptr_ or socket"}, + {"task_kfunc_release_unacquired", "release kernel function bpf_task_release expects"}, + {"task_kfunc_from_pid_no_null_check", "arg#0 is ptr_or_null_ expected ptr_ or socket"}, +}; + +static void verify_fail(const char *prog_name, const char *expected_err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + struct task_kfunc_failure *skel; + int err, i; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 1; + + skel = task_kfunc_failure__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "task_kfunc_failure__open_opts")) + goto cleanup; + + for (i = 0; i < ARRAY_SIZE(failure_tests); i++) { + struct bpf_program *prog; + const char *curr_name = failure_tests[i].prog_name; + + prog = bpf_object__find_program_by_name(skel->obj, curr_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name)); + } + + err = task_kfunc_failure__load(skel); + if (!ASSERT_ERR(err, "unexpected load success")) + goto cleanup; + + if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { + fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); + fprintf(stderr, "Verifier output: %s\n", obj_log_buf); + } + +cleanup: + task_kfunc_failure__destroy(skel); +} + +void test_task_kfunc(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(success_tests); i++) { + if (!test__start_subtest(success_tests[i])) + continue; + + run_success_test(success_tests[i]); + } + + for (i = 0; i < ARRAY_SIZE(failure_tests); i++) { + if (!test__start_subtest(failure_tests[i].prog_name)) + continue; + + verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c index 035c263aab1b..a176bd75a748 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c +++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c @@ -3,12 +3,16 @@ #define _GNU_SOURCE /* See feature_test_macros(7) */ #include <unistd.h> +#include <sched.h> +#include <pthread.h> #include <sys/syscall.h> /* For SYS_xxx definitions */ #include <sys/types.h> #include <test_progs.h> +#include "task_local_storage_helpers.h" #include "task_local_storage.skel.h" #include "task_local_storage_exit_creds.skel.h" #include "task_ls_recursion.skel.h" +#include "task_storage_nodeadlock.skel.h" static void test_sys_enter_exit(void) { @@ -39,7 +43,8 @@ out: static void test_exit_creds(void) { struct task_local_storage_exit_creds *skel; - int err; + int err, run_count, sync_rcu_calls = 0; + const int MAX_SYNC_RCU_CALLS = 1000; skel = task_local_storage_exit_creds__open_and_load(); if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) @@ -53,8 +58,19 @@ static void test_exit_creds(void) if (CHECK_FAIL(system("ls > /dev/null"))) goto out; - /* sync rcu to make sure exit_creds() is called for "ls" */ - kern_sync_rcu(); + /* kern_sync_rcu is not enough on its own as the read section we want + * to wait for may start after we enter synchronize_rcu, so our call + * won't wait for the section to finish. Loop on the run counter + * as well to ensure the program has run. 
+ */ + do { + kern_sync_rcu(); + run_count = __atomic_load_n(&skel->bss->run_count, __ATOMIC_SEQ_CST); + } while (run_count == 0 && ++sync_rcu_calls < MAX_SYNC_RCU_CALLS); + + ASSERT_NEQ(sync_rcu_calls, MAX_SYNC_RCU_CALLS, + "sync_rcu count too high"); + ASSERT_NEQ(run_count, 0, "run_count"); ASSERT_EQ(skel->bss->valid_ptr_count, 0, "valid_ptr_count"); ASSERT_NEQ(skel->bss->null_ptr_count, 0, "null_ptr_count"); out: @@ -63,24 +79,160 @@ out: static void test_recursion(void) { + int err, map_fd, prog_fd, task_fd; struct task_ls_recursion *skel; - int err; + struct bpf_prog_info info; + __u32 info_len = sizeof(info); + long value; + + task_fd = sys_pidfd_open(getpid(), 0); + if (!ASSERT_NEQ(task_fd, -1, "sys_pidfd_open")) + return; skel = task_ls_recursion__open_and_load(); if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) - return; + goto out; err = task_ls_recursion__attach(skel); if (!ASSERT_OK(err, "skel_attach")) goto out; /* trigger sys_enter, make sure it does not cause deadlock */ + skel->bss->test_pid = getpid(); syscall(SYS_gettid); + skel->bss->test_pid = 0; + task_ls_recursion__detach(skel); + + /* Refer to the comment in BPF_PROG(on_update) for + * the explanation on the value 201 and 100. + */ + map_fd = bpf_map__fd(skel->maps.map_a); + err = bpf_map_lookup_elem(map_fd, &task_fd, &value); + ASSERT_OK(err, "lookup map_a"); + ASSERT_EQ(value, 201, "map_a value"); + ASSERT_EQ(skel->bss->nr_del_errs, 1, "bpf_task_storage_delete busy"); + + map_fd = bpf_map__fd(skel->maps.map_b); + err = bpf_map_lookup_elem(map_fd, &task_fd, &value); + ASSERT_OK(err, "lookup map_b"); + ASSERT_EQ(value, 100, "map_b value"); + + prog_fd = bpf_program__fd(skel->progs.on_lookup); + memset(&info, 0, sizeof(info)); + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + ASSERT_OK(err, "get prog info"); + ASSERT_GT(info.recursion_misses, 0, "on_lookup prog recursion"); + + prog_fd = bpf_program__fd(skel->progs.on_update); + memset(&info, 0, sizeof(info)); + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + ASSERT_OK(err, "get prog info"); + ASSERT_EQ(info.recursion_misses, 0, "on_update prog recursion"); + + prog_fd = bpf_program__fd(skel->progs.on_enter); + memset(&info, 0, sizeof(info)); + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + ASSERT_OK(err, "get prog info"); + ASSERT_EQ(info.recursion_misses, 0, "on_enter prog recursion"); out: + close(task_fd); task_ls_recursion__destroy(skel); } +static bool stop; + +static void waitall(const pthread_t *tids, int nr) +{ + int i; + + stop = true; + for (i = 0; i < nr; i++) + pthread_join(tids[i], NULL); +} + +static void *sock_create_loop(void *arg) +{ + struct task_storage_nodeadlock *skel = arg; + int fd; + + while (!stop) { + fd = socket(AF_INET, SOCK_STREAM, 0); + close(fd); + if (skel->bss->nr_get_errs || skel->bss->nr_del_errs) + stop = true; + } + + return NULL; +} + +static void test_nodeadlock(void) +{ + struct task_storage_nodeadlock *skel; + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + const int nr_threads = 32; + pthread_t tids[nr_threads]; + int i, prog_fd, err; + cpu_set_t old, new; + + /* Pin all threads to one cpu to increase the chance of preemption + * in a sleepable bpf prog. 
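 * (The affinity dance below, condensed into a standalone sketch -- helper
 * name invented, not from the patch:)
 */

#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>

/* Pin the whole process to CPU 0; *old receives the previous mask so the
 * caller can restore it once the test is done.
 */
static int pin_self_to_cpu0(cpu_set_t *old)
{
	cpu_set_t new;

	CPU_ZERO(&new);
	CPU_SET(0, &new);
	if (sched_getaffinity(getpid(), sizeof(*old), old))
		return -1;
	return sched_setaffinity(getpid(), sizeof(new), &new);
}

/* Inlined in the test: save the old mask, pin to CPU 0, restore at "done":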
+ */ + CPU_ZERO(&new); + CPU_SET(0, &new); + err = sched_getaffinity(getpid(), sizeof(old), &old); + if (!ASSERT_OK(err, "getaffinity")) + return; + err = sched_setaffinity(getpid(), sizeof(new), &new); + if (!ASSERT_OK(err, "setaffinity")) + return; + + skel = task_storage_nodeadlock__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + goto done; + + /* Unnecessary recursion and deadlock detection are reproducible + * in the preemptible kernel. + */ + if (!skel->kconfig->CONFIG_PREEMPT) { + test__skip(); + goto done; + } + + err = task_storage_nodeadlock__attach(skel); + ASSERT_OK(err, "attach prog"); + + for (i = 0; i < nr_threads; i++) { + err = pthread_create(&tids[i], NULL, sock_create_loop, skel); + if (err) { + /* Only assert once here to avoid excessive + * PASS printing during test failure. + */ + ASSERT_OK(err, "pthread_create"); + waitall(tids, i); + goto done; + } + } + + /* With 32 threads, 1s is enough to reproduce the issue */ + sleep(1); + waitall(tids, nr_threads); + + info_len = sizeof(info); + prog_fd = bpf_program__fd(skel->progs.socket_post_create); + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + ASSERT_OK(err, "get prog info"); + ASSERT_EQ(info.recursion_misses, 0, "prog recursion"); + + ASSERT_EQ(skel->bss->nr_get_errs, 0, "bpf_task_storage_get busy"); + ASSERT_EQ(skel->bss->nr_del_errs, 0, "bpf_task_storage_delete busy"); + +done: + task_storage_nodeadlock__destroy(skel); + sched_setaffinity(getpid(), sizeof(old), &old); +} + void test_task_local_storage(void) { if (test__start_subtest("sys_enter_exit")) @@ -89,4 +241,6 @@ void test_task_local_storage(void) test_exit_creds(); if (test__start_subtest("recursion")) test_recursion(); + if (test__start_subtest("nodeadlock")) + test_nodeadlock(); } diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c index 617bbce6ef8f..5cf85d0f9827 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c @@ -485,7 +485,7 @@ static void misc(void) goto check_linum; ret = read(sk_fds.passive_fd, recv_msg, sizeof(recv_msg)); - if (ASSERT_EQ(ret, sizeof(send_msg), "read(msg)")) + if (!ASSERT_EQ(ret, sizeof(send_msg), "read(msg)")) goto check_linum; } @@ -505,6 +505,8 @@ static void misc(void) ASSERT_EQ(misc_skel->bss->nr_fin, 1, "unexpected nr_fin"); + ASSERT_EQ(misc_skel->bss->nr_hwtstamp, 0, "nr_hwtstamp"); + check_linum: ASSERT_FALSE(check_error_linum(&sk_fds), "check_error_linum"); sk_fds_close(&sk_fds); @@ -539,7 +541,7 @@ void test_tcp_hdr_options(void) goto skel_destroy; cg_fd = test__join_cgroup(CG_NAME); - if (ASSERT_GE(cg_fd, 0, "join_cgroup")) + if (!ASSERT_GE(cg_fd, 0, "join_cgroup")) goto skel_destroy; for (i = 0; i < ARRAY_SIZE(tests); i++) { diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c index d5022b91d1e4..48dc9472e160 100644 --- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c +++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c @@ -15,7 +15,7 @@ static void test_fentry(void) err = tracing_struct__attach(skel); if (!ASSERT_OK(err, "tracing_struct__attach")) - return; + goto destroy_skel; ASSERT_OK(trigger_module_test_read(256), "trigger_read"); @@ -54,6 +54,7 @@ static void test_fentry(void) ASSERT_EQ(skel->bss->t5_ret, 1, "t5 ret"); tracing_struct__detach(skel); +destroy_skel: tracing_struct__destroy(skel); } diff --git 
diff --git a/tools/testing/selftests/bpf/prog_tests/type_cast.c b/tools/testing/selftests/bpf/prog_tests/type_cast.c
new file mode 100644
index 000000000000..9317d5fa2635
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/type_cast.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "type_cast.skel.h"
+
+static void test_xdp(void)
+{
+	struct type_cast *skel;
+	int err, prog_fd;
+	char buf[128];
+
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		    .data_in = &pkt_v4,
+		    .data_size_in = sizeof(pkt_v4),
+		    .data_out = buf,
+		    .data_size_out = sizeof(buf),
+		    .repeat = 1,
+	);
+
+	skel = type_cast__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		return;
+
+	bpf_program__set_autoload(skel->progs.md_xdp, true);
+	err = type_cast__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto out;
+
+	prog_fd = bpf_program__fd(skel->progs.md_xdp);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(topts.retval, XDP_PASS, "xdp test_run retval");
+
+	ASSERT_EQ(skel->bss->ifindex, 1, "xdp_md ifindex");
+	ASSERT_EQ(skel->bss->ifindex, skel->bss->ingress_ifindex, "xdp_md ingress_ifindex");
+	ASSERT_STREQ(skel->bss->name, "lo", "xdp_md name");
+	ASSERT_NEQ(skel->bss->inum, 0, "xdp_md inum");
+
+out:
+	type_cast__destroy(skel);
+}
+
+static void test_tc(void)
+{
+	struct type_cast *skel;
+	int err, prog_fd;
+
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		    .data_in = &pkt_v4,
+		    .data_size_in = sizeof(pkt_v4),
+		    .repeat = 1,
+	);
+
+	skel = type_cast__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		return;
+
+	bpf_program__set_autoload(skel->progs.md_skb, true);
+	err = type_cast__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto out;
+
+	prog_fd = bpf_program__fd(skel->progs.md_skb);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(topts.retval, 0, "tc test_run retval");
+
+	ASSERT_EQ(skel->bss->meta_len, 0, "skb meta_len");
+	ASSERT_EQ(skel->bss->frag0_len, 0, "skb frag0_len");
+	ASSERT_NEQ(skel->bss->kskb_len, 0, "skb len");
+	ASSERT_NEQ(skel->bss->kskb2_len, 0, "skb2 len");
+	ASSERT_EQ(skel->bss->kskb_len, skel->bss->kskb2_len, "skb len compare");
+
+out:
+	type_cast__destroy(skel);
+}
+
+static const char * const negative_tests[] = {
+	"untrusted_ptr",
+	"kctx_u64",
+};
+
+static void test_negative(void)
+{
+	struct bpf_program *prog;
+	struct type_cast *skel;
+	int i, err;
+
+	for (i = 0; i < ARRAY_SIZE(negative_tests); i++) {
+		skel = type_cast__open();
+		if (!ASSERT_OK_PTR(skel, "skel_open"))
+			return;
+
+		prog = bpf_object__find_program_by_name(skel->obj, negative_tests[i]);
+		if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+			goto out;
+		bpf_program__set_autoload(prog, true);
+		err = type_cast__load(skel);
+		ASSERT_ERR(err, "skel_load");
+out:
+		type_cast__destroy(skel);
+	}
+}
+
+void test_type_cast(void)
+{
+	if (test__start_subtest("xdp"))
+		test_xdp();
+	if (test__start_subtest("tc"))
+		test_tc();
+	if (test__start_subtest("negative"))
+		test_negative();
+}
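
test_negative() above exercises programs that are expected to be rejected by the verifier: they stay out of the skeleton's default load set (presumably declared with a "?"-prefixed SEC() so autoload defaults to off), and the test enables autoload for one program at a time, expecting the load to fail. The same pattern with the generic bpf_object API instead of a skeleton, as a sketch; the object path and program name are placeholders:

#include <bpf/libbpf.h>

/* Load only prog_name from the object at path; return 0 iff the load
 * failed, which is the expected outcome of a negative verifier test.
 */
static int load_should_fail(const char *path, const char *prog_name)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err;

	obj = bpf_object__open(path);
	if (!obj)
		return -1;

	bpf_object__for_each_program(prog, obj)
		bpf_program__set_autoload(prog, false);

	prog = bpf_object__find_program_by_name(obj, prog_name);
	if (!prog) {
		bpf_object__close(obj);
		return -1;
	}
	bpf_program__set_autoload(prog, true);

	err = bpf_object__load(obj);	/* rejection expected */
	bpf_object__close(obj);
	return err ? 0 : -1;
}
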
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
index 9b9cf8458adf..39973ea1ce43 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -18,7 +18,7 @@ static void test_xdp_adjust_tail_shrink(void)
 	);
 
 	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
-	if (ASSERT_OK(err, "test_xdp_adjust_tail_shrink"))
+	if (!ASSERT_OK(err, "test_xdp_adjust_tail_shrink"))
 		return;
 
 	err = bpf_prog_test_run_opts(prog_fd, &topts);
@@ -53,7 +53,7 @@ static void test_xdp_adjust_tail_grow(void)
 	);
 
 	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
-	if (ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
+	if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
 		return;
 
 	err = bpf_prog_test_run_opts(prog_fd, &topts);
@@ -63,6 +63,7 @@ static void test_xdp_adjust_tail_grow(void)
 	expect_sz = sizeof(pkt_v6) + 40; /* Test grow with 40 bytes */
 	topts.data_in = &pkt_v6;
 	topts.data_size_in = sizeof(pkt_v6);
+	topts.data_size_out = sizeof(buf);
 	err = bpf_prog_test_run_opts(prog_fd, &topts);
 	ASSERT_OK(err, "ipv6");
 	ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval");
@@ -89,7 +90,7 @@ static void test_xdp_adjust_tail_grow2(void)
 	);
 
 	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
-	if (ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
+	if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow"))
 		return;
 
 	/* Test case-64 */
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
index a50971c6cf4a..9ac6f6a268db 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
@@ -85,7 +85,7 @@ static void test_max_pkt_size(int fd)
 }
 
 #define NUM_PKTS 10000
-void test_xdp_do_redirect(void)
+void serial_test_xdp_do_redirect(void)
 {
 	int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst;
 	char data[sizeof(pkt_udp) + sizeof(__u32)];
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
index 75550a40e029..13daa3746064 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
@@ -94,12 +94,12 @@ static void test_synproxy(bool xdp)
 	SYS("sysctl -w net.ipv4.tcp_syncookies=2");
 	SYS("sysctl -w net.ipv4.tcp_timestamps=1");
 	SYS("sysctl -w net.netfilter.nf_conntrack_tcp_loose=0");
-	SYS("iptables -t raw -I PREROUTING \
+	SYS("iptables-legacy -t raw -I PREROUTING \
 	    -i tmp1 -p tcp -m tcp --syn --dport 8080 -j CT --notrack");
-	SYS("iptables -t filter -A INPUT \
+	SYS("iptables-legacy -t filter -A INPUT \
 	    -i tmp1 -p tcp -m tcp --dport 8080 -m state --state INVALID,UNTRACKED \
 	    -j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460");
-	SYS("iptables -t filter -A INPUT \
+	SYS("iptables-legacy -t filter -A INPUT \
 	    -i tmp1 -m state --state INVALID -j DROP");
 
 	ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --ports 8080 \
@@ -174,7 +174,7 @@ out:
 	system("ip netns del synproxy");
 }
 
-void test_xdp_synproxy(void)
+void serial_test_xdp_synproxy(void)
 {
 	if (test__start_subtest("xdp"))
 		test_synproxy(true);
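
Most of the one-line hunks above fix an inverted assertion: the ASSERT_* macros in the selftests' test_progs.h evaluate to true when the check passes, so an early bail-out must negate them. An unnegated "if (ASSERT_OK(err, ...)) return;" aborts the test on success and presses on after a failure. A usage sketch in the shape of the calls above, assuming the selftests' test_progs.h environment (which provides ASSERT_OK() and bpf_prog_test_load()); load_or_bail() is an illustrative name:

#include <test_progs.h>

static void load_or_bail(const char *file)
{
	struct bpf_object *obj;
	int err, prog_fd;

	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	/* ASSERT_OK() is true when err == 0; bail only on its negation */
	if (!ASSERT_OK(err, "prog_load"))
		return;

	/* ... exercise prog_fd here ... */

	bpf_object__close(obj);
}

The test_ to serial_test_ renames in xdp_do_redirect.c and xdp_synproxy.c are a separate fix: test_progs runs serial_test_* entry points one at a time rather than in parallel, which these tests need because they reconfigure shared network state.
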