Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/align.c | 42
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/bind_perm.c | 6
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 87
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c | 204
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c | 48
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c | 2
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/btf.c | 1
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c | 158
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c | 2
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c | 33
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/log_buf.c | 4
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/reg_bounds.c | 2124
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/spin_lock.c | 14
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/tc_opts.c | 6
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/vmlinux.c | 16
15 files changed, 2496 insertions, 251 deletions
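
Much of the diff below mechanically converts the selftests' CHECK() calls to the newer ASSERT_*() helpers, which is also why several files drop their "static int duration;" (CHECK() references it, ASSERT_*() does not). As a quick orientation before reading the hunks, here is a minimal sketch of the two styles; it assumes the harness macros from test_progs.h, and open_sock_old()/open_sock_new() are hypothetical helpers used only for illustration, not part of the patch:

#include <sys/socket.h>
#include <unistd.h>
#include <errno.h>
#include <test_progs.h>

static int duration; /* referenced by CHECK(); dropped once CHECK() is gone */

/* old style: CHECK(cond, name, fmt, ...) is true on *failure* and needs a printf-style message */
static void open_sock_old(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (CHECK(fd < 0, "socket", "errno %d\n", errno))
		return;
	close(fd);
}

/* new style: ASSERT_GE(actual, expected, name) is true on *success* and logs the
 * failure itself, so error paths become "if (!ASSERT_GE(...))"
 */
static void open_sock_new(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (!ASSERT_GE(fd, 0, "socket"))
		return;
	close(fd);
}
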
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 465c1c3a3d3c..4ebd0da898f5 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -40,7 +40,7 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {0, "R1", "ctx(off=0,imm=0)"}, + {0, "R1", "ctx()"}, {0, "R10", "fp0"}, {0, "R3_w", "2"}, {1, "R3_w", "4"}, @@ -68,7 +68,7 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {0, "R1", "ctx(off=0,imm=0)"}, + {0, "R1", "ctx()"}, {0, "R10", "fp0"}, {0, "R3_w", "1"}, {1, "R3_w", "2"}, @@ -97,7 +97,7 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {0, "R1", "ctx(off=0,imm=0)"}, + {0, "R1", "ctx()"}, {0, "R10", "fp0"}, {0, "R3_w", "4"}, {1, "R3_w", "8"}, @@ -119,7 +119,7 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {0, "R1", "ctx(off=0,imm=0)"}, + {0, "R1", "ctx()"}, {0, "R10", "fp0"}, {0, "R3_w", "7"}, {1, "R3_w", "7"}, @@ -162,13 +162,13 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {6, "R0_w", "pkt(off=8,r=8,imm=0)"}, + {6, "R0_w", "pkt(off=8,r=8)"}, {6, "R3_w", "var_off=(0x0; 0xff)"}, {7, "R3_w", "var_off=(0x0; 0x1fe)"}, {8, "R3_w", "var_off=(0x0; 0x3fc)"}, {9, "R3_w", "var_off=(0x0; 0x7f8)"}, {10, "R3_w", "var_off=(0x0; 0xff0)"}, - {12, "R3_w", "pkt_end(off=0,imm=0)"}, + {12, "R3_w", "pkt_end()"}, {17, "R4_w", "var_off=(0x0; 0xff)"}, {18, "R4_w", "var_off=(0x0; 0x1fe0)"}, {19, "R4_w", "var_off=(0x0; 0xff0)"}, @@ -235,11 +235,11 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {2, "R5_w", "pkt(off=0,r=0,imm=0)"}, - {4, "R5_w", "pkt(off=14,r=0,imm=0)"}, - {5, "R4_w", "pkt(off=14,r=0,imm=0)"}, - {9, "R2", "pkt(off=0,r=18,imm=0)"}, - {10, "R5", "pkt(off=14,r=18,imm=0)"}, + {2, "R5_w", "pkt(r=0)"}, + {4, "R5_w", "pkt(off=14,r=0)"}, + {5, "R4_w", "pkt(off=14,r=0)"}, + {9, "R2", "pkt(r=18)"}, + {10, "R5", "pkt(off=14,r=18)"}, {10, "R4_w", "var_off=(0x0; 0xff)"}, {13, "R4_w", "var_off=(0x0; 0xffff)"}, {14, "R4_w", "var_off=(0x0; 0xffff)"}, @@ -299,7 +299,7 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. */ - {6, "R2_w", "pkt(off=0,r=8,imm=0)"}, + {6, "R2_w", "pkt(r=8)"}, {7, "R6_w", "var_off=(0x0; 0x3fc)"}, /* Offset is added to packet pointer R5, resulting in * known fixed offset, and variable offset from R6. @@ -337,7 +337,7 @@ static struct bpf_align_test tests[] = { /* Constant offset is added to R5 packet pointer, * resulting in reg->off value of 14. */ - {26, "R5_w", "pkt(off=14,r=8,"}, + {26, "R5_w", "pkt(off=14,r=8)"}, /* Variable offset is added to R5, resulting in a * variable offset of (4n). See comment for insn #18 * for R4 = R5 trick. @@ -397,7 +397,7 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. 
*/ - {6, "R2_w", "pkt(off=0,r=8,imm=0)"}, + {6, "R2_w", "pkt(r=8)"}, {7, "R6_w", "var_off=(0x0; 0x3fc)"}, /* Adding 14 makes R6 be (4n+2) */ {8, "R6_w", "var_off=(0x2; 0x7fc)"}, @@ -459,7 +459,7 @@ static struct bpf_align_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = REJECT, .matches = { - {3, "R5_w", "pkt_end(off=0,imm=0)"}, + {3, "R5_w", "pkt_end()"}, /* (ptr - ptr) << 2 == unknown, (4n) */ {5, "R5_w", "var_off=(0x0; 0xfffffffffffffffc)"}, /* (4n) + 14 == (4n+2). We blow our bounds, because @@ -513,7 +513,7 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. */ - {6, "R2_w", "pkt(off=0,r=8,imm=0)"}, + {6, "R2_w", "pkt(r=8)"}, {8, "R6_w", "var_off=(0x0; 0x3fc)"}, /* Adding 14 makes R6 be (4n+2) */ {9, "R6_w", "var_off=(0x2; 0x7fc)"}, @@ -566,7 +566,7 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. */ - {6, "R2_w", "pkt(off=0,r=8,imm=0)"}, + {6, "R2_w", "pkt(r=8)"}, {9, "R6_w", "var_off=(0x0; 0x3c)"}, /* Adding 14 makes R6 be (4n+2) */ {10, "R6_w", "var_off=(0x2; 0x7c)"}, @@ -659,14 +659,14 @@ static int do_test_single(struct bpf_align_test *test) /* Check the next line as well in case the previous line * did not have a corresponding bpf insn. Example: * func#0 @0 - * 0: R1=ctx(off=0,imm=0) R10=fp0 + * 0: R1=ctx() R10=fp0 * 0: (b7) r3 = 2 ; R3_w=2 * * Sometimes it's actually two lines below, e.g. when * searching for "6: R3_w=scalar(umax=255,var_off=(0x0; 0xff))": - * from 4 to 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0 - * 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0 - * 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(off=0,r=8,imm=0) R3_w=scalar(umax=255,var_off=(0x0; 0xff)) + * from 4 to 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0 + * 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0 + * 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(r=8) R3_w=scalar(umax=255,var_off=(0x0; 0xff)) */ while (!(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) { cur_line = -1; diff --git a/tools/testing/selftests/bpf/prog_tests/bind_perm.c b/tools/testing/selftests/bpf/prog_tests/bind_perm.c index a1766a298bb7..f7cd129cb82b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bind_perm.c +++ b/tools/testing/selftests/bpf/prog_tests/bind_perm.c @@ -9,8 +9,6 @@ #include "cap_helpers.h" #include "bind_perm.skel.h" -static int duration; - static int create_netns(void) { if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) @@ -27,7 +25,7 @@ void try_bind(int family, int port, int expected_errno) int fd = -1; fd = socket(family, SOCK_STREAM, 0); - if (CHECK(fd < 0, "fd", "errno %d", errno)) + if (!ASSERT_GE(fd, 0, "socket")) goto close_socket; if (family == AF_INET) { @@ -60,7 +58,7 @@ void test_bind_perm(void) return; cgroup_fd = test__join_cgroup("/bind_perm"); - if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno)) + if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup")) return; skel = bind_perm__open_and_load(); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index e3498f607b49..618af9dfae9b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -34,8 +34,6 @@ #include "bpf_iter_ksym.skel.h" #include "bpf_iter_sockmap.skel.h" -static int duration; - static void 
test_btf_id_or_null(void) { struct bpf_iter_test_kern3 *skel; @@ -64,7 +62,7 @@ static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_ /* not check contents, but ensure read() ends without error */ while ((len = read(iter_fd, buf, sizeof(buf))) > 0) ; - CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)); + ASSERT_GE(len, 0, "read"); close(iter_fd); @@ -334,6 +332,8 @@ static void test_task_stack(void) do_dummy_read(skel->progs.dump_task_stack); do_dummy_read(skel->progs.get_task_user_stacks); + ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks"); + bpf_iter_task_stack__destroy(skel); } @@ -413,7 +413,7 @@ static int do_btf_read(struct bpf_iter_task_btf *skel) goto free_link; } - if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno))) + if (!ASSERT_GE(err, 0, "read")) goto free_link; ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)", @@ -526,11 +526,11 @@ static int do_read_with_fd(int iter_fd, const char *expected, start = 0; while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) { start += len; - if (CHECK(start >= 16, "read", "read len %d\n", len)) + if (!ASSERT_LT(start, 16, "read")) return -1; read_buf_len = read_one_char ? 1 : 16 - start; } - if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + if (!ASSERT_GE(len, 0, "read")) return -1; if (!ASSERT_STREQ(buf, expected, "read")) @@ -571,8 +571,7 @@ static int do_read(const char *path, const char *expected) int err, iter_fd; iter_fd = open(path, O_RDONLY); - if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n", - path, strerror(errno))) + if (!ASSERT_GE(iter_fd, 0, "open")) return -1; err = do_read_with_fd(iter_fd, expected, false); @@ -600,7 +599,7 @@ static void test_file_iter(void) unlink(path); err = bpf_link__pin(link, path); - if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err)) + if (!ASSERT_OK(err, "pin_iter")) goto free_link; err = do_read(path, "abcd"); @@ -651,12 +650,10 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) * overflow and needs restart. 
*/ map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); - if (CHECK(map1_fd < 0, "bpf_map_create", - "map_creation failed: %s\n", strerror(errno))) + if (!ASSERT_GE(map1_fd, 0, "bpf_map_create")) goto out; map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); - if (CHECK(map2_fd < 0, "bpf_map_create", - "map_creation failed: %s\n", strerror(errno))) + if (!ASSERT_GE(map2_fd, 0, "bpf_map_create")) goto free_map1; /* bpf_seq_printf kernel buffer is 8 pages, so one map @@ -685,14 +682,12 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) /* setup filtering map_id in bpf program */ map_info_len = sizeof(map_info); err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len); - if (CHECK(err, "get_map_info", "get map info failed: %s\n", - strerror(errno))) + if (!ASSERT_OK(err, "get_map_info")) goto free_map2; skel->bss->map1_id = map_info.id; err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len); - if (CHECK(err, "get_map_info", "get map info failed: %s\n", - strerror(errno))) + if (!ASSERT_OK(err, "get_map_info")) goto free_map2; skel->bss->map2_id = map_info.id; @@ -705,7 +700,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) goto free_link; buf = malloc(expected_read_len); - if (!buf) + if (!ASSERT_OK_PTR(buf, "malloc")) goto close_iter; /* do read */ @@ -714,16 +709,14 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) while ((len = read(iter_fd, buf, expected_read_len)) > 0) total_read_len += len; - CHECK(len != -1 || errno != E2BIG, "read", - "expected ret -1, errno E2BIG, but get ret %d, error %s\n", - len, strerror(errno)); + ASSERT_EQ(len, -1, "read"); + ASSERT_EQ(errno, E2BIG, "read"); goto free_buf; } else if (!ret1) { while ((len = read(iter_fd, buf, expected_read_len)) > 0) total_read_len += len; - if (CHECK(len < 0, "read", "read failed: %s\n", - strerror(errno))) + if (!ASSERT_GE(len, 0, "read")) goto free_buf; } else { do { @@ -732,8 +725,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) total_read_len += len; } while (len > 0 || len == -EAGAIN); - if (CHECK(len < 0, "read", "read failed: %s\n", - strerror(errno))) + if (!ASSERT_GE(len, 0, "read")) goto free_buf; } @@ -836,7 +828,7 @@ static void test_bpf_hash_map(void) /* do some tests */ while ((len = read(iter_fd, buf, sizeof(buf))) > 0) ; - if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + if (!ASSERT_GE(len, 0, "read")) goto close_iter; /* test results */ @@ -878,6 +870,8 @@ static void test_bpf_percpu_hash_map(void) skel->rodata->num_cpus = bpf_num_possible_cpus(); val = malloc(8 * bpf_num_possible_cpus()); + if (!ASSERT_OK_PTR(val, "malloc")) + goto out; err = bpf_iter_bpf_percpu_hash_map__load(skel); if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__load")) @@ -917,7 +911,7 @@ static void test_bpf_percpu_hash_map(void) /* do some tests */ while ((len = read(iter_fd, buf, sizeof(buf))) > 0) ; - if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + if (!ASSERT_GE(len, 0, "read")) goto close_iter; /* test results */ @@ -983,17 +977,14 @@ static void test_bpf_array_map(void) start = 0; while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0) start += len; - if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + if (!ASSERT_GE(len, 0, "read")) goto close_iter; /* test results */ res_first_key = *(__u32 *)buf; res_first_val = *(__u64 *)(buf + sizeof(__u32)); - if (CHECK(res_first_key != 0 || res_first_val != first_val, - "bpf_seq_write", - "seq_write failure: 
first key %u vs expected 0, " - " first value %llu vs expected %llu\n", - res_first_key, res_first_val, first_val)) + if (!ASSERT_EQ(res_first_key, 0, "bpf_seq_write") || + !ASSERT_EQ(res_first_val, first_val, "bpf_seq_write")) goto close_iter; if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum")) @@ -1057,6 +1048,8 @@ static void test_bpf_percpu_array_map(void) skel->rodata->num_cpus = bpf_num_possible_cpus(); val = malloc(8 * bpf_num_possible_cpus()); + if (!ASSERT_OK_PTR(val, "malloc")) + goto out; err = bpf_iter_bpf_percpu_array_map__load(skel); if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__load")) @@ -1092,7 +1085,7 @@ static void test_bpf_percpu_array_map(void) /* do some tests */ while ((len = read(iter_fd, buf, sizeof(buf))) > 0) ; - if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + if (!ASSERT_GE(len, 0, "read")) goto close_iter; /* test results */ @@ -1131,6 +1124,7 @@ static void test_bpf_sk_storage_delete(void) sock_fd = socket(AF_INET6, SOCK_STREAM, 0); if (!ASSERT_GE(sock_fd, 0, "socket")) goto out; + err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST); if (!ASSERT_OK(err, "map_update")) goto out; @@ -1151,14 +1145,19 @@ static void test_bpf_sk_storage_delete(void) /* do some tests */ while ((len = read(iter_fd, buf, sizeof(buf))) > 0) ; - if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + if (!ASSERT_GE(len, 0, "read")) goto close_iter; /* test results */ err = bpf_map_lookup_elem(map_fd, &sock_fd, &val); - if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem", - "map value wasn't deleted (err=%d, errno=%d)\n", err, errno)) - goto close_iter; + + /* Note: The following assertions serve to ensure + * the value was deleted. It does so by asserting + * that bpf_map_lookup_elem has failed. This might + * seem counterintuitive at first. 
+ */ + ASSERT_ERR(err, "bpf_map_lookup_elem"); + ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem"); close_iter: close(iter_fd); @@ -1203,17 +1202,15 @@ static void test_bpf_sk_storage_get(void) do_dummy_read(skel->progs.fill_socket_owner); err = bpf_map_lookup_elem(map_fd, &sock_fd, &val); - if (CHECK(err || val != getpid(), "bpf_map_lookup_elem", - "map value wasn't set correctly (expected %d, got %d, err=%d)\n", - getpid(), val, err)) + if (!ASSERT_OK(err, "bpf_map_lookup_elem") || + !ASSERT_EQ(val, getpid(), "bpf_map_lookup_elem")) goto close_socket; do_dummy_read(skel->progs.negate_socket_local_storage); err = bpf_map_lookup_elem(map_fd, &sock_fd, &val); - CHECK(err || val != -getpid(), "bpf_map_lookup_elem", - "map value wasn't set correctly (expected %d, got %d, err=%d)\n", - -getpid(), val, err); + ASSERT_OK(err, "bpf_map_lookup_elem"); + ASSERT_EQ(val, -getpid(), "bpf_map_lookup_elem"); close_socket: close(sock_fd); @@ -1290,7 +1287,7 @@ static void test_bpf_sk_storage_map(void) /* do some tests */ while ((len = read(iter_fd, buf, sizeof(buf))) > 0) ; - if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + if (!ASSERT_GE(len, 0, "read")) goto close_iter; /* test results */ diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c index 675b90b15280..f09d6ac2ef09 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c @@ -25,7 +25,7 @@ void serial_test_bpf_obj_id(void) */ __u32 map_ids[nr_iters + 1]; char jited_insns[128], xlated_insns[128], zeros[128], tp_name[128]; - __u32 i, next_id, info_len, nr_id_found, duration = 0; + __u32 i, next_id, info_len, nr_id_found; struct timespec real_time_ts, boot_time_ts; int err = 0; __u64 array_value; @@ -33,16 +33,16 @@ void serial_test_bpf_obj_id(void) time_t now, load_time; err = bpf_prog_get_fd_by_id(0); - CHECK(err >= 0 || errno != ENOENT, - "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno); + ASSERT_LT(err, 0, "bpf_prog_get_fd_by_id"); + ASSERT_EQ(errno, ENOENT, "bpf_prog_get_fd_by_id"); err = bpf_map_get_fd_by_id(0); - CHECK(err >= 0 || errno != ENOENT, - "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno); + ASSERT_LT(err, 0, "bpf_map_get_fd_by_id"); + ASSERT_EQ(errno, ENOENT, "bpf_map_get_fd_by_id"); err = bpf_link_get_fd_by_id(0); - CHECK(err >= 0 || errno != ENOENT, - "get-fd-by-notexist-link-id", "err %d errno %d\n", err, errno); + ASSERT_LT(err, 0, "bpf_map_get_fd_by_id"); + ASSERT_EQ(errno, ENOENT, "bpf_map_get_fd_by_id"); /* Check bpf_map_get_info_by_fd() */ bzero(zeros, sizeof(zeros)); @@ -53,25 +53,26 @@ void serial_test_bpf_obj_id(void) /* test_obj_id.o is a dumb prog. It should never fail * to load. 
*/ - if (CHECK_FAIL(err)) + if (!ASSERT_OK(err, "bpf_prog_test_load")) continue; /* Insert a magic value to the map */ map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); - if (CHECK_FAIL(map_fds[i] < 0)) + if (!ASSERT_GE(map_fds[i], 0, "bpf_find_map")) goto done; + err = bpf_map_update_elem(map_fds[i], &array_key, &array_magic_value, 0); - if (CHECK_FAIL(err)) + if (!ASSERT_OK(err, "bpf_map_update_elem")) goto done; - prog = bpf_object__find_program_by_name(objs[i], - "test_obj_id"); - if (CHECK_FAIL(!prog)) + prog = bpf_object__find_program_by_name(objs[i], "test_obj_id"); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) goto done; + links[i] = bpf_program__attach(prog); err = libbpf_get_error(links[i]); - if (CHECK(err, "prog_attach", "prog #%d, err %d\n", i, err)) { + if (!ASSERT_OK(err, "bpf_program__attach")) { links[i] = NULL; goto done; } @@ -81,24 +82,14 @@ void serial_test_bpf_obj_id(void) bzero(&map_infos[i], info_len); err = bpf_map_get_info_by_fd(map_fds[i], &map_infos[i], &info_len); - if (CHECK(err || - map_infos[i].type != BPF_MAP_TYPE_ARRAY || - map_infos[i].key_size != sizeof(__u32) || - map_infos[i].value_size != sizeof(__u64) || - map_infos[i].max_entries != 1 || - map_infos[i].map_flags != 0 || - info_len != sizeof(struct bpf_map_info) || - strcmp((char *)map_infos[i].name, expected_map_name), - "get-map-info(fd)", - "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", - err, errno, - map_infos[i].type, BPF_MAP_TYPE_ARRAY, - info_len, sizeof(struct bpf_map_info), - map_infos[i].key_size, - map_infos[i].value_size, - map_infos[i].max_entries, - map_infos[i].map_flags, - map_infos[i].name, expected_map_name)) + if (!ASSERT_OK(err, "bpf_map_get_info_by_fd") || + !ASSERT_EQ(map_infos[i].type, BPF_MAP_TYPE_ARRAY, "map_type") || + !ASSERT_EQ(map_infos[i].key_size, sizeof(__u32), "key_size") || + !ASSERT_EQ(map_infos[i].value_size, sizeof(__u64), "value_size") || + !ASSERT_EQ(map_infos[i].max_entries, 1, "max_entries") || + !ASSERT_EQ(map_infos[i].map_flags, 0, "map_flags") || + !ASSERT_EQ(info_len, sizeof(struct bpf_map_info), "map_info_len") || + !ASSERT_STREQ((char *)map_infos[i].name, expected_map_name, "map_name")) goto done; /* Check getting prog info */ @@ -112,48 +103,34 @@ void serial_test_bpf_obj_id(void) prog_infos[i].xlated_prog_len = sizeof(xlated_insns); prog_infos[i].map_ids = ptr_to_u64(map_ids + i); prog_infos[i].nr_map_ids = 2; + err = clock_gettime(CLOCK_REALTIME, &real_time_ts); - if (CHECK_FAIL(err)) + if (!ASSERT_OK(err, "clock_gettime")) goto done; + err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts); - if (CHECK_FAIL(err)) + if (!ASSERT_OK(err, "clock_gettime")) goto done; + err = bpf_prog_get_info_by_fd(prog_fds[i], &prog_infos[i], &info_len); load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) + (prog_infos[i].load_time / nsec_per_sec); - if (CHECK(err || - prog_infos[i].type != BPF_PROG_TYPE_RAW_TRACEPOINT || - info_len != sizeof(struct bpf_prog_info) || - (env.jit_enabled && !prog_infos[i].jited_prog_len) || - (env.jit_enabled && - !memcmp(jited_insns, zeros, sizeof(zeros))) || - !prog_infos[i].xlated_prog_len || - !memcmp(xlated_insns, zeros, sizeof(zeros)) || - load_time < now - 60 || load_time > now + 60 || - prog_infos[i].created_by_uid != my_uid || - prog_infos[i].nr_map_ids != 1 || - *(int *)(long)prog_infos[i].map_ids != map_infos[i].id || - strcmp((char *)prog_infos[i].name, expected_prog_name), - "get-prog-info(fd)", - "err %d errno %d i %d type 
%d(%d) info_len %u(%zu) " - "jit_enabled %d jited_prog_len %u xlated_prog_len %u " - "jited_prog %d xlated_prog %d load_time %lu(%lu) " - "uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) " - "name %s(%s)\n", - err, errno, i, - prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, - info_len, sizeof(struct bpf_prog_info), - env.jit_enabled, - prog_infos[i].jited_prog_len, - prog_infos[i].xlated_prog_len, - !!memcmp(jited_insns, zeros, sizeof(zeros)), - !!memcmp(xlated_insns, zeros, sizeof(zeros)), - load_time, now, - prog_infos[i].created_by_uid, my_uid, - prog_infos[i].nr_map_ids, 1, - *(int *)(long)prog_infos[i].map_ids, map_infos[i].id, - prog_infos[i].name, expected_prog_name)) + + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") || + !ASSERT_EQ(prog_infos[i].type, BPF_PROG_TYPE_RAW_TRACEPOINT, "prog_type") || + !ASSERT_EQ(info_len, sizeof(struct bpf_prog_info), "prog_info_len") || + !ASSERT_FALSE((env.jit_enabled && !prog_infos[i].jited_prog_len), "jited_prog_len") || + !ASSERT_FALSE((env.jit_enabled && !memcmp(jited_insns, zeros, sizeof(zeros))), + "jited_insns") || + !ASSERT_NEQ(prog_infos[i].xlated_prog_len, 0, "xlated_prog_len") || + !ASSERT_NEQ(memcmp(xlated_insns, zeros, sizeof(zeros)), 0, "xlated_insns") || + !ASSERT_GE(load_time, (now - 60), "load_time") || + !ASSERT_LE(load_time, (now + 60), "load_time") || + !ASSERT_EQ(prog_infos[i].created_by_uid, my_uid, "created_by_uid") || + !ASSERT_EQ(prog_infos[i].nr_map_ids, 1, "nr_map_ids") || + !ASSERT_EQ(*(int *)(long)prog_infos[i].map_ids, map_infos[i].id, "map_ids") || + !ASSERT_STREQ((char *)prog_infos[i].name, expected_prog_name, "prog_name")) goto done; /* Check getting link info */ @@ -163,25 +140,12 @@ void serial_test_bpf_obj_id(void) link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name); err = bpf_link_get_info_by_fd(bpf_link__fd(links[i]), &link_infos[i], &info_len); - if (CHECK(err || - link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT || - link_infos[i].prog_id != prog_infos[i].id || - link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) || - strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), - "sys_enter") || - info_len != sizeof(struct bpf_link_info), - "get-link-info(fd)", - "err %d errno %d info_len %u(%zu) type %d(%d) id %d " - "prog_id %d (%d) tp_name %s(%s)\n", - err, errno, - info_len, sizeof(struct bpf_link_info), - link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT, - link_infos[i].id, - link_infos[i].prog_id, prog_infos[i].id, - (const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), - "sys_enter")) + if (!ASSERT_OK(err, "bpf_link_get_info_by_fd") || + !ASSERT_EQ(link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT, "link_type") || + !ASSERT_EQ(link_infos[i].prog_id, prog_infos[i].id, "prog_id") || + !ASSERT_EQ(link_infos[i].raw_tracepoint.tp_name, ptr_to_u64(&tp_name), "&tp_name") || + !ASSERT_STREQ(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), "sys_enter", "tp_name")) goto done; - } /* Check bpf_prog_get_next_id() */ @@ -190,7 +154,7 @@ void serial_test_bpf_obj_id(void) while (!bpf_prog_get_next_id(next_id, &next_id)) { struct bpf_prog_info prog_info = {}; __u32 saved_map_id; - int prog_fd; + int prog_fd, cmp_res; info_len = sizeof(prog_info); @@ -198,9 +162,7 @@ void serial_test_bpf_obj_id(void) if (prog_fd < 0 && errno == ENOENT) /* The bpf_prog is in the dead row */ continue; - if (CHECK(prog_fd < 0, "get-prog-fd(next_id)", - "prog_fd %d next_id %d errno %d\n", - prog_fd, next_id, errno)) + if (!ASSERT_GE(prog_fd, 0, "bpf_prog_get_fd_by_id")) break; for (i = 0; i < nr_iters; i++) @@ -218,9 
+180,8 @@ void serial_test_bpf_obj_id(void) */ prog_info.nr_map_ids = 1; err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len); - if (CHECK(!err || errno != EFAULT, - "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)", - err, errno, EFAULT)) + if (!ASSERT_ERR(err, "bpf_prog_get_info_by_fd") || + !ASSERT_EQ(errno, EFAULT, "bpf_prog_get_info_by_fd")) break; bzero(&prog_info, sizeof(prog_info)); info_len = sizeof(prog_info); @@ -231,27 +192,22 @@ void serial_test_bpf_obj_id(void) err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len); prog_infos[i].jited_prog_insns = 0; prog_infos[i].xlated_prog_insns = 0; - CHECK(err || info_len != sizeof(struct bpf_prog_info) || - memcmp(&prog_info, &prog_infos[i], info_len) || - *(int *)(long)prog_info.map_ids != saved_map_id, - "get-prog-info(next_id->fd)", - "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n", - err, errno, info_len, sizeof(struct bpf_prog_info), - memcmp(&prog_info, &prog_infos[i], info_len), - *(int *)(long)prog_info.map_ids, saved_map_id); + cmp_res = memcmp(&prog_info, &prog_infos[i], info_len); + + ASSERT_OK(err, "bpf_prog_get_info_by_fd"); + ASSERT_EQ(info_len, sizeof(struct bpf_prog_info), "prog_info_len"); + ASSERT_OK(cmp_res, "memcmp"); + ASSERT_EQ(*(int *)(long)prog_info.map_ids, saved_map_id, "map_id"); close(prog_fd); } - CHECK(nr_id_found != nr_iters, - "check total prog id found by get_next_id", - "nr_id_found %u(%u)\n", - nr_id_found, nr_iters); + ASSERT_EQ(nr_id_found, nr_iters, "prog_nr_id_found"); /* Check bpf_map_get_next_id() */ nr_id_found = 0; next_id = 0; while (!bpf_map_get_next_id(next_id, &next_id)) { struct bpf_map_info map_info = {}; - int map_fd; + int map_fd, cmp_res; info_len = sizeof(map_info); @@ -259,9 +215,7 @@ void serial_test_bpf_obj_id(void) if (map_fd < 0 && errno == ENOENT) /* The bpf_map is in the dead row */ continue; - if (CHECK(map_fd < 0, "get-map-fd(next_id)", - "map_fd %d next_id %u errno %d\n", - map_fd, next_id, errno)) + if (!ASSERT_GE(map_fd, 0, "bpf_map_get_fd_by_id")) break; for (i = 0; i < nr_iters; i++) @@ -274,25 +228,19 @@ void serial_test_bpf_obj_id(void) nr_id_found++; err = bpf_map_lookup_elem(map_fd, &array_key, &array_value); - if (CHECK_FAIL(err)) + if (!ASSERT_OK(err, "bpf_map_lookup_elem")) goto done; err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len); - CHECK(err || info_len != sizeof(struct bpf_map_info) || - memcmp(&map_info, &map_infos[i], info_len) || - array_value != array_magic_value, - "check get-map-info(next_id->fd)", - "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n", - err, errno, info_len, sizeof(struct bpf_map_info), - memcmp(&map_info, &map_infos[i], info_len), - array_value, array_magic_value); + cmp_res = memcmp(&map_info, &map_infos[i], info_len); + ASSERT_OK(err, "bpf_map_get_info_by_fd"); + ASSERT_EQ(info_len, sizeof(struct bpf_map_info), "info_len"); + ASSERT_OK(cmp_res, "memcmp"); + ASSERT_EQ(array_value, array_magic_value, "array_value"); close(map_fd); } - CHECK(nr_id_found != nr_iters, - "check total map id found by get_next_id", - "nr_id_found %u(%u)\n", - nr_id_found, nr_iters); + ASSERT_EQ(nr_id_found, nr_iters, "map_nr_id_found"); /* Check bpf_link_get_next_id() */ nr_id_found = 0; @@ -308,9 +256,7 @@ void serial_test_bpf_obj_id(void) if (link_fd < 0 && errno == ENOENT) /* The bpf_link is in the dead row */ continue; - if (CHECK(link_fd < 0, "get-link-fd(next_id)", - "link_fd %d next_id %u errno %d\n", - link_fd, next_id, errno)) + if (!ASSERT_GE(link_fd, 0, "bpf_link_get_fd_by_id")) break; 
for (i = 0; i < nr_iters; i++) @@ -325,17 +271,13 @@ void serial_test_bpf_obj_id(void) err = bpf_link_get_info_by_fd(link_fd, &link_info, &info_len); cmp_res = memcmp(&link_info, &link_infos[i], offsetof(struct bpf_link_info, raw_tracepoint)); - CHECK(err || info_len != sizeof(link_info) || cmp_res, - "check get-link-info(next_id->fd)", - "err %d errno %d info_len %u(%zu) memcmp %d\n", - err, errno, info_len, sizeof(struct bpf_link_info), - cmp_res); + ASSERT_OK(err, "bpf_link_get_info_by_fd"); + ASSERT_EQ(info_len, sizeof(link_info), "info_len"); + ASSERT_OK(cmp_res, "memcmp"); close(link_fd); } - CHECK(nr_id_found != nr_iters, - "check total link id found by get_next_id", - "nr_id_found %u(%u)\n", nr_id_found, nr_iters); + ASSERT_EQ(nr_id_found, nr_iters, "link_nr_id_found"); done: for (i = 0; i < nr_iters; i++) { diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index 4aabeaa525d4..a88e6e07e4f5 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -20,15 +20,14 @@ static const unsigned int total_bytes = 10 * 1024 * 1024; static int expected_stg = 0xeB9F; -static int stop, duration; +static int stop; static int settcpca(int fd, const char *tcp_ca) { int err; err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca)); - if (CHECK(err == -1, "setsockopt(fd, TCP_CONGESTION)", "errno:%d\n", - errno)) + if (!ASSERT_NEQ(err, -1, "setsockopt")) return -1; return 0; @@ -65,8 +64,7 @@ static void *server(void *arg) bytes += nr_sent; } - CHECK(bytes != total_bytes, "send", "%zd != %u nr_sent:%zd errno:%d\n", - bytes, total_bytes, nr_sent, errno); + ASSERT_EQ(bytes, total_bytes, "send"); done: if (fd >= 0) @@ -92,10 +90,11 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map) WRITE_ONCE(stop, 0); lfd = socket(AF_INET6, SOCK_STREAM, 0); - if (CHECK(lfd == -1, "socket", "errno:%d\n", errno)) + if (!ASSERT_NEQ(lfd, -1, "socket")) return; + fd = socket(AF_INET6, SOCK_STREAM, 0); - if (CHECK(fd == -1, "socket", "errno:%d\n", errno)) { + if (!ASSERT_NEQ(fd, -1, "socket")) { close(lfd); return; } @@ -108,26 +107,27 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map) sa6.sin6_family = AF_INET6; sa6.sin6_addr = in6addr_loopback; err = bind(lfd, (struct sockaddr *)&sa6, addrlen); - if (CHECK(err == -1, "bind", "errno:%d\n", errno)) + if (!ASSERT_NEQ(err, -1, "bind")) goto done; + err = getsockname(lfd, (struct sockaddr *)&sa6, &addrlen); - if (CHECK(err == -1, "getsockname", "errno:%d\n", errno)) + if (!ASSERT_NEQ(err, -1, "getsockname")) goto done; + err = listen(lfd, 1); - if (CHECK(err == -1, "listen", "errno:%d\n", errno)) + if (!ASSERT_NEQ(err, -1, "listen")) goto done; if (sk_stg_map) { err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd, &expected_stg, BPF_NOEXIST); - if (CHECK(err, "bpf_map_update_elem(sk_stg_map)", - "err:%d errno:%d\n", err, errno)) + if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)")) goto done; } /* connect to server */ err = connect(fd, (struct sockaddr *)&sa6, addrlen); - if (CHECK(err == -1, "connect", "errno:%d\n", errno)) + if (!ASSERT_NEQ(err, -1, "connect")) goto done; if (sk_stg_map) { @@ -135,14 +135,13 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map) err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd, &tmp_stg); - if (CHECK(!err || errno != ENOENT, - "bpf_map_lookup_elem(sk_stg_map)", - "err:%d errno:%d\n", err, errno)) + if 
(!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") || + !ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)")) goto done; } err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd); - if (CHECK(err != 0, "pthread_create", "err:%d errno:%d\n", err, errno)) + if (!ASSERT_OK(err, "pthread_create")) goto done; /* recv total_bytes */ @@ -156,13 +155,12 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map) bytes += nr_recv; } - CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n", - bytes, total_bytes, nr_recv, errno); + ASSERT_EQ(bytes, total_bytes, "recv"); WRITE_ONCE(stop, 1); pthread_join(srv_thread, &thread_ret); - CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld", - PTR_ERR(thread_ret)); + ASSERT_OK(IS_ERR(thread_ret), "thread_ret"); + done: close(lfd); close(fd); @@ -174,7 +172,7 @@ static void test_cubic(void) struct bpf_link *link; cubic_skel = bpf_cubic__open_and_load(); - if (CHECK(!cubic_skel, "bpf_cubic__open_and_load", "failed\n")) + if (!ASSERT_OK_PTR(cubic_skel, "bpf_cubic__open_and_load")) return; link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic); @@ -197,7 +195,7 @@ static void test_dctcp(void) struct bpf_link *link; dctcp_skel = bpf_dctcp__open_and_load(); - if (CHECK(!dctcp_skel, "bpf_dctcp__open_and_load", "failed\n")) + if (!ASSERT_OK_PTR(dctcp_skel, "bpf_dctcp__open_and_load")) return; link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp); @@ -207,9 +205,7 @@ static void test_dctcp(void) } do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map); - CHECK(dctcp_skel->bss->stg_result != expected_stg, - "Unexpected stg_result", "stg_result (%x) != expected_stg (%x)\n", - dctcp_skel->bss->stg_result, expected_stg); + ASSERT_EQ(dctcp_skel->bss->stg_result, expected_stg, "stg_result"); bpf_link__destroy(link); bpf_dctcp__destroy(dctcp_skel); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index 731c343897d8..e770912fc1d2 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -35,7 +35,7 @@ static int check_load(const char *file, enum bpf_prog_type type) } bpf_program__set_type(prog, type); - bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32); + bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS); bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags); err = bpf_object__load(obj); diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 92d51f377fe5..8fb4a04fbbc0 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -5265,6 +5265,7 @@ static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind) #endif assert(0); + return 0; } static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind, diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c b/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c new file mode 100644 index 000000000000..74d6d7546f40 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */ + +#include <sys/types.h> +#include <unistd.h> +#include <test_progs.h> +#include "cgroup_helpers.h" +#include "test_cgroup1_hierarchy.skel.h" + +static void bpf_cgroup1(struct test_cgroup1_hierarchy *skel) +{ + struct bpf_link *lsm_link, 
*fentry_link; + int err; + + /* Attach LSM prog first */ + lsm_link = bpf_program__attach_lsm(skel->progs.lsm_run); + if (!ASSERT_OK_PTR(lsm_link, "lsm_attach")) + return; + + /* LSM prog will be triggered when attaching fentry */ + fentry_link = bpf_program__attach_trace(skel->progs.fentry_run); + ASSERT_NULL(fentry_link, "fentry_attach_fail"); + + err = bpf_link__destroy(lsm_link); + ASSERT_OK(err, "destroy_lsm"); +} + +static void bpf_cgroup1_sleepable(struct test_cgroup1_hierarchy *skel) +{ + struct bpf_link *lsm_link, *fentry_link; + int err; + + /* Attach LSM prog first */ + lsm_link = bpf_program__attach_lsm(skel->progs.lsm_s_run); + if (!ASSERT_OK_PTR(lsm_link, "lsm_attach")) + return; + + /* LSM prog will be triggered when attaching fentry */ + fentry_link = bpf_program__attach_trace(skel->progs.fentry_run); + ASSERT_NULL(fentry_link, "fentry_attach_fail"); + + err = bpf_link__destroy(lsm_link); + ASSERT_OK(err, "destroy_lsm"); +} + +static void bpf_cgroup1_invalid_id(struct test_cgroup1_hierarchy *skel) +{ + struct bpf_link *lsm_link, *fentry_link; + int err; + + /* Attach LSM prog first */ + lsm_link = bpf_program__attach_lsm(skel->progs.lsm_run); + if (!ASSERT_OK_PTR(lsm_link, "lsm_attach")) + return; + + /* LSM prog will be triggered when attaching fentry */ + fentry_link = bpf_program__attach_trace(skel->progs.fentry_run); + if (!ASSERT_OK_PTR(fentry_link, "fentry_attach_success")) + goto cleanup; + + err = bpf_link__destroy(fentry_link); + ASSERT_OK(err, "destroy_lsm"); + +cleanup: + err = bpf_link__destroy(lsm_link); + ASSERT_OK(err, "destroy_fentry"); +} + +void test_cgroup1_hierarchy(void) +{ + struct test_cgroup1_hierarchy *skel; + __u64 current_cgid; + int hid, err; + + skel = test_cgroup1_hierarchy__open(); + if (!ASSERT_OK_PTR(skel, "open")) + return; + + skel->bss->target_pid = getpid(); + + err = bpf_program__set_attach_target(skel->progs.fentry_run, 0, "bpf_fentry_test1"); + if (!ASSERT_OK(err, "fentry_set_target")) + goto destroy; + + err = test_cgroup1_hierarchy__load(skel); + if (!ASSERT_OK(err, "load")) + goto destroy; + + /* Setup cgroup1 hierarchy */ + err = setup_classid_environment(); + if (!ASSERT_OK(err, "setup_classid_environment")) + goto destroy; + + err = join_classid(); + if (!ASSERT_OK(err, "join_cgroup1")) + goto cleanup; + + current_cgid = get_classid_cgroup_id(); + if (!ASSERT_GE(current_cgid, 0, "cgroup1 id")) + goto cleanup; + + hid = get_cgroup1_hierarchy_id("net_cls"); + if (!ASSERT_GE(hid, 0, "cgroup1 id")) + goto cleanup; + skel->bss->target_hid = hid; + + if (test__start_subtest("test_cgroup1_hierarchy")) { + skel->bss->target_ancestor_cgid = current_cgid; + bpf_cgroup1(skel); + } + + if (test__start_subtest("test_root_cgid")) { + skel->bss->target_ancestor_cgid = 1; + skel->bss->target_ancestor_level = 0; + bpf_cgroup1(skel); + } + + if (test__start_subtest("test_invalid_level")) { + skel->bss->target_ancestor_cgid = 1; + skel->bss->target_ancestor_level = 1; + bpf_cgroup1_invalid_id(skel); + } + + if (test__start_subtest("test_invalid_cgid")) { + skel->bss->target_ancestor_cgid = 0; + bpf_cgroup1_invalid_id(skel); + } + + if (test__start_subtest("test_invalid_hid")) { + skel->bss->target_ancestor_cgid = 1; + skel->bss->target_ancestor_level = 0; + skel->bss->target_hid = -1; + bpf_cgroup1_invalid_id(skel); + } + + if (test__start_subtest("test_invalid_cgrp_name")) { + skel->bss->target_hid = get_cgroup1_hierarchy_id("net_cl"); + skel->bss->target_ancestor_cgid = current_cgid; + bpf_cgroup1_invalid_id(skel); + } + + if 
(test__start_subtest("test_invalid_cgrp_name2")) { + skel->bss->target_hid = get_cgroup1_hierarchy_id("net_cls,"); + skel->bss->target_ancestor_cgid = current_cgid; + bpf_cgroup1_invalid_id(skel); + } + + if (test__start_subtest("test_sleepable_prog")) { + skel->bss->target_hid = hid; + skel->bss->target_ancestor_cgid = current_cgid; + bpf_cgroup1_sleepable(skel); + } + +cleanup: + cleanup_classid_environment(); +destroy: + test_cgroup1_hierarchy__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c index 9026b42914d3..addf720428f7 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c @@ -71,7 +71,7 @@ void test_cgroup_v1v2(void) } ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only"); setup_classid_environment(); - set_classid(42); + set_classid(); ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2"); cleanup_classid_environment(); close(server_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c index b25b870f87ba..e6e50a394472 100644 --- a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c +++ b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c @@ -73,6 +73,37 @@ static void test_local_kptr_stash_unstash(void) local_kptr_stash__destroy(skel); } +static void test_refcount_acquire_without_unstash(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct local_kptr_stash *skel; + int ret; + + skel = local_kptr_stash__open_and_load(); + if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.refcount_acquire_without_unstash), + &opts); + ASSERT_OK(ret, "refcount_acquire_without_unstash run"); + ASSERT_EQ(opts.retval, 2, "refcount_acquire_without_unstash retval"); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_refcounted_node), &opts); + ASSERT_OK(ret, "stash_refcounted_node run"); + ASSERT_OK(opts.retval, "stash_refcounted_node retval"); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.refcount_acquire_without_unstash), + &opts); + ASSERT_OK(ret, "refcount_acquire_without_unstash (2) run"); + ASSERT_EQ(opts.retval, 42, "refcount_acquire_without_unstash (2) retval"); + + local_kptr_stash__destroy(skel); +} + static void test_local_kptr_stash_fail(void) { RUN_TESTS(local_kptr_stash_fail); @@ -86,6 +117,8 @@ void test_local_kptr_stash(void) test_local_kptr_stash_plain(); if (test__start_subtest("local_kptr_stash_unstash")) test_local_kptr_stash_unstash(); + if (test__start_subtest("refcount_acquire_without_unstash")) + test_refcount_acquire_without_unstash(); if (test__start_subtest("local_kptr_stash_fail")) test_local_kptr_stash_fail(); } diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c index fe9a23e65ef4..0f7ea4d7d9f6 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_buf.c +++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c @@ -78,7 +78,7 @@ static void obj_load_log_buf(void) ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"), "libbpf_log_not_empty"); ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty"); - ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), + ASSERT_OK_PTR(strstr(good_log_buf, "0: 
R1=ctx() R10=fp0"), "good_log_verbose"); ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"), "bad_log_not_empty"); @@ -175,7 +175,7 @@ static void bpf_prog_load_log_buf(void) opts.log_level = 2; fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL", good_prog_insns, good_prog_insn_cnt, &opts); - ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_2"); + ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx() R10=fp0"), "good_log_2"); ASSERT_GE(fd, 0, "good_fd2"); if (fd >= 0) close(fd); diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c new file mode 100644 index 000000000000..0c9abd279e18 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c @@ -0,0 +1,2124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#define _GNU_SOURCE +#include <limits.h> +#include <test_progs.h> +#include <linux/filter.h> +#include <linux/bpf.h> + +/* ================================= + * SHORT AND CONSISTENT NUMBER TYPES + * ================================= + */ +#define U64_MAX ((u64)UINT64_MAX) +#define U32_MAX ((u32)UINT_MAX) +#define U16_MAX ((u32)UINT_MAX) +#define S64_MIN ((s64)INT64_MIN) +#define S64_MAX ((s64)INT64_MAX) +#define S32_MIN ((s32)INT_MIN) +#define S32_MAX ((s32)INT_MAX) +#define S16_MIN ((s16)0x80000000) +#define S16_MAX ((s16)0x7fffffff) + +typedef unsigned long long ___u64; +typedef unsigned int ___u32; +typedef long long ___s64; +typedef int ___s32; + +/* avoid conflicts with already defined types in kernel headers */ +#define u64 ___u64 +#define u32 ___u32 +#define s64 ___s64 +#define s32 ___s32 + +/* ================================== + * STRING BUF ABSTRACTION AND HELPERS + * ================================== + */ +struct strbuf { + size_t buf_sz; + int pos; + char buf[0]; +}; + +#define DEFINE_STRBUF(name, N) \ + struct { struct strbuf buf; char data[(N)]; } ___##name; \ + struct strbuf *name = (___##name.buf.buf_sz = (N), ___##name.buf.pos = 0, &___##name.buf) + +__printf(2, 3) +static inline void snappendf(struct strbuf *s, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + s->pos += vsnprintf(s->buf + s->pos, + s->pos < s->buf_sz ? s->buf_sz - s->pos : 0, + fmt, args); + va_end(args); +} + +/* ================================== + * GENERIC NUMBER TYPE AND OPERATIONS + * ================================== + */ +enum num_t { U64, first_t = U64, U32, S64, S32, last_t = S32 }; + +static __always_inline u64 min_t(enum num_t t, u64 x, u64 y) +{ + switch (t) { + case U64: return (u64)x < (u64)y ? (u64)x : (u64)y; + case U32: return (u32)x < (u32)y ? (u32)x : (u32)y; + case S64: return (s64)x < (s64)y ? (s64)x : (s64)y; + case S32: return (s32)x < (s32)y ? (s32)x : (s32)y; + default: printf("min_t!\n"); exit(1); + } +} + +static __always_inline u64 max_t(enum num_t t, u64 x, u64 y) +{ + switch (t) { + case U64: return (u64)x > (u64)y ? (u64)x : (u64)y; + case U32: return (u32)x > (u32)y ? (u32)x : (u32)y; + case S64: return (s64)x > (s64)y ? (s64)x : (s64)y; + case S32: return (s32)x > (s32)y ? 
(u32)(s32)x : (u32)(s32)y; + default: printf("max_t!\n"); exit(1); + } +} + +static __always_inline u64 cast_t(enum num_t t, u64 x) +{ + switch (t) { + case U64: return (u64)x; + case U32: return (u32)x; + case S64: return (s64)x; + case S32: return (u32)(s32)x; + default: printf("cast_t!\n"); exit(1); + } +} + +static const char *t_str(enum num_t t) +{ + switch (t) { + case U64: return "u64"; + case U32: return "u32"; + case S64: return "s64"; + case S32: return "s32"; + default: printf("t_str!\n"); exit(1); + } +} + +static enum num_t t_is_32(enum num_t t) +{ + switch (t) { + case U64: return false; + case U32: return true; + case S64: return false; + case S32: return true; + default: printf("t_is_32!\n"); exit(1); + } +} + +static enum num_t t_signed(enum num_t t) +{ + switch (t) { + case U64: return S64; + case U32: return S32; + case S64: return S64; + case S32: return S32; + default: printf("t_signed!\n"); exit(1); + } +} + +static enum num_t t_unsigned(enum num_t t) +{ + switch (t) { + case U64: return U64; + case U32: return U32; + case S64: return U64; + case S32: return U32; + default: printf("t_unsigned!\n"); exit(1); + } +} + +#define UNUM_MAX_DECIMAL U16_MAX +#define SNUM_MAX_DECIMAL S16_MAX +#define SNUM_MIN_DECIMAL S16_MIN + +static bool num_is_small(enum num_t t, u64 x) +{ + switch (t) { + case U64: return (u64)x <= UNUM_MAX_DECIMAL; + case U32: return (u32)x <= UNUM_MAX_DECIMAL; + case S64: return (s64)x >= SNUM_MIN_DECIMAL && (s64)x <= SNUM_MAX_DECIMAL; + case S32: return (s32)x >= SNUM_MIN_DECIMAL && (s32)x <= SNUM_MAX_DECIMAL; + default: printf("num_is_small!\n"); exit(1); + } +} + +static void snprintf_num(enum num_t t, struct strbuf *sb, u64 x) +{ + bool is_small = num_is_small(t, x); + + if (is_small) { + switch (t) { + case U64: return snappendf(sb, "%llu", (u64)x); + case U32: return snappendf(sb, "%u", (u32)x); + case S64: return snappendf(sb, "%lld", (s64)x); + case S32: return snappendf(sb, "%d", (s32)x); + default: printf("snprintf_num!\n"); exit(1); + } + } else { + switch (t) { + case U64: + if (x == U64_MAX) + return snappendf(sb, "U64_MAX"); + else if (x >= U64_MAX - 256) + return snappendf(sb, "U64_MAX-%llu", U64_MAX - x); + else + return snappendf(sb, "%#llx", (u64)x); + case U32: + if ((u32)x == U32_MAX) + return snappendf(sb, "U32_MAX"); + else if ((u32)x >= U32_MAX - 256) + return snappendf(sb, "U32_MAX-%u", U32_MAX - (u32)x); + else + return snappendf(sb, "%#x", (u32)x); + case S64: + if ((s64)x == S64_MAX) + return snappendf(sb, "S64_MAX"); + else if ((s64)x >= S64_MAX - 256) + return snappendf(sb, "S64_MAX-%lld", S64_MAX - (s64)x); + else if ((s64)x == S64_MIN) + return snappendf(sb, "S64_MIN"); + else if ((s64)x <= S64_MIN + 256) + return snappendf(sb, "S64_MIN+%lld", (s64)x - S64_MIN); + else + return snappendf(sb, "%#llx", (s64)x); + case S32: + if ((s32)x == S32_MAX) + return snappendf(sb, "S32_MAX"); + else if ((s32)x >= S32_MAX - 256) + return snappendf(sb, "S32_MAX-%d", S32_MAX - (s32)x); + else if ((s32)x == S32_MIN) + return snappendf(sb, "S32_MIN"); + else if ((s32)x <= S32_MIN + 256) + return snappendf(sb, "S32_MIN+%d", (s32)x - S32_MIN); + else + return snappendf(sb, "%#x", (s32)x); + default: printf("snprintf_num!\n"); exit(1); + } + } +} + +/* =================================== + * GENERIC RANGE STRUCT AND OPERATIONS + * =================================== + */ +struct range { + u64 a, b; +}; + +static void snprintf_range(enum num_t t, struct strbuf *sb, struct range x) +{ + if (x.a == x.b) + return snprintf_num(t, sb, x.a); + + 
snappendf(sb, "["); + snprintf_num(t, sb, x.a); + snappendf(sb, "; "); + snprintf_num(t, sb, x.b); + snappendf(sb, "]"); +} + +static void print_range(enum num_t t, struct range x, const char *sfx) +{ + DEFINE_STRBUF(sb, 128); + + snprintf_range(t, sb, x); + printf("%s%s", sb->buf, sfx); +} + +static const struct range unkn[] = { + [U64] = { 0, U64_MAX }, + [U32] = { 0, U32_MAX }, + [S64] = { (u64)S64_MIN, (u64)S64_MAX }, + [S32] = { (u64)(u32)S32_MIN, (u64)(u32)S32_MAX }, +}; + +static struct range unkn_subreg(enum num_t t) +{ + switch (t) { + case U64: return unkn[U32]; + case U32: return unkn[U32]; + case S64: return unkn[U32]; + case S32: return unkn[S32]; + default: printf("unkn_subreg!\n"); exit(1); + } +} + +static struct range range(enum num_t t, u64 a, u64 b) +{ + switch (t) { + case U64: return (struct range){ (u64)a, (u64)b }; + case U32: return (struct range){ (u32)a, (u32)b }; + case S64: return (struct range){ (s64)a, (s64)b }; + case S32: return (struct range){ (u32)(s32)a, (u32)(s32)b }; + default: printf("range!\n"); exit(1); + } +} + +static __always_inline u32 sign64(u64 x) { return (x >> 63) & 1; } +static __always_inline u32 sign32(u64 x) { return ((u32)x >> 31) & 1; } +static __always_inline u32 upper32(u64 x) { return (u32)(x >> 32); } +static __always_inline u64 swap_low32(u64 x, u32 y) { return (x & 0xffffffff00000000ULL) | y; } + +static bool range_eq(struct range x, struct range y) +{ + return x.a == y.a && x.b == y.b; +} + +static struct range range_cast_to_s32(struct range x) +{ + u64 a = x.a, b = x.b; + + /* if upper 32 bits are constant, lower 32 bits should form a proper + * s32 range to be correct + */ + if (upper32(a) == upper32(b) && (s32)a <= (s32)b) + return range(S32, a, b); + + /* Special case where upper bits form a small sequence of two + * sequential numbers (in 32-bit unsigned space, so 0xffffffff to + * 0x00000000 is also valid), while lower bits form a proper s32 range + * going from negative numbers to positive numbers. + * + * E.g.: [0xfffffff0ffffff00; 0xfffffff100000010]. Iterating + * over full 64-bit numbers range will form a proper [-16, 16] + * ([0xffffff00; 0x00000010]) range in its lower 32 bits. 
+ */ + if (upper32(a) + 1 == upper32(b) && (s32)a < 0 && (s32)b >= 0) + return range(S32, a, b); + + /* otherwise we can't derive much meaningful information */ + return unkn[S32]; +} + +static struct range range_cast_u64(enum num_t to_t, struct range x) +{ + u64 a = (u64)x.a, b = (u64)x.b; + + switch (to_t) { + case U64: + return x; + case U32: + if (upper32(a) != upper32(b)) + return unkn[U32]; + return range(U32, a, b); + case S64: + if (sign64(a) != sign64(b)) + return unkn[S64]; + return range(S64, a, b); + case S32: + return range_cast_to_s32(x); + default: printf("range_cast_u64!\n"); exit(1); + } +} + +static struct range range_cast_s64(enum num_t to_t, struct range x) +{ + s64 a = (s64)x.a, b = (s64)x.b; + + switch (to_t) { + case U64: + /* equivalent to (s64)a <= (s64)b check */ + if (sign64(a) != sign64(b)) + return unkn[U64]; + return range(U64, a, b); + case U32: + if (upper32(a) != upper32(b) || sign32(a) != sign32(b)) + return unkn[U32]; + return range(U32, a, b); + case S64: + return x; + case S32: + return range_cast_to_s32(x); + default: printf("range_cast_s64!\n"); exit(1); + } +} + +static struct range range_cast_u32(enum num_t to_t, struct range x) +{ + u32 a = (u32)x.a, b = (u32)x.b; + + switch (to_t) { + case U64: + case S64: + /* u32 is always a valid zero-extended u64/s64 */ + return range(to_t, a, b); + case U32: + return x; + case S32: + return range_cast_to_s32(range(U32, a, b)); + default: printf("range_cast_u32!\n"); exit(1); + } +} + +static struct range range_cast_s32(enum num_t to_t, struct range x) +{ + s32 a = (s32)x.a, b = (s32)x.b; + + switch (to_t) { + case U64: + case U32: + case S64: + if (sign32(a) != sign32(b)) + return unkn[to_t]; + return range(to_t, a, b); + case S32: + return x; + default: printf("range_cast_s32!\n"); exit(1); + } +} + +/* Reinterpret range in *from_t* domain as a range in *to_t* domain preserving + * all possible information. 
Worst case, it will be unknown range within + * *to_t* domain, if nothing more specific can be guaranteed during the + * conversion + */ +static struct range range_cast(enum num_t from_t, enum num_t to_t, struct range from) +{ + switch (from_t) { + case U64: return range_cast_u64(to_t, from); + case U32: return range_cast_u32(to_t, from); + case S64: return range_cast_s64(to_t, from); + case S32: return range_cast_s32(to_t, from); + default: printf("range_cast!\n"); exit(1); + } +} + +static bool is_valid_num(enum num_t t, u64 x) +{ + switch (t) { + case U64: return true; + case U32: return upper32(x) == 0; + case S64: return true; + case S32: return upper32(x) == 0; + default: printf("is_valid_num!\n"); exit(1); + } +} + +static bool is_valid_range(enum num_t t, struct range x) +{ + if (!is_valid_num(t, x.a) || !is_valid_num(t, x.b)) + return false; + + switch (t) { + case U64: return (u64)x.a <= (u64)x.b; + case U32: return (u32)x.a <= (u32)x.b; + case S64: return (s64)x.a <= (s64)x.b; + case S32: return (s32)x.a <= (s32)x.b; + default: printf("is_valid_range!\n"); exit(1); + } +} + +static struct range range_improve(enum num_t t, struct range old, struct range new) +{ + return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b)); +} + +static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y) +{ + struct range y_cast; + + y_cast = range_cast(y_t, x_t, y); + + /* the case when new range knowledge, *y*, is a 32-bit subregister + * range, while previous range knowledge, *x*, is a full register + * 64-bit range, needs special treatment to take into account upper 32 + * bits of full register range + */ + if (t_is_32(y_t) && !t_is_32(x_t)) { + struct range x_swap; + + /* some combinations of upper 32 bits and sign bit can lead to + * invalid ranges, in such cases it's easier to detect them + * after cast/swap than try to enumerate all the conditions + * under which transformation and knowledge transfer is valid + */ + x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b)); + if (!is_valid_range(x_t, x_swap)) + return x; + return range_improve(x_t, x, x_swap); + } + + /* otherwise, plain range cast and intersection works */ + return range_improve(x_t, x, y_cast); +} + +/* ======================= + * GENERIC CONDITIONAL OPS + * ======================= + */ +enum op { OP_LT, OP_LE, OP_GT, OP_GE, OP_EQ, OP_NE, first_op = OP_LT, last_op = OP_NE }; + +static enum op complement_op(enum op op) +{ + switch (op) { + case OP_LT: return OP_GE; + case OP_LE: return OP_GT; + case OP_GT: return OP_LE; + case OP_GE: return OP_LT; + case OP_EQ: return OP_NE; + case OP_NE: return OP_EQ; + default: printf("complement_op!\n"); exit(1); + } +} + +static const char *op_str(enum op op) +{ + switch (op) { + case OP_LT: return "<"; + case OP_LE: return "<="; + case OP_GT: return ">"; + case OP_GE: return ">="; + case OP_EQ: return "=="; + case OP_NE: return "!="; + default: printf("op_str!\n"); exit(1); + } +} + +/* Can register with range [x.a, x.b] *EVER* satisfy + * OP (<, <=, >, >=, ==, !=) relation to + * a regsiter with range [y.a, y.b] + * _in *num_t* domain_ + */ +static bool range_canbe_op(enum num_t t, struct range x, struct range y, enum op op) +{ +#define range_canbe(T) do { \ + switch (op) { \ + case OP_LT: return (T)x.a < (T)y.b; \ + case OP_LE: return (T)x.a <= (T)y.b; \ + case OP_GT: return (T)x.b > (T)y.a; \ + case OP_GE: return (T)x.b >= (T)y.a; \ + case OP_EQ: return (T)max_t(t, x.a, y.a) <= (T)min_t(t, x.b, y.b); \ + case OP_NE: return 
!((T)x.a == (T)x.b && (T)y.a == (T)y.b && (T)x.a == (T)y.a); \ + default: printf("range_canbe op %d\n", op); exit(1); \ + } \ +} while (0) + + switch (t) { + case U64: { range_canbe(u64); } + case U32: { range_canbe(u32); } + case S64: { range_canbe(s64); } + case S32: { range_canbe(s32); } + default: printf("range_canbe!\n"); exit(1); + } +#undef range_canbe +} + +/* Does a register with range [x.a, x.b] *ALWAYS* satisfy + * OP (<, <=, >, >=, ==, !=) relation to + * a register with range [y.a, y.b] + * _in *num_t* domain_ + */ +static bool range_always_op(enum num_t t, struct range x, struct range y, enum op op) +{ + /* always op <=> ! canbe complement(op) */ + return !range_canbe_op(t, x, y, complement_op(op)); +} + +/* Does a register with range [x.a, x.b] *NEVER* satisfy + * OP (<, <=, >, >=, ==, !=) relation to + * a register with range [y.a, y.b] + * _in *num_t* domain_ + */ +static bool range_never_op(enum num_t t, struct range x, struct range y, enum op op) +{ + return !range_canbe_op(t, x, y, op); +} + +/* similar to verifier's is_branch_taken(): + * 1 - always taken; + * 0 - never taken; + * -1 - unsure. + */ +static int range_branch_taken_op(enum num_t t, struct range x, struct range y, enum op op) +{ + if (range_always_op(t, x, y, op)) + return 1; + if (range_never_op(t, x, y, op)) + return 0; + return -1; +} + +/* What would be the new estimates for register x and y ranges assuming truthful + * OP comparison between them. I.e., (x OP y == true) => x <- newx, y <- newy. + * + * We assume "interesting" cases where ranges overlap. Cases where it's + * obvious that (x OP y) is either always true or false should be filtered with + * range_never and range_always checks. + */ +static void range_cond(enum num_t t, struct range x, struct range y, + enum op op, struct range *newx, struct range *newy) +{ + if (!range_canbe_op(t, x, y, op)) { + /* nothing to adjust, can't happen, return original values */ + *newx = x; + *newy = y; + return; + } + switch (op) { + case OP_LT: + *newx = range(t, x.a, min_t(t, x.b, y.b - 1)); + *newy = range(t, max_t(t, x.a + 1, y.a), y.b); + break; + case OP_LE: + *newx = range(t, x.a, min_t(t, x.b, y.b)); + *newy = range(t, max_t(t, x.a, y.a), y.b); + break; + case OP_GT: + *newx = range(t, max_t(t, x.a, y.a + 1), x.b); + *newy = range(t, y.a, min_t(t, x.b - 1, y.b)); + break; + case OP_GE: + *newx = range(t, max_t(t, x.a, y.a), x.b); + *newy = range(t, y.a, min_t(t, x.b, y.b)); + break; + case OP_EQ: + *newx = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b)); + *newy = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b)); + break; + case OP_NE: + /* generic case, can't derive more information */ + *newx = range(t, x.a, x.b); + *newy = range(t, y.a, y.b); + break; + + /* below extended logic is not supported by verifier just yet */ + if (x.a == x.b && x.a == y.a) { + /* X is a constant matching left side of Y */ + *newx = range(t, x.a, x.b); + *newy = range(t, y.a + 1, y.b); + } else if (x.a == x.b && x.b == y.b) { + /* X is a constant matching right side of Y */ + *newx = range(t, x.a, x.b); + *newy = range(t, y.a, y.b - 1); + } else if (y.a == y.b && x.a == y.a) { + /* Y is a constant matching left side of X */ + *newx = range(t, x.a + 1, x.b); + *newy = range(t, y.a, y.b); + } else if (y.a == y.b && x.b == y.b) { + /* Y is a constant matching right side of X */ + *newx = range(t, x.a, x.b - 1); + *newy = range(t, y.a, y.b); + } else { + /* generic case, can't derive more information */ + *newx = range(t, x.a, x.b); + *newy = range(t, y.a, y.b); + } + + break; + 
default: + break; + } +} + +/* ======================= + * REGISTER STATE HANDLING + * ======================= + */ +struct reg_state { + struct range r[4]; /* indexed by enum num_t: U64, U32, S64, S32 */ + bool valid; +}; + +static void print_reg_state(struct reg_state *r, const char *sfx) +{ + DEFINE_STRBUF(sb, 512); + enum num_t t; + int cnt = 0; + + if (!r->valid) { + printf("<not found>%s", sfx); + return; + } + + snappendf(sb, "scalar("); + for (t = first_t; t <= last_t; t++) { + snappendf(sb, "%s%s=", cnt++ ? "," : "", t_str(t)); + snprintf_range(t, sb, r->r[t]); + } + snappendf(sb, ")"); + + printf("%s%s", sb->buf, sfx); +} + +static void print_refinement(enum num_t s_t, struct range src, + enum num_t d_t, struct range old, struct range new, + const char *ctx) +{ + printf("REFINING (%s) (%s)SRC=", ctx, t_str(s_t)); + print_range(s_t, src, ""); + printf(" (%s)DST_OLD=", t_str(d_t)); + print_range(d_t, old, ""); + printf(" (%s)DST_NEW=", t_str(d_t)); + print_range(d_t, new, "\n"); +} + +static void reg_state_refine(struct reg_state *r, enum num_t t, struct range x, const char *ctx) +{ + enum num_t d_t, s_t; + struct range old; + bool keep_going = false; + +again: + /* try to derive new knowledge from just learned range x of type t */ + for (d_t = first_t; d_t <= last_t; d_t++) { + old = r->r[d_t]; + r->r[d_t] = range_refine(d_t, r->r[d_t], t, x); + if (!range_eq(r->r[d_t], old)) { + keep_going = true; + if (env.verbosity >= VERBOSE_VERY) + print_refinement(t, x, d_t, old, r->r[d_t], ctx); + } + } + + /* now see if we can derive anything new from updated reg_state's ranges */ + for (s_t = first_t; s_t <= last_t; s_t++) { + for (d_t = first_t; d_t <= last_t; d_t++) { + old = r->r[d_t]; + r->r[d_t] = range_refine(d_t, r->r[d_t], s_t, r->r[s_t]); + if (!range_eq(r->r[d_t], old)) { + keep_going = true; + if (env.verbosity >= VERBOSE_VERY) + print_refinement(s_t, r->r[s_t], d_t, old, r->r[d_t], ctx); + } + } + } + + /* keep refining until we converge */ + if (keep_going) { + keep_going = false; + goto again; + } +} + +static void reg_state_set_const(struct reg_state *rs, enum num_t t, u64 val) +{ + enum num_t tt; + + rs->valid = true; + for (tt = first_t; tt <= last_t; tt++) + rs->r[tt] = tt == t ? 
range(t, val, val) : unkn[tt]; + + reg_state_refine(rs, t, rs->r[t], "CONST"); +} + +static void reg_state_cond(enum num_t t, struct reg_state *x, struct reg_state *y, enum op op, + struct reg_state *newx, struct reg_state *newy, const char *ctx) +{ + char buf[32]; + enum num_t ts[2]; + struct reg_state xx = *x, yy = *y; + int i, t_cnt; + struct range z1, z2; + + if (op == OP_EQ || op == OP_NE) { + /* OP_EQ and OP_NE are sign-agnostic, so we need to process + * both signed and unsigned domains at the same time + */ + ts[0] = t_unsigned(t); + ts[1] = t_signed(t); + t_cnt = 2; + } else { + ts[0] = t; + t_cnt = 1; + } + + for (i = 0; i < t_cnt; i++) { + t = ts[i]; + z1 = x->r[t]; + z2 = y->r[t]; + + range_cond(t, z1, z2, op, &z1, &z2); + + if (newx) { + snprintf(buf, sizeof(buf), "%s R1", ctx); + reg_state_refine(&xx, t, z1, buf); + } + if (newy) { + snprintf(buf, sizeof(buf), "%s R2", ctx); + reg_state_refine(&yy, t, z2, buf); + } + } + + if (newx) + *newx = xx; + if (newy) + *newy = yy; +} + +static int reg_state_branch_taken_op(enum num_t t, struct reg_state *x, struct reg_state *y, + enum op op) +{ + if (op == OP_EQ || op == OP_NE) { + /* OP_EQ and OP_NE are sign-agnostic */ + enum num_t tu = t_unsigned(t); + enum num_t ts = t_signed(t); + int br_u, br_s, br; + + br_u = range_branch_taken_op(tu, x->r[tu], y->r[tu], op); + br_s = range_branch_taken_op(ts, x->r[ts], y->r[ts], op); + + if (br_u >= 0 && br_s >= 0 && br_u != br_s) + ASSERT_FALSE(true, "branch taken inconsistency!\n"); + + /* if 64-bit ranges are indecisive, use 32-bit subranges to + * eliminate always/never taken branches, if possible + */ + if (br_u == -1 && (t == U64 || t == S64)) { + br = range_branch_taken_op(U32, x->r[U32], y->r[U32], op); + /* we can only reject for OP_EQ, never take branch + * based on lower 32 bits + */ + if (op == OP_EQ && br == 0) + return 0; + /* for OP_NEQ we can be conclusive only if lower 32 bits + * differ and thus inequality branch is always taken + */ + if (op == OP_NE && br == 1) + return 1; + + br = range_branch_taken_op(S32, x->r[S32], y->r[S32], op); + if (op == OP_EQ && br == 0) + return 0; + if (op == OP_NE && br == 1) + return 1; + } + + return br_u >= 0 ? br_u : br_s; + } + return range_branch_taken_op(t, x->r[t], y->r[t], op); +} + +/* ===================================== + * BPF PROGS GENERATION AND VERIFICATION + * ===================================== + */ +struct case_spec { + /* whether to init full register (r1) or sub-register (w1) */ + bool init_subregs; + /* whether to establish initial value range on full register (r1) or + * sub-register (w1) + */ + bool setup_subregs; + /* whether to establish initial value range using signed or unsigned + * comparisons (i.e., initialize umin/umax or smin/smax directly) + */ + bool setup_signed; + /* whether to perform comparison on full registers or sub-registers */ + bool compare_subregs; + /* whether to perform comparison using signed or unsigned operations */ + bool compare_signed; +}; + +/* Generate test BPF program based on provided test ranges, operation, and + * specifications about register bitness and signedness. 
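+ * + * In short, the generated program: seeds r6/w6 and r7/w7 with unpredictable values + * (via bpf_get_current_pid_tgid), clamps r6/w6 into [x.a, x.b] and r7/w7 into + * [y.a, y.b] (jumping to a shared exit block otherwise), then performs a single + * r6 <op> r7 (or w6 <op> w7) conditional jump; both the FALSE and TRUE blocks copy + * r6/r7 (w6/w7) into r0 so their verifier-tracked ranges appear in the log at the + * instruction positions recorded in *false_pos/*true_pos, and a branch that should + * never be taken is poisoned with a bogus helper call.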
+ */ +static int load_range_cmp_prog(struct range x, struct range y, enum op op, + int branch_taken, struct case_spec spec, + char *log_buf, size_t log_sz, + int *false_pos, int *true_pos) +{ +#define emit(insn) ({ \ + struct bpf_insn __insns[] = { insn }; \ + int __i; \ + for (__i = 0; __i < ARRAY_SIZE(__insns); __i++) \ + insns[cur_pos + __i] = __insns[__i]; \ + cur_pos += __i; \ +}) +#define JMP_TO(target) (target - cur_pos - 1) + int cur_pos = 0, exit_pos, fd, op_code; + struct bpf_insn insns[64]; + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .log_level = 2, + .log_buf = log_buf, + .log_size = log_sz, + .prog_flags = BPF_F_TEST_REG_INVARIANTS, + ); + + /* ; skip exit block below + * goto +2; + */ + emit(BPF_JMP_A(2)); + exit_pos = cur_pos; + /* ; exit block for all the preparatory conditionals + * out: + * r0 = 0; + * exit; + */ + emit(BPF_MOV64_IMM(BPF_REG_0, 0)); + emit(BPF_EXIT_INSN()); + /* + * ; assign r6/w6 and r7/w7 unpredictable u64/u32 value + * call bpf_get_current_pid_tgid; + * r6 = r0; | w6 = w0; + * call bpf_get_current_pid_tgid; + * r7 = r0; | w7 = w0; + */ + emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid)); + if (spec.init_subregs) + emit(BPF_MOV32_REG(BPF_REG_6, BPF_REG_0)); + else + emit(BPF_MOV64_REG(BPF_REG_6, BPF_REG_0)); + emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid)); + if (spec.init_subregs) + emit(BPF_MOV32_REG(BPF_REG_7, BPF_REG_0)); + else + emit(BPF_MOV64_REG(BPF_REG_7, BPF_REG_0)); + /* ; setup initial r6/w6 possible value range ([x.a, x.b]) + * r1 = %[x.a] ll; | w1 = %[x.a]; + * r2 = %[x.b] ll; | w2 = %[x.b]; + * if r6 < r1 goto out; | if w6 < w1 goto out; + * if r6 > r2 goto out; | if w6 > w2 goto out; + */ + if (spec.setup_subregs) { + emit(BPF_MOV32_IMM(BPF_REG_1, (s32)x.a)); + emit(BPF_MOV32_IMM(BPF_REG_2, (s32)x.b)); + emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT, + BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos))); + emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT, + BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos))); + } else { + emit(BPF_LD_IMM64(BPF_REG_1, x.a)); + emit(BPF_LD_IMM64(BPF_REG_2, x.b)); + emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT, + BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos))); + emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT, + BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos))); + } + /* ; setup initial r7/w7 possible value range ([y.a, y.b]) + * r1 = %[y.a] ll; | w1 = %[y.a]; + * r2 = %[y.b] ll; | w2 = %[y.b]; + * if r7 < r1 goto out; | if w7 < w1 goto out; + * if r7 > r2 goto out; | if w7 > w2 goto out; + */ + if (spec.setup_subregs) { + emit(BPF_MOV32_IMM(BPF_REG_1, (s32)y.a)); + emit(BPF_MOV32_IMM(BPF_REG_2, (s32)y.b)); + emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT, + BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos))); + emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT, + BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos))); + } else { + emit(BPF_LD_IMM64(BPF_REG_1, y.a)); + emit(BPF_LD_IMM64(BPF_REG_2, y.b)); + emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT, + BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos))); + emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT, + BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos))); + } + /* ; range test instruction + * if r6 <op> r7 goto +3; | if w6 <op> w7 goto +3; + */ + switch (op) { + case OP_LT: op_code = spec.compare_signed ? BPF_JSLT : BPF_JLT; break; + case OP_LE: op_code = spec.compare_signed ? BPF_JSLE : BPF_JLE; break; + case OP_GT: op_code = spec.compare_signed ? BPF_JSGT : BPF_JGT; break; + case OP_GE: op_code = spec.compare_signed ? 
BPF_JSGE : BPF_JGE; break; + case OP_EQ: op_code = BPF_JEQ; break; + case OP_NE: op_code = BPF_JNE; break; + default: + printf("unrecognized op %d\n", op); + return -ENOTSUP; + } + /* ; BEFORE conditional, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably + * ; this is used for debugging, as verifier doesn't always print + * ; register states as of condition jump instruction (e.g., when + * ; precision marking happens) + * r0 = r6; | w0 = w6; + * r0 = r7; | w0 = w7; + */ + if (spec.compare_subregs) { + emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6)); + emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7)); + } else { + emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6)); + emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7)); + } + if (spec.compare_subregs) + emit(BPF_JMP32_REG(op_code, BPF_REG_6, BPF_REG_7, 3)); + else + emit(BPF_JMP_REG(op_code, BPF_REG_6, BPF_REG_7, 3)); + /* ; FALSE branch, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably + * r0 = r6; | w0 = w6; + * r0 = r7; | w0 = w7; + * exit; + */ + *false_pos = cur_pos; + if (spec.compare_subregs) { + emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6)); + emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7)); + } else { + emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6)); + emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7)); + } + if (branch_taken == 1) /* false branch is never taken */ + emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */ + else + emit(BPF_EXIT_INSN()); + /* ; TRUE branch, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably + * r0 = r6; | w0 = w6; + * r0 = r7; | w0 = w7; + * exit; + */ + *true_pos = cur_pos; + if (spec.compare_subregs) { + emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6)); + emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7)); + } else { + emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6)); + emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7)); + } + if (branch_taken == 0) /* true branch is never taken */ + emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */ + emit(BPF_EXIT_INSN()); /* last instruction has to be exit */ + + fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "reg_bounds_test", + "GPL", insns, cur_pos, &opts); + if (fd < 0) + return fd; + + close(fd); + return 0; +#undef emit +#undef JMP_TO +} + +#define str_has_pfx(str, pfx) (strncmp(str, pfx, strlen(pfx)) == 0) + +/* Parse register state from verifier log. + * `s` should point to the start of "Rx = ..." substring in the verifier log. + */ +static int parse_reg_state(const char *s, struct reg_state *reg) +{ + /* There are two generic forms for SCALAR register: + * - known constant: R6_rwD=P%lld + * - range: R6_rwD=scalar(id=1,...), where "..." is a comma-separated + * list of optional range specifiers: + * - umin=%llu, if missing, assumed 0; + * - umax=%llu, if missing, assumed U64_MAX; + * - smin=%lld, if missing, assumed S64_MIN; + * - smax=%lld, if missing, assumed S64_MAX; + * - umin32=%d, if missing, assumed 0; + * - umax32=%d, if missing, assumed U32_MAX; + * - smin32=%d, if missing, assumed S32_MIN; + * - smax32=%d, if missing, assumed S32_MAX; + * - var_off=(%#llx; %#llx), tnum part, we don't care about it. + * + * If some of the values are equal, they will be grouped (but min/max + * are not mixed together, and similarly negative values are not + * grouped with non-negative ones). E.g.: + * + * R6_w=Pscalar(smin=smin32=0, smax=umax=umax32=1000) + * + * _rwD part is optional (and any of the letters can be missing). + * P (precision mark) is optional as well. + * + * Anything inside scalar() is optional, including id, of course. 
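+ * + * For example (a made-up log fragment, purely for illustration): parsing + * "R6_w=scalar(id=3,smin=smin32=-16,smax=smax32=15)" would set the s64/s32 ranges + * to [-16, 15] and leave u64/u32 ranges at their [0, U64_MAX] / [0, U32_MAX] defaults.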
+ */ + struct { + const char *pfx; + u64 *dst, def; + bool is_32, is_set; + } *f, fields[8] = { + {"smin=", &reg->r[S64].a, S64_MIN}, + {"smax=", &reg->r[S64].b, S64_MAX}, + {"umin=", &reg->r[U64].a, 0}, + {"umax=", &reg->r[U64].b, U64_MAX}, + {"smin32=", &reg->r[S32].a, (u32)S32_MIN, true}, + {"smax32=", &reg->r[S32].b, (u32)S32_MAX, true}, + {"umin32=", &reg->r[U32].a, 0, true}, + {"umax32=", &reg->r[U32].b, U32_MAX, true}, + }; + const char *p; + int i; + + p = strchr(s, '='); + if (!p) + return -EINVAL; + p++; + if (*p == 'P') + p++; + + if (!str_has_pfx(p, "scalar(")) { + long long sval; + enum num_t t; + + if (p[0] == '0' && p[1] == 'x') { + if (sscanf(p, "%llx", &sval) != 1) + return -EINVAL; + } else { + if (sscanf(p, "%lld", &sval) != 1) + return -EINVAL; + } + + reg->valid = true; + for (t = first_t; t <= last_t; t++) { + reg->r[t] = range(t, sval, sval); + } + return 0; + } + + p += sizeof("scalar"); + while (p) { + int midxs[ARRAY_SIZE(fields)], mcnt = 0; + u64 val; + + for (i = 0; i < ARRAY_SIZE(fields); i++) { + f = &fields[i]; + if (!str_has_pfx(p, f->pfx)) + continue; + midxs[mcnt++] = i; + p += strlen(f->pfx); + } + + if (mcnt) { + /* populate all matched fields */ + if (p[0] == '0' && p[1] == 'x') { + if (sscanf(p, "%llx", &val) != 1) + return -EINVAL; + } else { + if (sscanf(p, "%lld", &val) != 1) + return -EINVAL; + } + + for (i = 0; i < mcnt; i++) { + f = &fields[midxs[i]]; + f->is_set = true; + *f->dst = f->is_32 ? (u64)(u32)val : val; + } + } else if (str_has_pfx(p, "var_off")) { + /* skip "var_off=(0x0; 0x3f)" part completely */ + p = strchr(p, ')'); + if (!p) + return -EINVAL; + p++; + } + + p = strpbrk(p, ",)"); + if (*p == ')') + break; + if (p) + p++; + } + + reg->valid = true; + + for (i = 0; i < ARRAY_SIZE(fields); i++) { + f = &fields[i]; + if (!f->is_set) + *f->dst = f->def; + } + + return 0; +} + + +/* Parse all register states (TRUE/FALSE branches and DST/SRC registers) + * out of the verifier log for a corresponding test case BPF program. + */ +static int parse_range_cmp_log(const char *log_buf, struct case_spec spec, + int false_pos, int true_pos, + struct reg_state *false1_reg, struct reg_state *false2_reg, + struct reg_state *true1_reg, struct reg_state *true2_reg) +{ + struct { + int insn_idx; + int reg_idx; + const char *reg_upper; + struct reg_state *state; + } specs[] = { + {false_pos, 6, "R6=", false1_reg}, + {false_pos + 1, 7, "R7=", false2_reg}, + {true_pos, 6, "R6=", true1_reg}, + {true_pos + 1, 7, "R7=", true2_reg}, + }; + char buf[32]; + const char *p = log_buf, *q; + int i, err; + + for (i = 0; i < 4; i++) { + sprintf(buf, "%d: (%s) %s = %s%d", specs[i].insn_idx, + spec.compare_subregs ? "bc" : "bf", + spec.compare_subregs ? "w0" : "r0", + spec.compare_subregs ? 
"w" : "r", specs[i].reg_idx); + + q = strstr(p, buf); + if (!q) { + *specs[i].state = (struct reg_state){.valid = false}; + continue; + } + p = strstr(q, specs[i].reg_upper); + if (!p) + return -EINVAL; + err = parse_reg_state(p, specs[i].state); + if (err) + return -EINVAL; + } + return 0; +} + +/* Validate ranges match, and print details if they don't */ +static bool assert_range_eq(enum num_t t, struct range x, struct range y, + const char *ctx1, const char *ctx2) +{ + DEFINE_STRBUF(sb, 512); + + if (range_eq(x, y)) + return true; + + snappendf(sb, "MISMATCH %s.%s: ", ctx1, ctx2); + snprintf_range(t, sb, x); + snappendf(sb, " != "); + snprintf_range(t, sb, y); + + printf("%s\n", sb->buf); + + return false; +} + +/* Validate that register states match, and print details if they don't */ +static bool assert_reg_state_eq(struct reg_state *r, struct reg_state *e, const char *ctx) +{ + bool ok = true; + enum num_t t; + + if (r->valid != e->valid) { + printf("MISMATCH %s: actual %s != expected %s\n", ctx, + r->valid ? "<valid>" : "<invalid>", + e->valid ? "<valid>" : "<invalid>"); + return false; + } + + if (!r->valid) + return true; + + for (t = first_t; t <= last_t; t++) { + if (!assert_range_eq(t, r->r[t], e->r[t], ctx, t_str(t))) + ok = false; + } + + return ok; +} + +/* Printf verifier log, filtering out irrelevant noise */ +static void print_verifier_log(const char *buf) +{ + const char *p; + + while (buf[0]) { + p = strchrnul(buf, '\n'); + + /* filter out irrelevant precision backtracking logs */ + if (str_has_pfx(buf, "mark_precise: ")) + goto skip_line; + + printf("%.*s\n", (int)(p - buf), buf); + +skip_line: + buf = *p == '\0' ? p : p + 1; + } +} + +/* Simulate provided test case purely with our own range-based logic. + * This is done to set up expectations for verifier's branch_taken logic and + * verifier's register states in the verifier log. + */ +static void sim_case(enum num_t init_t, enum num_t cond_t, + struct range x, struct range y, enum op op, + struct reg_state *fr1, struct reg_state *fr2, + struct reg_state *tr1, struct reg_state *tr2, + int *branch_taken) +{ + const u64 A = x.a; + const u64 B = x.b; + const u64 C = y.a; + const u64 D = y.b; + struct reg_state rc; + enum op rev_op = complement_op(op); + enum num_t t; + + fr1->valid = fr2->valid = true; + tr1->valid = tr2->valid = true; + for (t = first_t; t <= last_t; t++) { + /* if we are initializing using 32-bit subregisters, + * full registers get upper 32 bits zeroed automatically + */ + struct range z = t_is_32(init_t) ? 
unkn_subreg(t) : unkn[t]; + + fr1->r[t] = fr2->r[t] = tr1->r[t] = tr2->r[t] = z; + } + + /* step 1: r1 >= A, r2 >= C */ + reg_state_set_const(&rc, init_t, A); + reg_state_cond(init_t, fr1, &rc, OP_GE, fr1, NULL, "r1>=A"); + reg_state_set_const(&rc, init_t, C); + reg_state_cond(init_t, fr2, &rc, OP_GE, fr2, NULL, "r2>=C"); + *tr1 = *fr1; + *tr2 = *fr2; + if (env.verbosity >= VERBOSE_VERY) { + printf("STEP1 (%s) R1: ", t_str(init_t)); print_reg_state(fr1, "\n"); + printf("STEP1 (%s) R2: ", t_str(init_t)); print_reg_state(fr2, "\n"); + } + + /* step 2: r1 <= B, r2 <= D */ + reg_state_set_const(&rc, init_t, B); + reg_state_cond(init_t, fr1, &rc, OP_LE, fr1, NULL, "r1<=B"); + reg_state_set_const(&rc, init_t, D); + reg_state_cond(init_t, fr2, &rc, OP_LE, fr2, NULL, "r2<=D"); + *tr1 = *fr1; + *tr2 = *fr2; + if (env.verbosity >= VERBOSE_VERY) { + printf("STEP2 (%s) R1: ", t_str(init_t)); print_reg_state(fr1, "\n"); + printf("STEP2 (%s) R2: ", t_str(init_t)); print_reg_state(fr2, "\n"); + } + + /* step 3: r1 <op> r2 */ + *branch_taken = reg_state_branch_taken_op(cond_t, fr1, fr2, op); + fr1->valid = fr2->valid = false; + tr1->valid = tr2->valid = false; + if (*branch_taken != 1) { /* FALSE is possible */ + fr1->valid = fr2->valid = true; + reg_state_cond(cond_t, fr1, fr2, rev_op, fr1, fr2, "FALSE"); + } + if (*branch_taken != 0) { /* TRUE is possible */ + tr1->valid = tr2->valid = true; + reg_state_cond(cond_t, tr1, tr2, op, tr1, tr2, "TRUE"); + } + if (env.verbosity >= VERBOSE_VERY) { + printf("STEP3 (%s) FALSE R1:", t_str(cond_t)); print_reg_state(fr1, "\n"); + printf("STEP3 (%s) FALSE R2:", t_str(cond_t)); print_reg_state(fr2, "\n"); + printf("STEP3 (%s) TRUE R1:", t_str(cond_t)); print_reg_state(tr1, "\n"); + printf("STEP3 (%s) TRUE R2:", t_str(cond_t)); print_reg_state(tr2, "\n"); + } +} + +/* =============================== + * HIGH-LEVEL TEST CASE VALIDATION + * =============================== + */ +static u32 upper_seeds[] = { + 0, + 1, + U32_MAX, + U32_MAX - 1, + S32_MAX, + (u32)S32_MIN, +}; + +static u32 lower_seeds[] = { + 0, + 1, + 2, (u32)-2, + 255, (u32)-255, + UINT_MAX, + UINT_MAX - 1, + INT_MAX, + (u32)INT_MIN, +}; + +struct ctx { + int val_cnt, subval_cnt, range_cnt, subrange_cnt; + u64 uvals[ARRAY_SIZE(upper_seeds) * ARRAY_SIZE(lower_seeds)]; + s64 svals[ARRAY_SIZE(upper_seeds) * ARRAY_SIZE(lower_seeds)]; + u32 usubvals[ARRAY_SIZE(lower_seeds)]; + s32 ssubvals[ARRAY_SIZE(lower_seeds)]; + struct range *uranges, *sranges; + struct range *usubranges, *ssubranges; + int max_failure_cnt, cur_failure_cnt; + int total_case_cnt, case_cnt; + int rand_case_cnt; + unsigned rand_seed; + __u64 start_ns; + char progress_ctx[64]; +}; + +static void cleanup_ctx(struct ctx *ctx) +{ + free(ctx->uranges); + free(ctx->sranges); + free(ctx->usubranges); + free(ctx->ssubranges); +} + +struct subtest_case { + enum num_t init_t; + enum num_t cond_t; + struct range x; + struct range y; + enum op op; +}; + +static void subtest_case_str(struct strbuf *sb, struct subtest_case *t, bool use_op) +{ + snappendf(sb, "(%s)", t_str(t->init_t)); + snprintf_range(t->init_t, sb, t->x); + snappendf(sb, " (%s)%s ", t_str(t->cond_t), use_op ? 
op_str(t->op) : "<op>"); + snprintf_range(t->init_t, sb, t->y); +} + +/* Generate and validate test case based on specific combination of setup + * register ranges (including their expected num_t domain), and conditional + * operation to perform (including num_t domain in which it has to be + * performed) + */ +static int verify_case_op(enum num_t init_t, enum num_t cond_t, + struct range x, struct range y, enum op op) +{ + char log_buf[256 * 1024]; + size_t log_sz = sizeof(log_buf); + int err, false_pos = 0, true_pos = 0, branch_taken; + struct reg_state fr1, fr2, tr1, tr2; + struct reg_state fe1, fe2, te1, te2; + bool failed = false; + struct case_spec spec = { + .init_subregs = (init_t == U32 || init_t == S32), + .setup_subregs = (init_t == U32 || init_t == S32), + .setup_signed = (init_t == S64 || init_t == S32), + .compare_subregs = (cond_t == U32 || cond_t == S32), + .compare_signed = (cond_t == S64 || cond_t == S32), + }; + + log_buf[0] = '\0'; + + sim_case(init_t, cond_t, x, y, op, &fe1, &fe2, &te1, &te2, &branch_taken); + + err = load_range_cmp_prog(x, y, op, branch_taken, spec, + log_buf, log_sz, &false_pos, &true_pos); + if (err) { + ASSERT_OK(err, "load_range_cmp_prog"); + failed = true; + } + + err = parse_range_cmp_log(log_buf, spec, false_pos, true_pos, + &fr1, &fr2, &tr1, &tr2); + if (err) { + ASSERT_OK(err, "parse_range_cmp_log"); + failed = true; + } + + if (!assert_reg_state_eq(&fr1, &fe1, "false_reg1") || + !assert_reg_state_eq(&fr2, &fe2, "false_reg2") || + !assert_reg_state_eq(&tr1, &te1, "true_reg1") || + !assert_reg_state_eq(&tr2, &te2, "true_reg2")) { + failed = true; + } + + if (failed || env.verbosity >= VERBOSE_NORMAL) { + if (failed || env.verbosity >= VERBOSE_VERY) { + printf("VERIFIER LOG:\n========================\n"); + print_verifier_log(log_buf); + printf("=====================\n"); + } + printf("ACTUAL FALSE1: "); print_reg_state(&fr1, "\n"); + printf("EXPECTED FALSE1: "); print_reg_state(&fe1, "\n"); + printf("ACTUAL FALSE2: "); print_reg_state(&fr2, "\n"); + printf("EXPECTED FALSE2: "); print_reg_state(&fe2, "\n"); + printf("ACTUAL TRUE1: "); print_reg_state(&tr1, "\n"); + printf("EXPECTED TRUE1: "); print_reg_state(&te1, "\n"); + printf("ACTUAL TRUE2: "); print_reg_state(&tr2, "\n"); + printf("EXPECTED TRUE2: "); print_reg_state(&te2, "\n"); + + return failed ? 
-EINVAL : 0; + } + + return 0; +} + +/* Given setup ranges and number types, go over all supported operations, + * generating individual subtest for each allowed combination + */ +static int verify_case_opt(struct ctx *ctx, enum num_t init_t, enum num_t cond_t, + struct range x, struct range y, bool is_subtest) +{ + DEFINE_STRBUF(sb, 256); + int err; + struct subtest_case sub = { + .init_t = init_t, + .cond_t = cond_t, + .x = x, + .y = y, + }; + + sb->pos = 0; /* reset position in strbuf */ + subtest_case_str(sb, &sub, false /* ignore op */); + if (is_subtest && !test__start_subtest(sb->buf)) + return 0; + + for (sub.op = first_op; sub.op <= last_op; sub.op++) { + sb->pos = 0; /* reset position in strbuf */ + subtest_case_str(sb, &sub, true /* print op */); + + if (env.verbosity >= VERBOSE_NORMAL) /* this speeds up debugging */ + printf("TEST CASE: %s\n", sb->buf); + + err = verify_case_op(init_t, cond_t, x, y, sub.op); + if (err || env.verbosity >= VERBOSE_NORMAL) + ASSERT_OK(err, sb->buf); + if (err) { + ctx->cur_failure_cnt++; + if (ctx->cur_failure_cnt > ctx->max_failure_cnt) + return err; + return 0; /* keep testing other cases */ + } + ctx->case_cnt++; + if ((ctx->case_cnt % 10000) == 0) { + double progress = (ctx->case_cnt + 0.0) / ctx->total_case_cnt; + u64 elapsed_ns = get_time_ns() - ctx->start_ns; + double remain_ns = elapsed_ns / progress * (1 - progress); + + fprintf(env.stderr, "PROGRESS (%s): %d/%d (%.2lf%%), " + "elapsed %llu mins (%.2lf hrs), " + "ETA %.0lf mins (%.2lf hrs)\n", + ctx->progress_ctx, + ctx->case_cnt, ctx->total_case_cnt, 100.0 * progress, + elapsed_ns / 1000000000 / 60, + elapsed_ns / 1000000000.0 / 3600, + remain_ns / 1000000000.0 / 60, + remain_ns / 1000000000.0 / 3600); + } + } + + return 0; +} + +static int verify_case(struct ctx *ctx, enum num_t init_t, enum num_t cond_t, + struct range x, struct range y) +{ + return verify_case_opt(ctx, init_t, cond_t, x, y, true /* is_subtest */); +} + +/* ================================ + * GENERATED CASES FROM SEED VALUES + * ================================ + */ +static int u64_cmp(const void *p1, const void *p2) +{ + u64 x1 = *(const u64 *)p1, x2 = *(const u64 *)p2; + + return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0; +} + +static int u32_cmp(const void *p1, const void *p2) +{ + u32 x1 = *(const u32 *)p1, x2 = *(const u32 *)p2; + + return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0; +} + +static int s64_cmp(const void *p1, const void *p2) +{ + s64 x1 = *(const s64 *)p1, x2 = *(const s64 *)p2; + + return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0; +} + +static int s32_cmp(const void *p1, const void *p2) +{ + s32 x1 = *(const s32 *)p1, x2 = *(const s32 *)p2; + + return x1 != x2 ? (x1 < x2 ? 
-1 : 1) : 0; +} + +/* Generate valid unique constants from seeds, both signed and unsigned */ +static void gen_vals(struct ctx *ctx) +{ + int i, j, cnt = 0; + + for (i = 0; i < ARRAY_SIZE(upper_seeds); i++) { + for (j = 0; j < ARRAY_SIZE(lower_seeds); j++) { + ctx->uvals[cnt++] = (((u64)upper_seeds[i]) << 32) | lower_seeds[j]; + } + } + + /* sort and compact uvals (i.e., it's `sort | uniq`) */ + qsort(ctx->uvals, cnt, sizeof(*ctx->uvals), u64_cmp); + for (i = 1, j = 0; i < cnt; i++) { + if (ctx->uvals[j] == ctx->uvals[i]) + continue; + j++; + ctx->uvals[j] = ctx->uvals[i]; + } + ctx->val_cnt = j + 1; + + /* we have exactly the same number of s64 values, they are just in + * a different order than u64s, so just sort them differently + */ + for (i = 0; i < ctx->val_cnt; i++) + ctx->svals[i] = ctx->uvals[i]; + qsort(ctx->svals, ctx->val_cnt, sizeof(*ctx->svals), s64_cmp); + + if (env.verbosity >= VERBOSE_SUPER) { + DEFINE_STRBUF(sb1, 256); + DEFINE_STRBUF(sb2, 256); + + for (i = 0; i < ctx->val_cnt; i++) { + sb1->pos = sb2->pos = 0; + snprintf_num(U64, sb1, ctx->uvals[i]); + snprintf_num(S64, sb2, ctx->svals[i]); + printf("SEED #%d: u64=%-20s s64=%-20s\n", i, sb1->buf, sb2->buf); + } + } + + /* 32-bit values are generated separately */ + cnt = 0; + for (i = 0; i < ARRAY_SIZE(lower_seeds); i++) { + ctx->usubvals[cnt++] = lower_seeds[i]; + } + + /* sort and compact usubvals (i.e., it's `sort | uniq`) */ + qsort(ctx->usubvals, cnt, sizeof(*ctx->usubvals), u32_cmp); + for (i = 1, j = 0; i < cnt; i++) { + if (ctx->usubvals[j] == ctx->usubvals[i]) + continue; + j++; + ctx->usubvals[j] = ctx->usubvals[i]; + } + ctx->subval_cnt = j + 1; + + for (i = 0; i < ctx->subval_cnt; i++) + ctx->ssubvals[i] = ctx->usubvals[i]; + qsort(ctx->ssubvals, ctx->subval_cnt, sizeof(*ctx->ssubvals), s32_cmp); + + if (env.verbosity >= VERBOSE_SUPER) { + DEFINE_STRBUF(sb1, 256); + DEFINE_STRBUF(sb2, 256); + + for (i = 0; i < ctx->subval_cnt; i++) { + sb1->pos = sb2->pos = 0; + snprintf_num(U32, sb1, ctx->usubvals[i]); + snprintf_num(S32, sb2, ctx->ssubvals[i]); + printf("SUBSEED #%d: u32=%-10s s32=%-10s\n", i, sb1->buf, sb2->buf); + } + } +} + +/* Generate valid ranges from upper/lower seeds */ +static int gen_ranges(struct ctx *ctx) +{ + int i, j, cnt = 0; + + for (i = 0; i < ctx->val_cnt; i++) { + for (j = i; j < ctx->val_cnt; j++) { + if (env.verbosity >= VERBOSE_SUPER) { + DEFINE_STRBUF(sb1, 256); + DEFINE_STRBUF(sb2, 256); + + sb1->pos = sb2->pos = 0; + snprintf_range(U64, sb1, range(U64, ctx->uvals[i], ctx->uvals[j])); + snprintf_range(S64, sb2, range(S64, ctx->svals[i], ctx->svals[j])); + printf("RANGE #%d: u64=%-40s s64=%-40s\n", cnt, sb1->buf, sb2->buf); + } + cnt++; + } + } + ctx->range_cnt = cnt; + + ctx->uranges = calloc(ctx->range_cnt, sizeof(*ctx->uranges)); + if (!ASSERT_OK_PTR(ctx->uranges, "uranges_calloc")) + return -EINVAL; + ctx->sranges = calloc(ctx->range_cnt, sizeof(*ctx->sranges)); + if (!ASSERT_OK_PTR(ctx->sranges, "sranges_calloc")) + return -EINVAL; + + cnt = 0; + for (i = 0; i < ctx->val_cnt; i++) { + for (j = i; j < ctx->val_cnt; j++) { + ctx->uranges[cnt] = range(U64, ctx->uvals[i], ctx->uvals[j]); + ctx->sranges[cnt] = range(S64, ctx->svals[i], ctx->svals[j]); + cnt++; + } + } + + cnt = 0; + for (i = 0; i < ctx->subval_cnt; i++) { + for (j = i; j < ctx->subval_cnt; j++) { + if (env.verbosity >= VERBOSE_SUPER) { + DEFINE_STRBUF(sb1, 256); + DEFINE_STRBUF(sb2, 256); + + sb1->pos = sb2->pos = 0; + snprintf_range(U32, sb1, range(U32, ctx->usubvals[i], ctx->usubvals[j])); + snprintf_range(S32, 
sb2, range(S32, ctx->ssubvals[i], ctx->ssubvals[j])); + printf("SUBRANGE #%d: u32=%-20s s32=%-20s\n", cnt, sb1->buf, sb2->buf); + } + cnt++; + } + } + ctx->subrange_cnt = cnt; + + ctx->usubranges = calloc(ctx->subrange_cnt, sizeof(*ctx->usubranges)); + if (!ASSERT_OK_PTR(ctx->usubranges, "usubranges_calloc")) + return -EINVAL; + ctx->ssubranges = calloc(ctx->subrange_cnt, sizeof(*ctx->ssubranges)); + if (!ASSERT_OK_PTR(ctx->ssubranges, "ssubranges_calloc")) + return -EINVAL; + + cnt = 0; + for (i = 0; i < ctx->subval_cnt; i++) { + for (j = i; j < ctx->subval_cnt; j++) { + ctx->usubranges[cnt] = range(U32, ctx->usubvals[i], ctx->usubvals[j]); + ctx->ssubranges[cnt] = range(S32, ctx->ssubvals[i], ctx->ssubvals[j]); + cnt++; + } + } + + return 0; +} + +static int parse_env_vars(struct ctx *ctx) +{ + const char *s; + + if ((s = getenv("REG_BOUNDS_MAX_FAILURE_CNT"))) { + errno = 0; + ctx->max_failure_cnt = strtol(s, NULL, 10); + if (errno || ctx->max_failure_cnt < 0) { + ASSERT_OK(-errno, "REG_BOUNDS_MAX_FAILURE_CNT"); + return -EINVAL; + } + } + + if ((s = getenv("REG_BOUNDS_RAND_CASE_CNT"))) { + errno = 0; + ctx->rand_case_cnt = strtol(s, NULL, 10); + if (errno || ctx->rand_case_cnt < 0) { + ASSERT_OK(-errno, "REG_BOUNDS_RAND_CASE_CNT"); + return -EINVAL; + } + } + + if ((s = getenv("REG_BOUNDS_RAND_SEED"))) { + errno = 0; + ctx->rand_seed = strtoul(s, NULL, 10); + if (errno) { + ASSERT_OK(-errno, "REG_BOUNDS_RAND_SEED"); + return -EINVAL; + } + } + + return 0; +} + +static int prepare_gen_tests(struct ctx *ctx) +{ + const char *s; + int err; + + if (!(s = getenv("SLOW_TESTS")) || strcmp(s, "1") != 0) { + test__skip(); + return -ENOTSUP; + } + + err = parse_env_vars(ctx); + if (err) + return err; + + gen_vals(ctx); + err = gen_ranges(ctx); + if (err) { + ASSERT_OK(err, "gen_ranges"); + return err; + } + + return 0; +} + +/* Go over generated constants and ranges and validate various supported + * combinations of them + */ +static void validate_gen_range_vs_const_64(enum num_t init_t, enum num_t cond_t) +{ + struct ctx ctx; + struct range rconst; + const struct range *ranges; + const u64 *vals; + int i, j; + + memset(&ctx, 0, sizeof(ctx)); + + if (prepare_gen_tests(&ctx)) + goto cleanup; + + ranges = init_t == U64 ? ctx.uranges : ctx.sranges; + vals = init_t == U64 ? ctx.uvals : (const u64 *)ctx.svals; + + ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.range_cnt * ctx.val_cnt); + ctx.start_ns = get_time_ns(); + snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx), + "RANGE x CONST, %s -> %s", + t_str(init_t), t_str(cond_t)); + + for (i = 0; i < ctx.val_cnt; i++) { + for (j = 0; j < ctx.range_cnt; j++) { + rconst = range(init_t, vals[i], vals[i]); + + /* (u64|s64)(<range> x <const>) */ + if (verify_case(&ctx, init_t, cond_t, ranges[j], rconst)) + goto cleanup; + /* (u64|s64)(<const> x <range>) */ + if (verify_case(&ctx, init_t, cond_t, rconst, ranges[j])) + goto cleanup; + } + } + +cleanup: + cleanup_ctx(&ctx); +} + +static void validate_gen_range_vs_const_32(enum num_t init_t, enum num_t cond_t) +{ + struct ctx ctx; + struct range rconst; + const struct range *ranges; + const u32 *vals; + int i, j; + + memset(&ctx, 0, sizeof(ctx)); + + if (prepare_gen_tests(&ctx)) + goto cleanup; + + ranges = init_t == U32 ? ctx.usubranges : ctx.ssubranges; + vals = init_t == U32 ? 
ctx.usubvals : (const u32 *)ctx.ssubvals; + + ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.subrange_cnt * ctx.subval_cnt); + ctx.start_ns = get_time_ns(); + snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx), + "RANGE x CONST, %s -> %s", + t_str(init_t), t_str(cond_t)); + + for (i = 0; i < ctx.subval_cnt; i++) { + for (j = 0; j < ctx.subrange_cnt; j++) { + rconst = range(init_t, vals[i], vals[i]); + + /* (u32|s32)(<range> x <const>) */ + if (verify_case(&ctx, init_t, cond_t, ranges[j], rconst)) + goto cleanup; + /* (u32|s32)(<const> x <range>) */ + if (verify_case(&ctx, init_t, cond_t, rconst, ranges[j])) + goto cleanup; + } + } + +cleanup: + cleanup_ctx(&ctx); +} + +static void validate_gen_range_vs_range(enum num_t init_t, enum num_t cond_t) +{ + struct ctx ctx; + const struct range *ranges; + int i, j, rcnt; + + memset(&ctx, 0, sizeof(ctx)); + + if (prepare_gen_tests(&ctx)) + goto cleanup; + + switch (init_t) + { + case U64: + ranges = ctx.uranges; + rcnt = ctx.range_cnt; + break; + case U32: + ranges = ctx.usubranges; + rcnt = ctx.subrange_cnt; + break; + case S64: + ranges = ctx.sranges; + rcnt = ctx.range_cnt; + break; + case S32: + ranges = ctx.ssubranges; + rcnt = ctx.subrange_cnt; + break; + default: + printf("validate_gen_range_vs_range!\n"); + exit(1); + } + + ctx.total_case_cnt = (last_op - first_op + 1) * (2 * rcnt * (rcnt + 1) / 2); + ctx.start_ns = get_time_ns(); + snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx), + "RANGE x RANGE, %s -> %s", + t_str(init_t), t_str(cond_t)); + + for (i = 0; i < rcnt; i++) { + for (j = i; j < rcnt; j++) { + /* (<range> x <range>) */ + if (verify_case(&ctx, init_t, cond_t, ranges[i], ranges[j])) + goto cleanup; + if (verify_case(&ctx, init_t, cond_t, ranges[j], ranges[i])) + goto cleanup; + } + } + +cleanup: + cleanup_ctx(&ctx); +} + +/* Go over thousands of test cases generated from initial seed values. + * Given these take a long time, guard them behind the SLOW_TESTS=1 envvar. If the + * envvar is not set, this test is skipped during test_progs testing. + * + * We split this up into smaller subsets based on initialization and + * conditional numeric domains to get an easy parallelization with test_progs' + * -j argument. 
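+ * + * For example (hypothetical invocation, exact flags depend on how test_progs is + * driven in a given setup): + * + * SLOW_TESTS=1 ./test_progs -a 'reg_bounds_gen_*' -j + * + * would run only these generated subsets, in parallel.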
+ */ + +/* RANGE x CONST, U64 initial range */ +void test_reg_bounds_gen_consts_u64_u64(void) { validate_gen_range_vs_const_64(U64, U64); } +void test_reg_bounds_gen_consts_u64_s64(void) { validate_gen_range_vs_const_64(U64, S64); } +void test_reg_bounds_gen_consts_u64_u32(void) { validate_gen_range_vs_const_64(U64, U32); } +void test_reg_bounds_gen_consts_u64_s32(void) { validate_gen_range_vs_const_64(U64, S32); } +/* RANGE x CONST, S64 initial range */ +void test_reg_bounds_gen_consts_s64_u64(void) { validate_gen_range_vs_const_64(S64, U64); } +void test_reg_bounds_gen_consts_s64_s64(void) { validate_gen_range_vs_const_64(S64, S64); } +void test_reg_bounds_gen_consts_s64_u32(void) { validate_gen_range_vs_const_64(S64, U32); } +void test_reg_bounds_gen_consts_s64_s32(void) { validate_gen_range_vs_const_64(S64, S32); } +/* RANGE x CONST, U32 initial range */ +void test_reg_bounds_gen_consts_u32_u64(void) { validate_gen_range_vs_const_32(U32, U64); } +void test_reg_bounds_gen_consts_u32_s64(void) { validate_gen_range_vs_const_32(U32, S64); } +void test_reg_bounds_gen_consts_u32_u32(void) { validate_gen_range_vs_const_32(U32, U32); } +void test_reg_bounds_gen_consts_u32_s32(void) { validate_gen_range_vs_const_32(U32, S32); } +/* RANGE x CONST, S32 initial range */ +void test_reg_bounds_gen_consts_s32_u64(void) { validate_gen_range_vs_const_32(S32, U64); } +void test_reg_bounds_gen_consts_s32_s64(void) { validate_gen_range_vs_const_32(S32, S64); } +void test_reg_bounds_gen_consts_s32_u32(void) { validate_gen_range_vs_const_32(S32, U32); } +void test_reg_bounds_gen_consts_s32_s32(void) { validate_gen_range_vs_const_32(S32, S32); } + +/* RANGE x RANGE, U64 initial range */ +void test_reg_bounds_gen_ranges_u64_u64(void) { validate_gen_range_vs_range(U64, U64); } +void test_reg_bounds_gen_ranges_u64_s64(void) { validate_gen_range_vs_range(U64, S64); } +void test_reg_bounds_gen_ranges_u64_u32(void) { validate_gen_range_vs_range(U64, U32); } +void test_reg_bounds_gen_ranges_u64_s32(void) { validate_gen_range_vs_range(U64, S32); } +/* RANGE x RANGE, S64 initial range */ +void test_reg_bounds_gen_ranges_s64_u64(void) { validate_gen_range_vs_range(S64, U64); } +void test_reg_bounds_gen_ranges_s64_s64(void) { validate_gen_range_vs_range(S64, S64); } +void test_reg_bounds_gen_ranges_s64_u32(void) { validate_gen_range_vs_range(S64, U32); } +void test_reg_bounds_gen_ranges_s64_s32(void) { validate_gen_range_vs_range(S64, S32); } +/* RANGE x RANGE, U32 initial range */ +void test_reg_bounds_gen_ranges_u32_u64(void) { validate_gen_range_vs_range(U32, U64); } +void test_reg_bounds_gen_ranges_u32_s64(void) { validate_gen_range_vs_range(U32, S64); } +void test_reg_bounds_gen_ranges_u32_u32(void) { validate_gen_range_vs_range(U32, U32); } +void test_reg_bounds_gen_ranges_u32_s32(void) { validate_gen_range_vs_range(U32, S32); } +/* RANGE x RANGE, S32 initial range */ +void test_reg_bounds_gen_ranges_s32_u64(void) { validate_gen_range_vs_range(S32, U64); } +void test_reg_bounds_gen_ranges_s32_s64(void) { validate_gen_range_vs_range(S32, S64); } +void test_reg_bounds_gen_ranges_s32_u32(void) { validate_gen_range_vs_range(S32, U32); } +void test_reg_bounds_gen_ranges_s32_s32(void) { validate_gen_range_vs_range(S32, S32); } + +#define DEFAULT_RAND_CASE_CNT 100 + +#define RAND_21BIT_MASK ((1 << 22) - 1) + +static u64 rand_u64() +{ + /* RAND_MAX is guaranteed to be at least 1<<15, but in practice it + * seems to be 1<<31, so we need to call it thrice to get full u64; + * we'll use a roughly equal split: 22 + 21 + 21 bits 
+ */ + return ((u64)random() << 42) | + (((u64)random() & RAND_21BIT_MASK) << 21) | + (random() & RAND_21BIT_MASK); +} + +static u64 rand_const(enum num_t t) +{ + return cast_t(t, rand_u64()); +} + +static struct range rand_range(enum num_t t) +{ + u64 x = rand_const(t), y = rand_const(t); + + return range(t, min_t(t, x, y), max_t(t, x, y)); +} + +static void validate_rand_ranges(enum num_t init_t, enum num_t cond_t, bool const_range) +{ + struct ctx ctx; + struct range range1, range2; + int err, i; + u64 t; + + memset(&ctx, 0, sizeof(ctx)); + + err = parse_env_vars(&ctx); + if (err) { + ASSERT_OK(err, "parse_env_vars"); + return; + } + + if (ctx.rand_case_cnt == 0) + ctx.rand_case_cnt = DEFAULT_RAND_CASE_CNT; + if (ctx.rand_seed == 0) + ctx.rand_seed = (unsigned)get_time_ns(); + + srandom(ctx.rand_seed); + + ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.rand_case_cnt); + ctx.start_ns = get_time_ns(); + snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx), + "[RANDOM SEED %u] RANGE x %s, %s -> %s", + ctx.rand_seed, const_range ? "CONST" : "RANGE", + t_str(init_t), t_str(cond_t)); + + for (i = 0; i < ctx.rand_case_cnt; i++) { + range1 = rand_range(init_t); + if (const_range) { + t = rand_const(init_t); + range2 = range(init_t, t, t); + } else { + range2 = rand_range(init_t); + } + + /* <range1> x <range2> */ + if (verify_case_opt(&ctx, init_t, cond_t, range1, range2, false /* !is_subtest */)) + goto cleanup; + /* <range2> x <range1> */ + if (verify_case_opt(&ctx, init_t, cond_t, range2, range1, false /* !is_subtest */)) + goto cleanup; + } + +cleanup: + /* make sure we report random seed for reproducing */ + ASSERT_TRUE(true, ctx.progress_ctx); + cleanup_ctx(&ctx); +} + +/* [RANDOM] RANGE x CONST, U64 initial range */ +void test_reg_bounds_rand_consts_u64_u64(void) { validate_rand_ranges(U64, U64, true /* const */); } +void test_reg_bounds_rand_consts_u64_s64(void) { validate_rand_ranges(U64, S64, true /* const */); } +void test_reg_bounds_rand_consts_u64_u32(void) { validate_rand_ranges(U64, U32, true /* const */); } +void test_reg_bounds_rand_consts_u64_s32(void) { validate_rand_ranges(U64, S32, true /* const */); } +/* [RANDOM] RANGE x CONST, S64 initial range */ +void test_reg_bounds_rand_consts_s64_u64(void) { validate_rand_ranges(S64, U64, true /* const */); } +void test_reg_bounds_rand_consts_s64_s64(void) { validate_rand_ranges(S64, S64, true /* const */); } +void test_reg_bounds_rand_consts_s64_u32(void) { validate_rand_ranges(S64, U32, true /* const */); } +void test_reg_bounds_rand_consts_s64_s32(void) { validate_rand_ranges(S64, S32, true /* const */); } +/* [RANDOM] RANGE x CONST, U32 initial range */ +void test_reg_bounds_rand_consts_u32_u64(void) { validate_rand_ranges(U32, U64, true /* const */); } +void test_reg_bounds_rand_consts_u32_s64(void) { validate_rand_ranges(U32, S64, true /* const */); } +void test_reg_bounds_rand_consts_u32_u32(void) { validate_rand_ranges(U32, U32, true /* const */); } +void test_reg_bounds_rand_consts_u32_s32(void) { validate_rand_ranges(U32, S32, true /* const */); } +/* [RANDOM] RANGE x CONST, S32 initial range */ +void test_reg_bounds_rand_consts_s32_u64(void) { validate_rand_ranges(S32, U64, true /* const */); } +void test_reg_bounds_rand_consts_s32_s64(void) { validate_rand_ranges(S32, S64, true /* const */); } +void test_reg_bounds_rand_consts_s32_u32(void) { validate_rand_ranges(S32, U32, true /* const */); } +void test_reg_bounds_rand_consts_s32_s32(void) { validate_rand_ranges(S32, S32, true /* const */); } + +/* [RANDOM] 
RANGE x RANGE, U64 initial range */ +void test_reg_bounds_rand_ranges_u64_u64(void) { validate_rand_ranges(U64, U64, false /* range */); } +void test_reg_bounds_rand_ranges_u64_s64(void) { validate_rand_ranges(U64, S64, false /* range */); } +void test_reg_bounds_rand_ranges_u64_u32(void) { validate_rand_ranges(U64, U32, false /* range */); } +void test_reg_bounds_rand_ranges_u64_s32(void) { validate_rand_ranges(U64, S32, false /* range */); } +/* [RANDOM] RANGE x RANGE, S64 initial range */ +void test_reg_bounds_rand_ranges_s64_u64(void) { validate_rand_ranges(S64, U64, false /* range */); } +void test_reg_bounds_rand_ranges_s64_s64(void) { validate_rand_ranges(S64, S64, false /* range */); } +void test_reg_bounds_rand_ranges_s64_u32(void) { validate_rand_ranges(S64, U32, false /* range */); } +void test_reg_bounds_rand_ranges_s64_s32(void) { validate_rand_ranges(S64, S32, false /* range */); } +/* [RANDOM] RANGE x RANGE, U32 initial range */ +void test_reg_bounds_rand_ranges_u32_u64(void) { validate_rand_ranges(U32, U64, false /* range */); } +void test_reg_bounds_rand_ranges_u32_s64(void) { validate_rand_ranges(U32, S64, false /* range */); } +void test_reg_bounds_rand_ranges_u32_u32(void) { validate_rand_ranges(U32, U32, false /* range */); } +void test_reg_bounds_rand_ranges_u32_s32(void) { validate_rand_ranges(U32, S32, false /* range */); } +/* [RANDOM] RANGE x RANGE, S32 initial range */ +void test_reg_bounds_rand_ranges_s32_u64(void) { validate_rand_ranges(S32, U64, false /* range */); } +void test_reg_bounds_rand_ranges_s32_s64(void) { validate_rand_ranges(S32, S64, false /* range */); } +void test_reg_bounds_rand_ranges_s32_u32(void) { validate_rand_ranges(S32, U32, false /* range */); } +void test_reg_bounds_rand_ranges_s32_s32(void) { validate_rand_ranges(S32, S32, false /* range */); } + +/* A set of hard-coded "interesting" cases to validate as part of normal + * test_progs test runs + */ +static struct subtest_case crafted_cases[] = { + {U64, U64, {0, 0xffffffff}, {0, 0}}, + {U64, U64, {0, 0x80000000}, {0, 0}}, + {U64, U64, {0x100000000ULL, 0x100000100ULL}, {0, 0}}, + {U64, U64, {0x100000000ULL, 0x180000000ULL}, {0, 0}}, + {U64, U64, {0x100000000ULL, 0x1ffffff00ULL}, {0, 0}}, + {U64, U64, {0x100000000ULL, 0x1ffffff01ULL}, {0, 0}}, + {U64, U64, {0x100000000ULL, 0x1fffffffeULL}, {0, 0}}, + {U64, U64, {0x100000001ULL, 0x1000000ffULL}, {0, 0}}, + + /* single point overlap, interesting BPF_EQ and BPF_NE interactions */ + {U64, U64, {0, 1}, {1, 0x80000000}}, + {U64, S64, {0, 1}, {1, 0x80000000}}, + {U64, U32, {0, 1}, {1, 0x80000000}}, + {U64, S32, {0, 1}, {1, 0x80000000}}, + + {U64, S64, {0, 0xffffffff00000000ULL}, {0, 0}}, + {U64, S64, {0x7fffffffffffffffULL, 0xffffffff00000000ULL}, {0, 0}}, + {U64, S64, {0x7fffffff00000001ULL, 0xffffffff00000000ULL}, {0, 0}}, + {U64, S64, {0, 0xffffffffULL}, {1, 1}}, + {U64, S64, {0, 0xffffffffULL}, {0x7fffffff, 0x7fffffff}}, + + {U64, U32, {0, 0x100000000}, {0, 0}}, + {U64, U32, {0xfffffffe, 0x100000000}, {0x80000000, 0x80000000}}, + + {U64, S32, {0, 0xffffffff00000000ULL}, {0, 0}}, + /* these are tricky cases where lower 32 bits allow to tighten 64 + * bit boundaries based on tightened lower 32 bit boundaries + */ + {U64, S32, {0, 0x0ffffffffULL}, {0, 0}}, + {U64, S32, {0, 0x100000000ULL}, {0, 0}}, + {U64, S32, {0, 0x100000001ULL}, {0, 0}}, + {U64, S32, {0, 0x180000000ULL}, {0, 0}}, + {U64, S32, {0, 0x17fffffffULL}, {0, 0}}, + {U64, S32, {0, 0x180000001ULL}, {0, 0}}, + + /* verifier knows about [-1, 0] range for s32 for this case already */ + 
{S64, S64, {0xffffffffffffffffULL, 0}, {0xffffffff00000000ULL, 0xffffffff00000000ULL}}, + /* but didn't know about these cases initially */ + {U64, U64, {0xffffffff, 0x100000000ULL}, {0, 0}}, /* s32: [-1, 0] */ + {U64, U64, {0xffffffff, 0x100000001ULL}, {0, 0}}, /* s32: [-1, 1] */ + + /* longer convergence case: learning from u64 -> s64 -> u64 -> u32, + * arriving at u32: [1, U32_MAX] (instead of more pessimistic [0, U32_MAX]) + */ + {S64, U64, {0xffffffff00000001ULL, 0}, {0xffffffff00000000ULL, 0xffffffff00000000ULL}}, + + {U32, U32, {1, U32_MAX}, {0, 0}}, + + {U32, S32, {0, U32_MAX}, {U32_MAX, U32_MAX}}, + + {S32, U64, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}, {(u32)(s32)-255, 0}}, + {S32, S64, {(u32)(s32)S32_MIN, (u32)(s32)-255}, {(u32)(s32)-2, 0}}, + {S32, S64, {0, 1}, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}}, + {S32, U32, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}}, +}; + +/* Go over crafted hard-coded cases. This is fast, so we do it as part of + * normal test_progs run. + */ +void test_reg_bounds_crafted(void) +{ + struct ctx ctx; + int i; + + memset(&ctx, 0, sizeof(ctx)); + + for (i = 0; i < ARRAY_SIZE(crafted_cases); i++) { + struct subtest_case *c = &crafted_cases[i]; + + verify_case(&ctx, c->init_t, c->cond_t, c->x, c->y); + verify_case(&ctx, c->init_t, c->cond_t, c->y, c->x); + } + + cleanup_ctx(&ctx); +} diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c index f29c08d93beb..18d451be57c8 100644 --- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c @@ -13,22 +13,22 @@ static struct { const char *err_msg; } spin_lock_fail_tests[] = { { "lock_id_kptr_preserve", - "5: (bf) r1 = r0 ; R0_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) " - "R1_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n" + "5: (bf) r1 = r0 ; R0_w=ptr_foo(id=2,ref_obj_id=2) " + "R1_w=ptr_foo(id=2,ref_obj_id=2) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n" "R1 type=ptr_ expected=percpu_ptr_" }, { "lock_id_global_zero", - "; R1_w=map_value(off=0,ks=4,vs=4,imm=0)\n2: (85) call bpf_this_cpu_ptr#154\n" + "; R1_w=map_value(map=.data.A,ks=4,vs=4)\n2: (85) call bpf_this_cpu_ptr#154\n" "R1 type=map_value expected=percpu_ptr_" }, { "lock_id_mapval_preserve", "[0-9]\\+: (bf) r1 = r0 ;" - " R0_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)" - " R1_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)\n" + " R0_w=map_value(id=1,map=array_map,ks=4,vs=8)" + " R1_w=map_value(id=1,map=array_map,ks=4,vs=8)\n" "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n" "R1 type=map_value expected=percpu_ptr_" }, { "lock_id_innermapval_preserve", "[0-9]\\+: (bf) r1 = r0 ;" - " R0=map_value(id=2,off=0,ks=4,vs=8,imm=0)" - " R1_w=map_value(id=2,off=0,ks=4,vs=8,imm=0)\n" + " R0=map_value(id=2,ks=4,vs=8)" + " R1_w=map_value(id=2,ks=4,vs=8)\n" "[0-9]\\+: (85) call bpf_this_cpu_ptr#154\n" "R1 type=map_value expected=percpu_ptr_" }, { "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" }, diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c index 51883ccb8020..196abf223465 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c @@ -2387,12 +2387,9 @@ static int generate_dummy_prog(void) const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn); LIBBPF_OPTS(bpf_prog_load_opts, opts); const size_t log_buf_sz = 256; - char *log_buf; + char 
log_buf[log_buf_sz]; int fd = -1; - log_buf = malloc(log_buf_sz); - if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc")) - return fd; opts.log_buf = log_buf; opts.log_size = log_buf_sz; @@ -2402,7 +2399,6 @@ static int generate_dummy_prog(void) prog_insns, prog_insn_cnt, &opts); ASSERT_STREQ(log_buf, "", "log_0"); ASSERT_GE(fd, 0, "prog_fd"); - free(log_buf); return fd; } diff --git a/tools/testing/selftests/bpf/prog_tests/vmlinux.c b/tools/testing/selftests/bpf/prog_tests/vmlinux.c index 72310cfc6474..6fb2217d940b 100644 --- a/tools/testing/selftests/bpf/prog_tests/vmlinux.c +++ b/tools/testing/selftests/bpf/prog_tests/vmlinux.c @@ -16,27 +16,27 @@ static void nsleep() void test_vmlinux(void) { - int duration = 0, err; + int err; struct test_vmlinux* skel; struct test_vmlinux__bss *bss; skel = test_vmlinux__open_and_load(); - if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + if (!ASSERT_OK_PTR(skel, "test_vmlinux__open_and_load")) return; bss = skel->bss; err = test_vmlinux__attach(skel); - if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + if (!ASSERT_OK(err, "test_vmlinux__attach")) goto cleanup; /* trigger everything */ nsleep(); - CHECK(!bss->tp_called, "tp", "not called\n"); - CHECK(!bss->raw_tp_called, "raw_tp", "not called\n"); - CHECK(!bss->tp_btf_called, "tp_btf", "not called\n"); - CHECK(!bss->kprobe_called, "kprobe", "not called\n"); - CHECK(!bss->fentry_called, "fentry", "not called\n"); + ASSERT_TRUE(bss->tp_called, "tp"); + ASSERT_TRUE(bss->raw_tp_called, "raw_tp"); + ASSERT_TRUE(bss->tp_btf_called, "tp_btf"); + ASSERT_TRUE(bss->kprobe_called, "kprobe"); + ASSERT_TRUE(bss->fentry_called, "fentry"); cleanup: test_vmlinux__destroy(skel); |