Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
51 files changed, 6570 insertions, 348 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c new file mode 100644 index 000000000000..a83111a32d4a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +#define EMBED_FILE(NAME, PATH) \ +asm ( \ +" .pushsection \".rodata\", \"a\", @progbits \n" \ +" .global "#NAME"_data \n" \ +#NAME"_data: \n" \ +" .incbin \"" PATH "\" \n" \ +#NAME"_data_end: \n" \ +" .global "#NAME"_size \n" \ +" .type "#NAME"_size, @object \n" \ +" .size "#NAME"_size, 4 \n" \ +" .align 4, \n" \ +#NAME"_size: \n" \ +" .int "#NAME"_data_end - "#NAME"_data \n" \ +" .popsection \n" \ +); \ +extern char NAME##_data[]; \ +extern int NAME##_size; + +ssize_t get_base_addr() { + size_t start; + char buf[256]; + FILE *f; + + f = fopen("/proc/self/maps", "r"); + if (!f) + return -errno; + + while (fscanf(f, "%zx-%*x %s %*s\n", &start, buf) == 2) { + if (strcmp(buf, "r-xp") == 0) { + fclose(f); + return start; + } + } + + fclose(f); + return -EINVAL; +} + +EMBED_FILE(probe, "test_attach_probe.o"); + +void test_attach_probe(void) +{ + const char *kprobe_name = "kprobe/sys_nanosleep"; + const char *kretprobe_name = "kretprobe/sys_nanosleep"; + const char *uprobe_name = "uprobe/trigger_func"; + const char *uretprobe_name = "uretprobe/trigger_func"; + const int kprobe_idx = 0, kretprobe_idx = 1; + const int uprobe_idx = 2, uretprobe_idx = 3; + const char *obj_name = "attach_probe"; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts, + .object_name = obj_name, + .relaxed_maps = true, + ); + struct bpf_program *kprobe_prog, *kretprobe_prog; + struct bpf_program *uprobe_prog, *uretprobe_prog; + struct bpf_object *obj; + int err, duration = 0, res; + struct bpf_link *kprobe_link = NULL; + struct bpf_link *kretprobe_link = NULL; + struct bpf_link *uprobe_link = NULL; + struct bpf_link *uretprobe_link = NULL; + int results_map_fd; + size_t uprobe_offset; + ssize_t base_addr; + + base_addr = get_base_addr(); + if (CHECK(base_addr < 0, "get_base_addr", + "failed to find base addr: %zd", base_addr)) + return; + uprobe_offset = (size_t)&get_base_addr - base_addr; + + /* open object */ + obj = bpf_object__open_mem(probe_data, probe_size, &open_opts); + if (CHECK(IS_ERR(obj), "obj_open_mem", "err %ld\n", PTR_ERR(obj))) + return; + + if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name", + "wrong obj name '%s', expected '%s'\n", + bpf_object__name(obj), obj_name)) + goto cleanup; + + kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name); + if (CHECK(!kprobe_prog, "find_probe", + "prog '%s' not found\n", kprobe_name)) + goto cleanup; + kretprobe_prog = bpf_object__find_program_by_title(obj, kretprobe_name); + if (CHECK(!kretprobe_prog, "find_probe", + "prog '%s' not found\n", kretprobe_name)) + goto cleanup; + uprobe_prog = bpf_object__find_program_by_title(obj, uprobe_name); + if (CHECK(!uprobe_prog, "find_probe", + "prog '%s' not found\n", uprobe_name)) + goto cleanup; + uretprobe_prog = bpf_object__find_program_by_title(obj, uretprobe_name); + if (CHECK(!uretprobe_prog, "find_probe", + "prog '%s' not found\n", uretprobe_name)) + goto cleanup; + + /* create maps && load programs */ + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto cleanup; + + /* load maps */ + results_map_fd = bpf_find_map(__func__, obj, "results_map"); + if (CHECK(results_map_fd < 0, "find_results_map", + "err %d\n", results_map_fd)) + goto 
cleanup; + + kprobe_link = bpf_program__attach_kprobe(kprobe_prog, + false /* retprobe */, + SYS_NANOSLEEP_KPROBE_NAME); + if (CHECK(IS_ERR(kprobe_link), "attach_kprobe", + "err %ld\n", PTR_ERR(kprobe_link))) { + kprobe_link = NULL; + goto cleanup; + } + kretprobe_link = bpf_program__attach_kprobe(kretprobe_prog, + true /* retprobe */, + SYS_NANOSLEEP_KPROBE_NAME); + if (CHECK(IS_ERR(kretprobe_link), "attach_kretprobe", + "err %ld\n", PTR_ERR(kretprobe_link))) { + kretprobe_link = NULL; + goto cleanup; + } + uprobe_link = bpf_program__attach_uprobe(uprobe_prog, + false /* retprobe */, + 0 /* self pid */, + "/proc/self/exe", + uprobe_offset); + if (CHECK(IS_ERR(uprobe_link), "attach_uprobe", + "err %ld\n", PTR_ERR(uprobe_link))) { + uprobe_link = NULL; + goto cleanup; + } + uretprobe_link = bpf_program__attach_uprobe(uretprobe_prog, + true /* retprobe */, + -1 /* any pid */, + "/proc/self/exe", + uprobe_offset); + if (CHECK(IS_ERR(uretprobe_link), "attach_uretprobe", + "err %ld\n", PTR_ERR(uretprobe_link))) { + uretprobe_link = NULL; + goto cleanup; + } + + /* trigger & validate kprobe && kretprobe */ + usleep(1); + + err = bpf_map_lookup_elem(results_map_fd, &kprobe_idx, &res); + if (CHECK(err, "get_kprobe_res", + "failed to get kprobe res: %d\n", err)) + goto cleanup; + if (CHECK(res != kprobe_idx + 1, "check_kprobe_res", + "wrong kprobe res: %d\n", res)) + goto cleanup; + + err = bpf_map_lookup_elem(results_map_fd, &kretprobe_idx, &res); + if (CHECK(err, "get_kretprobe_res", + "failed to get kretprobe res: %d\n", err)) + goto cleanup; + if (CHECK(res != kretprobe_idx + 1, "check_kretprobe_res", + "wrong kretprobe res: %d\n", res)) + goto cleanup; + + /* trigger & validate uprobe & uretprobe */ + get_base_addr(); + + err = bpf_map_lookup_elem(results_map_fd, &uprobe_idx, &res); + if (CHECK(err, "get_uprobe_res", + "failed to get uprobe res: %d\n", err)) + goto cleanup; + if (CHECK(res != uprobe_idx + 1, "check_uprobe_res", + "wrong uprobe res: %d\n", res)) + goto cleanup; + + err = bpf_map_lookup_elem(results_map_fd, &uretprobe_idx, &res); + if (CHECK(err, "get_uretprobe_res", + "failed to get uretprobe res: %d\n", err)) + goto cleanup; + if (CHECK(res != uretprobe_idx + 1, "check_uretprobe_res", + "wrong uretprobe res: %d\n", res)) + goto cleanup; + +cleanup: + bpf_link__destroy(kprobe_link); + bpf_link__destroy(kretprobe_link); + bpf_link__destroy(uprobe_link); + bpf_link__destroy(uretprobe_link); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c index a64f7a02139c..f10029821e16 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c @@ -48,16 +48,17 @@ void test_bpf_obj_id(void) /* test_obj_id.o is a dumb prog. It should never fail * to load. 
*/ - if (err) - error_cnt++; - assert(!err); + if (CHECK_FAIL(err)) + continue; /* Insert a magic value to the map */ map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); - assert(map_fds[i] >= 0); + if (CHECK_FAIL(map_fds[i] < 0)) + goto done; err = bpf_map_update_elem(map_fds[i], &array_key, &array_magic_value, 0); - assert(!err); + if (CHECK_FAIL(err)) + goto done; /* Check getting map info */ info_len = sizeof(struct bpf_map_info) * 2; @@ -73,7 +74,7 @@ void test_bpf_obj_id(void) info_len != sizeof(struct bpf_map_info) || strcmp((char *)map_infos[i].name, expected_map_name), "get-map-info(fd)", - "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", + "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", err, errno, map_infos[i].type, BPF_MAP_TYPE_ARRAY, info_len, sizeof(struct bpf_map_info), @@ -96,9 +97,11 @@ void test_bpf_obj_id(void) prog_infos[i].map_ids = ptr_to_u64(map_ids + i); prog_infos[i].nr_map_ids = 2; err = clock_gettime(CLOCK_REALTIME, &real_time_ts); - assert(!err); + if (CHECK_FAIL(err)) + goto done; err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts); - assert(!err); + if (CHECK_FAIL(err)) + goto done; err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i], &info_len); load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) @@ -106,8 +109,8 @@ void test_bpf_obj_id(void) if (CHECK(err || prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER || info_len != sizeof(struct bpf_prog_info) || - (jit_enabled && !prog_infos[i].jited_prog_len) || - (jit_enabled && + (env.jit_enabled && !prog_infos[i].jited_prog_len) || + (env.jit_enabled && !memcmp(jited_insns, zeros, sizeof(zeros))) || !prog_infos[i].xlated_prog_len || !memcmp(xlated_insns, zeros, sizeof(zeros)) || @@ -117,11 +120,11 @@ void test_bpf_obj_id(void) *(int *)(long)prog_infos[i].map_ids != map_infos[i].id || strcmp((char *)prog_infos[i].name, expected_prog_name), "get-prog-info(fd)", - "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n", + "err %d errno %d i %d type %d(%d) info_len %u(%zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n", err, errno, i, prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, info_len, sizeof(struct bpf_prog_info), - jit_enabled, + env.jit_enabled, prog_infos[i].jited_prog_len, prog_infos[i].xlated_prog_len, !!memcmp(jited_insns, zeros, sizeof(zeros)), @@ -185,7 +188,7 @@ void test_bpf_obj_id(void) memcmp(&prog_info, &prog_infos[i], info_len) || *(int *)(long)prog_info.map_ids != saved_map_id, "get-prog-info(next_id->fd)", - "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n", + "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n", err, errno, info_len, sizeof(struct bpf_prog_info), memcmp(&prog_info, &prog_infos[i], info_len), *(int *)(long)prog_info.map_ids, saved_map_id); @@ -224,14 +227,15 @@ void test_bpf_obj_id(void) nr_id_found++; err = bpf_map_lookup_elem(map_fd, &array_key, &array_value); - assert(!err); + if (CHECK_FAIL(err)) + goto done; err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); CHECK(err || info_len != sizeof(struct bpf_map_info) || memcmp(&map_info, &map_infos[i], info_len) || array_value != array_magic_value, "check get-map-info(next_id->fd)", - "err %d 
errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n", + "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n", err, errno, info_len, sizeof(struct bpf_map_info), memcmp(&map_info, &map_infos[i], info_len), array_value, array_magic_value); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c new file mode 100644 index 000000000000..9486c13af6b2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#include <test_progs.h> +static int libbpf_debug_print(enum libbpf_print_level level, + const char *format, va_list args) +{ + if (level != LIBBPF_DEBUG) { + vprintf(format, args); + return 0; + } + + if (!strstr(format, "verifier log")) + return 0; + vprintf("%s", args); + return 0; +} + +extern int extra_prog_load_log_flags; + +static int check_load(const char *file, enum bpf_prog_type type) +{ + struct bpf_prog_load_attr attr; + struct bpf_object *obj = NULL; + int err, prog_fd; + + memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); + attr.file = file; + attr.prog_type = type; + attr.log_level = 4 | extra_prog_load_log_flags; + attr.prog_flags = BPF_F_TEST_RND_HI32; + err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); + bpf_object__close(obj); + return err; +} + +struct scale_test_def { + const char *file; + enum bpf_prog_type attach_type; + bool fails; +}; + +void test_bpf_verif_scale(void) +{ + struct scale_test_def tests[] = { + { "loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */ }, + + { "test_verif_scale1.o", BPF_PROG_TYPE_SCHED_CLS }, + { "test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS }, + { "test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS }, + + /* full unroll by llvm */ + { "pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, + { "pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, + { "pyperf180.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, + + /* partial unroll. llvm will unroll loop ~150 times. + * C loop count -> 600. + * Asm loop count -> 4. + * 16k insns in loop body. + * Total of 5 such loops. Total program size ~82k insns. + */ + { "pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, + + /* no unroll at all. + * C loop count -> 600. + * ASM loop count -> 600. + * ~110 insns in loop body. + * Total of 5 such loops. Total program size ~1500 insns. + */ + { "pyperf600_nounroll.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, + + { "loop1.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, + { "loop2.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, + { "loop4.o", BPF_PROG_TYPE_SCHED_CLS }, + { "loop5.o", BPF_PROG_TYPE_SCHED_CLS }, + + /* partial unroll. 19k insn in a loop. + * Total program size 20.8k insn. 
+ * ~350k processed_insns + */ + { "strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, + + /* no unroll, tiny loops */ + { "strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, + { "strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT }, + + { "test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL }, + { "test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL }, + + { "test_xdp_loop.o", BPF_PROG_TYPE_XDP }, + { "test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL }, + }; + libbpf_print_fn_t old_print_fn = NULL; + int err, i; + + if (env.verifier_stats) { + test__force_log(); + old_print_fn = libbpf_set_print(libbpf_debug_print); + } + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + const struct scale_test_def *test = &tests[i]; + + if (!test__start_subtest(test->file)) + continue; + + err = check_load(test->file, test->attach_type); + CHECK_FAIL(err && !test->fails); + } + + if (env.verifier_stats) + libbpf_set_print(old_print_fn); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c new file mode 100644 index 000000000000..7390d3061065 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <bpf/btf.h> + +static int duration = 0; + +void btf_dump_printf(void *ctx, const char *fmt, va_list args) +{ + vfprintf(ctx, fmt, args); +} + +static struct btf_dump_test_case { + const char *name; + const char *file; + struct btf_dump_opts opts; +} btf_dump_test_cases[] = { + {"btf_dump: syntax", "btf_dump_test_case_syntax", {}}, + {"btf_dump: ordering", "btf_dump_test_case_ordering", {}}, + {"btf_dump: padding", "btf_dump_test_case_padding", {}}, + {"btf_dump: packing", "btf_dump_test_case_packing", {}}, + {"btf_dump: bitfields", "btf_dump_test_case_bitfields", {}}, + {"btf_dump: multidim", "btf_dump_test_case_multidim", {}}, + {"btf_dump: namespacing", "btf_dump_test_case_namespacing", {}}, +}; + +static int btf_dump_all_types(const struct btf *btf, + const struct btf_dump_opts *opts) +{ + size_t type_cnt = btf__get_nr_types(btf); + struct btf_dump *d; + int err = 0, id; + + d = btf_dump__new(btf, NULL, opts, btf_dump_printf); + if (IS_ERR(d)) + return PTR_ERR(d); + + for (id = 1; id <= type_cnt; id++) { + err = btf_dump__dump_type(d, id); + if (err) + goto done; + } + +done: + btf_dump__free(d); + return err; +} + +static int test_btf_dump_case(int n, struct btf_dump_test_case *t) +{ + char test_file[256], out_file[256], diff_cmd[1024]; + struct btf *btf = NULL; + int err = 0, fd = -1; + FILE *f = NULL; + + snprintf(test_file, sizeof(test_file), "%s.o", t->file); + + btf = btf__parse_elf(test_file, NULL); + if (CHECK(IS_ERR(btf), "btf_parse_elf", + "failed to load test BTF: %ld\n", PTR_ERR(btf))) { + err = -PTR_ERR(btf); + btf = NULL; + goto done; + } + + snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file); + fd = mkstemp(out_file); + if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) { + err = fd; + goto done; + } + f = fdopen(fd, "w"); + if (CHECK(f == NULL, "open_tmp", "failed to open file: %s(%d)\n", + strerror(errno), errno)) { + close(fd); + goto done; + } + + t->opts.ctx = f; + err = btf_dump_all_types(btf, &t->opts); + fclose(f); + close(fd); + if (CHECK(err, "btf_dump", "failure during C dumping: %d\n", err)) { + goto done; + } + + snprintf(test_file, sizeof(test_file), "progs/%s.c", t->file); + if (access(test_file, R_OK) == -1) + /* + * When the test is run with O=, kselftest copies TEST_FILES + * 
without preserving the directory structure. + */ + snprintf(test_file, sizeof(test_file), "%s.c", t->file); + /* + * Diff test output and expected test output, contained between + * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case. + * For expected output lines, everything before '*' is stripped out. + * Also lines containing comment start and comment end markers are + * ignored. + */ + snprintf(diff_cmd, sizeof(diff_cmd), + "awk '/START-EXPECTED-OUTPUT/{out=1;next} " + "/END-EXPECTED-OUTPUT/{out=0} " + "/\\/\\*|\\*\\//{next} " /* ignore comment start/end lines */ + "out {sub(/^[ \\t]*\\*/, \"\"); print}' '%s' | diff -u - '%s'", + test_file, out_file); + err = system(diff_cmd); + if (CHECK(err, "diff", + "differing test output, output=%s, err=%d, diff cmd:\n%s\n", + out_file, err, diff_cmd)) + goto done; + + remove(out_file); + +done: + btf__free(btf); + return err; +} + +void test_btf_dump() { + int i; + + for (i = 0; i < ARRAY_SIZE(btf_dump_test_cases); i++) { + struct btf_dump_test_case *t = &btf_dump_test_cases[i]; + + if (!test__start_subtest(t->name)) + continue; + + test_btf_dump_case(i, &btf_dump_test_cases[i]); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c new file mode 100644 index 000000000000..05fe85281ff7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "progs/core_reloc_types.h" +#include <sys/mman.h> +#include <sys/syscall.h> + +#define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name) + +#define FLAVORS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \ + .a = 42, \ + .b = 0xc001, \ + .c = 0xbeef, \ +} + +#define FLAVORS_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_flavors.o", \ + .btf_src_file = "btf__core_reloc_" #name ".o" \ + +#define FLAVORS_CASE(name) { \ + FLAVORS_CASE_COMMON(name), \ + .input = FLAVORS_DATA(core_reloc_##name), \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = FLAVORS_DATA(core_reloc_flavors), \ + .output_len = sizeof(struct core_reloc_flavors), \ +} + +#define FLAVORS_ERR_CASE(name) { \ + FLAVORS_CASE_COMMON(name), \ + .fails = true, \ +} + +#define NESTING_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \ + .a = { .a = { .a = 42 } }, \ + .b = { .b = { .b = 0xc001 } }, \ +} + +#define NESTING_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_nesting.o", \ + .btf_src_file = "btf__core_reloc_" #name ".o" + +#define NESTING_CASE(name) { \ + NESTING_CASE_COMMON(name), \ + .input = NESTING_DATA(core_reloc_##name), \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = NESTING_DATA(core_reloc_nesting), \ + .output_len = sizeof(struct core_reloc_nesting) \ +} + +#define NESTING_ERR_CASE(name) { \ + NESTING_CASE_COMMON(name), \ + .fails = true, \ +} + +#define ARRAYS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \ + .a = { [2] = 1 }, \ + .b = { [1] = { [2] = { [3] = 2 } } }, \ + .c = { [1] = { .c = 3 } }, \ + .d = { [0] = { [0] = { .d = 4 } } }, \ +} + +#define ARRAYS_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_arrays.o", \ + .btf_src_file = "btf__core_reloc_" #name ".o" + +#define ARRAYS_CASE(name) { \ + ARRAYS_CASE_COMMON(name), \ + .input = ARRAYS_DATA(core_reloc_##name), \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_arrays_output) { \ + .a2 = 1, \ + 
.b123 = 2, \ + .c1c = 3, \ + .d00d = 4, \ + }, \ + .output_len = sizeof(struct core_reloc_arrays_output) \ +} + +#define ARRAYS_ERR_CASE(name) { \ + ARRAYS_CASE_COMMON(name), \ + .fails = true, \ +} + +#define PRIMITIVES_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \ + .a = 1, \ + .b = 2, \ + .c = 3, \ + .d = (void *)4, \ + .f = (void *)5, \ +} + +#define PRIMITIVES_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_primitives.o", \ + .btf_src_file = "btf__core_reloc_" #name ".o" + +#define PRIMITIVES_CASE(name) { \ + PRIMITIVES_CASE_COMMON(name), \ + .input = PRIMITIVES_DATA(core_reloc_##name), \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = PRIMITIVES_DATA(core_reloc_primitives), \ + .output_len = sizeof(struct core_reloc_primitives), \ +} + +#define PRIMITIVES_ERR_CASE(name) { \ + PRIMITIVES_CASE_COMMON(name), \ + .fails = true, \ +} + +#define MODS_CASE(name) { \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_mods.o", \ + .btf_src_file = "btf__core_reloc_" #name ".o", \ + .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) { \ + .a = 1, \ + .b = 2, \ + .c = (void *)3, \ + .d = (void *)4, \ + .e = { [2] = 5 }, \ + .f = { [1] = 6 }, \ + .g = { .x = 7 }, \ + .h = { .y = 8 }, \ + }, \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_mods_output) { \ + .a = 1, .b = 2, .c = 3, .d = 4, \ + .e = 5, .f = 6, .g = 7, .h = 8, \ + }, \ + .output_len = sizeof(struct core_reloc_mods_output), \ +} + +#define PTR_AS_ARR_CASE(name) { \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_ptr_as_arr.o", \ + .btf_src_file = "btf__core_reloc_" #name ".o", \ + .input = (const char *)&(struct core_reloc_##name []){ \ + { .a = 1 }, \ + { .a = 2 }, \ + { .a = 3 }, \ + }, \ + .input_len = 3 * sizeof(struct core_reloc_##name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_ptr_as_arr) { \ + .a = 3, \ + }, \ + .output_len = sizeof(struct core_reloc_ptr_as_arr), \ +} + +#define INTS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \ + .u8_field = 1, \ + .s8_field = 2, \ + .u16_field = 3, \ + .s16_field = 4, \ + .u32_field = 5, \ + .s32_field = 6, \ + .u64_field = 7, \ + .s64_field = 8, \ +} + +#define INTS_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_ints.o", \ + .btf_src_file = "btf__core_reloc_" #name ".o" + +#define INTS_CASE(name) { \ + INTS_CASE_COMMON(name), \ + .input = INTS_DATA(core_reloc_##name), \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = INTS_DATA(core_reloc_ints), \ + .output_len = sizeof(struct core_reloc_ints), \ +} + +#define INTS_ERR_CASE(name) { \ + INTS_CASE_COMMON(name), \ + .fails = true, \ +} + +#define EXISTENCE_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_existence.o", \ + .btf_src_file = "btf__core_reloc_" #name ".o", \ + .relaxed_core_relocs = true + +#define EXISTENCE_ERR_CASE(name) { \ + EXISTENCE_CASE_COMMON(name), \ + .fails = true, \ +} + +#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \ + .case_name = test_name_prefix#name, \ + .bpf_obj_file = objfile, \ + .btf_src_file = "btf__core_reloc_" #name ".o" + +#define BITFIELDS_CASE(name, ...) 
{ \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \ + "direct:", name), \ + .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \ + __VA_ARGS__, \ + .output_len = sizeof(struct core_reloc_bitfields_output), \ +}, { \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \ + "probed:", name), \ + .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \ + __VA_ARGS__, \ + .output_len = sizeof(struct core_reloc_bitfields_output), \ + .direct_raw_tp = true, \ +} + + +#define BITFIELDS_ERR_CASE(name) { \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \ + "probed:", name), \ + .fails = true, \ +}, { \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \ + "direct:", name), \ + .direct_raw_tp = true, \ + .fails = true, \ +} + +#define SIZE_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_size.o", \ + .btf_src_file = "btf__core_reloc_" #name ".o", \ + .relaxed_core_relocs = true + +#define SIZE_OUTPUT_DATA(type) \ + STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \ + .int_sz = sizeof(((type *)0)->int_field), \ + .struct_sz = sizeof(((type *)0)->struct_field), \ + .union_sz = sizeof(((type *)0)->union_field), \ + .arr_sz = sizeof(((type *)0)->arr_field), \ + .arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \ + .ptr_sz = sizeof(((type *)0)->ptr_field), \ + .enum_sz = sizeof(((type *)0)->enum_field), \ + } + +#define SIZE_CASE(name) { \ + SIZE_CASE_COMMON(name), \ + .input_len = 0, \ + .output = SIZE_OUTPUT_DATA(struct core_reloc_##name), \ + .output_len = sizeof(struct core_reloc_size_output), \ +} + +#define SIZE_ERR_CASE(name) { \ + SIZE_CASE_COMMON(name), \ + .fails = true, \ +} + +struct core_reloc_test_case { + const char *case_name; + const char *bpf_obj_file; + const char *btf_src_file; + const char *input; + int input_len; + const char *output; + int output_len; + bool fails; + bool relaxed_core_relocs; + bool direct_raw_tp; +}; + +static struct core_reloc_test_case test_cases[] = { + /* validate we can find kernel image and use its BTF for relocs */ + { + .case_name = "kernel", + .bpf_obj_file = "test_core_reloc_kernel.o", + .btf_src_file = NULL, /* load from /lib/modules/$(uname -r) */ + .input = "", + .input_len = 0, + .output = STRUCT_TO_CHAR_PTR(core_reloc_kernel_output) { + .valid = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }, + .comm = "test_progs", + .comm_len = sizeof("test_progs"), + }, + .output_len = sizeof(struct core_reloc_kernel_output), + }, + + /* validate BPF program can use multiple flavors to match against + * single target BTF type + */ + FLAVORS_CASE(flavors), + + FLAVORS_ERR_CASE(flavors__err_wrong_name), + + /* various struct/enum nesting and resolution scenarios */ + NESTING_CASE(nesting), + NESTING_CASE(nesting___anon_embed), + NESTING_CASE(nesting___struct_union_mixup), + NESTING_CASE(nesting___extra_nesting), + NESTING_CASE(nesting___dup_compat_types), + + NESTING_ERR_CASE(nesting___err_missing_field), + NESTING_ERR_CASE(nesting___err_array_field), + NESTING_ERR_CASE(nesting___err_missing_container), + NESTING_ERR_CASE(nesting___err_nonstruct_container), + NESTING_ERR_CASE(nesting___err_array_container), + NESTING_ERR_CASE(nesting___err_dup_incompat_types), + NESTING_ERR_CASE(nesting___err_partial_match_dups), + NESTING_ERR_CASE(nesting___err_too_deep), + + /* 
various array access relocation scenarios */ + ARRAYS_CASE(arrays), + ARRAYS_CASE(arrays___diff_arr_dim), + ARRAYS_CASE(arrays___diff_arr_val_sz), + + ARRAYS_ERR_CASE(arrays___err_too_small), + ARRAYS_ERR_CASE(arrays___err_too_shallow), + ARRAYS_ERR_CASE(arrays___err_non_array), + ARRAYS_ERR_CASE(arrays___err_wrong_val_type1), + ARRAYS_ERR_CASE(arrays___err_wrong_val_type2), + + /* enum/ptr/int handling scenarios */ + PRIMITIVES_CASE(primitives), + PRIMITIVES_CASE(primitives___diff_enum_def), + PRIMITIVES_CASE(primitives___diff_func_proto), + PRIMITIVES_CASE(primitives___diff_ptr_type), + + PRIMITIVES_ERR_CASE(primitives___err_non_enum), + PRIMITIVES_ERR_CASE(primitives___err_non_int), + PRIMITIVES_ERR_CASE(primitives___err_non_ptr), + + /* const/volatile/restrict and typedefs scenarios */ + MODS_CASE(mods), + MODS_CASE(mods___mod_swap), + MODS_CASE(mods___typedefs), + + /* handling "ptr is an array" semantics */ + PTR_AS_ARR_CASE(ptr_as_arr), + PTR_AS_ARR_CASE(ptr_as_arr___diff_sz), + + /* int signedness/sizing/bitfield handling */ + INTS_CASE(ints), + INTS_CASE(ints___bool), + INTS_CASE(ints___reverse_sign), + + /* validate edge cases of capturing relocations */ + { + .case_name = "misc", + .bpf_obj_file = "test_core_reloc_misc.o", + .btf_src_file = "btf__core_reloc_misc.o", + .input = (const char *)&(struct core_reloc_misc_extensible[]){ + { .a = 1 }, + { .a = 2 }, /* not read */ + { .a = 3 }, + }, + .input_len = 4 * sizeof(int), + .output = STRUCT_TO_CHAR_PTR(core_reloc_misc_output) { + .a = 1, + .b = 1, + .c = 0, /* BUG in clang, should be 3 */ + }, + .output_len = sizeof(struct core_reloc_misc_output), + }, + + /* validate field existence checks */ + { + EXISTENCE_CASE_COMMON(existence), + .input = STRUCT_TO_CHAR_PTR(core_reloc_existence) { + .a = 1, + .b = 2, + .c = 3, + .arr = { 4 }, + .s = { .x = 5 }, + }, + .input_len = sizeof(struct core_reloc_existence), + .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) { + .a_exists = 1, + .b_exists = 1, + .c_exists = 1, + .arr_exists = 1, + .s_exists = 1, + .a_value = 1, + .b_value = 2, + .c_value = 3, + .arr_value = 4, + .s_value = 5, + }, + .output_len = sizeof(struct core_reloc_existence_output), + }, + { + EXISTENCE_CASE_COMMON(existence___minimal), + .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___minimal) { + .a = 42, + }, + .input_len = sizeof(struct core_reloc_existence), + .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) { + .a_exists = 1, + .b_exists = 0, + .c_exists = 0, + .arr_exists = 0, + .s_exists = 0, + .a_value = 42, + .b_value = 0xff000002u, + .c_value = 0xff000003u, + .arr_value = 0xff000004u, + .s_value = 0xff000005u, + }, + .output_len = sizeof(struct core_reloc_existence_output), + }, + + EXISTENCE_ERR_CASE(existence__err_int_sz), + EXISTENCE_ERR_CASE(existence__err_int_type), + EXISTENCE_ERR_CASE(existence__err_int_kind), + EXISTENCE_ERR_CASE(existence__err_arr_kind), + EXISTENCE_ERR_CASE(existence__err_arr_value_type), + EXISTENCE_ERR_CASE(existence__err_struct_type), + + /* bitfield relocation checks */ + BITFIELDS_CASE(bitfields, { + .ub1 = 1, + .ub2 = 2, + .ub7 = 96, + .sb4 = -7, + .sb20 = -0x76543, + .u32 = 0x80000000, + .s32 = -0x76543210, + }), + BITFIELDS_CASE(bitfields___bit_sz_change, { + .ub1 = 6, + .ub2 = 0xABCDE, + .ub7 = 1, + .sb4 = -1, + .sb20 = -0x17654321, + .u32 = 0xBEEF, + .s32 = -0x3FEDCBA987654321, + }), + BITFIELDS_CASE(bitfields___bitfield_vs_int, { + .ub1 = 0xFEDCBA9876543210, + .ub2 = 0xA6, + .ub7 = -0x7EDCBA987654321, + .sb4 = -0x6123456789ABCDE, + .sb20 = 0xD00D, + .u32 = 
-0x76543, + .s32 = 0x0ADEADBEEFBADB0B, + }), + BITFIELDS_CASE(bitfields___just_big_enough, { + .ub1 = 0xF, + .ub2 = 0x0812345678FEDCBA, + }), + BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield), + + /* size relocation checks */ + SIZE_CASE(size), + SIZE_CASE(size___diff_sz), +}; + +struct data { + char in[256]; + char out[256]; + uint64_t my_pid_tgid; +}; + +static size_t roundup_page(size_t sz) +{ + long page_size = sysconf(_SC_PAGE_SIZE); + return (sz + page_size - 1) / page_size * page_size; +} + +void test_core_reloc(void) +{ + const size_t mmap_sz = roundup_page(sizeof(struct data)); + struct bpf_object_load_attr load_attr = {}; + struct core_reloc_test_case *test_case; + const char *tp_name, *probe_name; + int err, duration = 0, i, equal; + struct bpf_link *link = NULL; + struct bpf_map *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + uint64_t my_pid_tgid; + struct data *data; + void *mmap_data = NULL; + + my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32); + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + test_case = &test_cases[i]; + if (!test__start_subtest(test_case->case_name)) + continue; + + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .relaxed_core_relocs = test_case->relaxed_core_relocs, + ); + + obj = bpf_object__open_file(test_case->bpf_obj_file, &opts); + if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n", + test_case->bpf_obj_file, PTR_ERR(obj))) + continue; + + /* for typed raw tracepoints, NULL should be specified */ + if (test_case->direct_raw_tp) { + probe_name = "tp_btf/sys_enter"; + tp_name = NULL; + } else { + probe_name = "raw_tracepoint/sys_enter"; + tp_name = "sys_enter"; + } + + prog = bpf_object__find_program_by_title(obj, probe_name); + if (CHECK(!prog, "find_probe", + "prog '%s' not found\n", probe_name)) + goto cleanup; + + load_attr.obj = obj; + load_attr.log_level = 0; + load_attr.target_btf_path = test_case->btf_src_file; + err = bpf_object__load_xattr(&load_attr); + if (test_case->fails) { + CHECK(!err, "obj_load_fail", + "should fail to load prog '%s'\n", probe_name); + goto cleanup; + } else { + if (CHECK(err, "obj_load", + "failed to load prog '%s': %d\n", + probe_name, err)) + goto cleanup; + } + + data_map = bpf_object__find_map_by_name(obj, "test_cor.bss"); + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto cleanup; + + mmap_data = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, + MAP_SHARED, bpf_map__fd(data_map), 0); + if (CHECK(mmap_data == MAP_FAILED, "mmap", + ".bss mmap failed: %d", errno)) { + mmap_data = NULL; + goto cleanup; + } + data = mmap_data; + + memset(mmap_data, 0, sizeof(*data)); + memcpy(data->in, test_case->input, test_case->input_len); + data->my_pid_tgid = my_pid_tgid; + + link = bpf_program__attach_raw_tracepoint(prog, tp_name); + if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", + PTR_ERR(link))) + goto cleanup; + + /* trigger test run */ + usleep(1); + + equal = memcmp(data->out, test_case->output, + test_case->output_len) == 0; + if (CHECK(!equal, "check_result", + "input/output data don't match\n")) { + int j; + + for (j = 0; j < test_case->input_len; j++) { + printf("input byte #%d: 0x%02hhx\n", + j, test_case->input[j]); + } + for (j = 0; j < test_case->output_len; j++) { + printf("output byte #%d: EXP 0x%02hhx GOT 0x%02hhx\n", + j, test_case->output[j], data->out[j]); + } + goto cleanup; + } + +cleanup: + if (mmap_data) { + CHECK_FAIL(munmap(mmap_data, mmap_sz)); + mmap_data = NULL; + } + if (!IS_ERR_OR_NULL(link)) { + bpf_link__destroy(link); 
+ link = NULL; + } + bpf_object__close(obj); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c new file mode 100644 index 000000000000..40bcff2cc274 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +void test_fentry_fexit(void) +{ + struct bpf_prog_load_attr attr_fentry = { + .file = "./fentry_test.o", + }; + struct bpf_prog_load_attr attr_fexit = { + .file = "./fexit_test.o", + }; + + struct bpf_object *obj_fentry = NULL, *obj_fexit = NULL, *pkt_obj; + struct bpf_map *data_map_fentry, *data_map_fexit; + char fentry_name[] = "fentry/bpf_fentry_testX"; + char fexit_name[] = "fexit/bpf_fentry_testX"; + int err, pkt_fd, kfree_skb_fd, i; + struct bpf_link *link[12] = {}; + struct bpf_program *prog[12]; + __u32 duration, retval; + const int zero = 0; + u64 result[12]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + &pkt_obj, &pkt_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + err = bpf_prog_load_xattr(&attr_fentry, &obj_fentry, &kfree_skb_fd); + if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + goto close_prog; + err = bpf_prog_load_xattr(&attr_fexit, &obj_fexit, &kfree_skb_fd); + if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + goto close_prog; + + for (i = 0; i < 6; i++) { + fentry_name[sizeof(fentry_name) - 2] = '1' + i; + prog[i] = bpf_object__find_program_by_title(obj_fentry, fentry_name); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fentry_name)) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map_fentry = bpf_object__find_map_by_name(obj_fentry, "fentry_t.bss"); + if (CHECK(!data_map_fentry, "find_data_map", "data map not found\n")) + goto close_prog; + + for (i = 6; i < 12; i++) { + fexit_name[sizeof(fexit_name) - 2] = '1' + i - 6; + prog[i] = bpf_object__find_program_by_title(obj_fexit, fexit_name); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fexit_name)) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map_fexit = bpf_object__find_map_by_name(obj_fexit, "fexit_te.bss"); + if (CHECK(!data_map_fexit, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map_fentry), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + err = bpf_map_lookup_elem(bpf_map__fd(data_map_fexit), &zero, result + 6); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < 12; i++) + if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n", + i % 6 + 1, result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < 12; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + bpf_object__close(obj_fentry); + bpf_object__close(obj_fexit); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c 
b/tools/testing/selftests/bpf/prog_tests/fentry_test.c new file mode 100644 index 000000000000..9fb103193878 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +void test_fentry_test(void) +{ + struct bpf_prog_load_attr attr = { + .file = "./fentry_test.o", + }; + + char prog_name[] = "fentry/bpf_fentry_testX"; + struct bpf_object *obj = NULL, *pkt_obj; + int err, pkt_fd, kfree_skb_fd, i; + struct bpf_link *link[6] = {}; + struct bpf_program *prog[6]; + __u32 duration, retval; + struct bpf_map *data_map; + const int zero = 0; + u64 result[6]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + &pkt_obj, &pkt_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd); + if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + goto close_prog; + + for (i = 0; i < 6; i++) { + prog_name[sizeof(prog_name) - 2] = '1' + i; + prog[i] = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name)) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map = bpf_object__find_map_by_name(obj, "fentry_t.bss"); + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < 6; i++) + if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n", + i + 1, result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < 6; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + bpf_object__close(obj); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c new file mode 100644 index 000000000000..15c7378362dd --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +#define PROG_CNT 3 + +void test_fexit_bpf2bpf(void) +{ + const char *prog_name[PROG_CNT] = { + "fexit/test_pkt_access", + "fexit/test_pkt_access_subprog1", + "fexit/test_pkt_access_subprog2", + }; + struct bpf_object *obj = NULL, *pkt_obj; + int err, pkt_fd, i; + struct bpf_link *link[PROG_CNT] = {}; + struct bpf_program *prog[PROG_CNT]; + __u32 duration, retval; + struct bpf_map *data_map; + const int zero = 0; + u64 result[PROG_CNT]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_UNSPEC, + &pkt_obj, &pkt_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .attach_prog_fd = pkt_fd, + ); + + obj = bpf_object__open_file("./fexit_bpf2bpf.o", &opts); + if (CHECK(IS_ERR_OR_NULL(obj), "obj_open", + "failed to open fexit_bpf2bpf: %ld\n", + PTR_ERR(obj))) + goto close_prog; + + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto close_prog; + + 
for (i = 0; i < PROG_CNT; i++) { + prog[i] = bpf_object__find_program_by_title(obj, prog_name[i]); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name[i])) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map = bpf_object__find_map_by_name(obj, "fexit_bp.bss"); + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < PROG_CNT; i++) + if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n", + result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < PROG_CNT; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + if (!IS_ERR_OR_NULL(obj)) + bpf_object__close(obj); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c new file mode 100644 index 000000000000..3b9dbf7433f0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +/* x86-64 fits 55 JITed and 43 interpreted progs into half page */ +#define CNT 40 + +void test_fexit_stress(void) +{ + char test_skb[128] = {}; + int fexit_fd[CNT] = {}; + int link_fd[CNT] = {}; + __u32 duration = 0; + char error[4096]; + __u32 prog_ret; + int err, i, filter_fd; + + const struct bpf_insn trace_program[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + struct bpf_load_program_attr load_attr = { + .prog_type = BPF_PROG_TYPE_TRACING, + .license = "GPL", + .insns = trace_program, + .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn), + .expected_attach_type = BPF_TRACE_FEXIT, + }; + + const struct bpf_insn skb_program[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + struct bpf_load_program_attr skb_load_attr = { + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .license = "GPL", + .insns = skb_program, + .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn), + }; + + err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1", + load_attr.expected_attach_type); + if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err)) + goto out; + load_attr.attach_btf_id = err; + + for (i = 0; i < CNT; i++) { + fexit_fd[i] = bpf_load_program_xattr(&load_attr, error, sizeof(error)); + if (CHECK(fexit_fd[i] < 0, "fexit loaded", + "failed: %d errno %d\n", fexit_fd[i], errno)) + goto out; + link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]); + if (CHECK(link_fd[i] < 0, "fexit attach failed", + "prog %d failed: %d err %d\n", i, link_fd[i], errno)) + goto out; + } + + filter_fd = bpf_load_program_xattr(&skb_load_attr, error, sizeof(error)); + if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n", + filter_fd, errno)) + goto out; + + err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, + 0, &prog_ret, 0); + close(filter_fd); + CHECK_FAIL(err); +out: + for (i = 0; i < CNT; i++) { + if (link_fd[i]) + close(link_fd[i]); + if (fexit_fd[i]) + close(fexit_fd[i]); + } +} diff --git 
a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c new file mode 100644 index 000000000000..f99013222c74 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +void test_fexit_test(void) +{ + struct bpf_prog_load_attr attr = { + .file = "./fexit_test.o", + }; + + char prog_name[] = "fexit/bpf_fentry_testX"; + struct bpf_object *obj = NULL, *pkt_obj; + int err, pkt_fd, kfree_skb_fd, i; + struct bpf_link *link[6] = {}; + struct bpf_program *prog[6]; + __u32 duration, retval; + struct bpf_map *data_map; + const int zero = 0; + u64 result[6]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + &pkt_obj, &pkt_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd); + if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + goto close_prog; + + for (i = 0; i < 6; i++) { + prog_name[sizeof(prog_name) - 2] = '1' + i; + prog[i] = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name)) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map = bpf_object__find_map_by_name(obj, "fexit_te.bss"); + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < 6; i++) + if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n", + i + 1, result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < 6; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + bpf_object__close(obj); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index fc818bc1d729..92563898867c 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -1,8 +1,16 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include <error.h> +#include <linux/if.h> +#include <linux/if_tun.h> +#include <sys/uio.h> + +#ifndef IP_MF +#define IP_MF 0x2000 +#endif #define CHECK_FLOW_KEYS(desc, got, expected) \ - CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \ + CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \ desc, \ "nhoff=%u/%u " \ "thoff=%u/%u " \ @@ -10,7 +18,9 @@ "is_frag=%u/%u " \ "is_first_frag=%u/%u " \ "is_encap=%u/%u " \ + "ip_proto=0x%x/0x%x " \ "n_proto=0x%x/0x%x " \ + "flow_label=0x%x/0x%x " \ "sport=%u/%u " \ "dport=%u/%u\n", \ got.nhoff, expected.nhoff, \ @@ -19,53 +29,52 @@ got.is_frag, expected.is_frag, \ got.is_first_frag, expected.is_first_frag, \ got.is_encap, expected.is_encap, \ + got.ip_proto, expected.ip_proto, \ got.n_proto, expected.n_proto, \ + got.flow_label, expected.flow_label, \ got.sport, expected.sport, \ got.dport, expected.dport) -static struct bpf_flow_keys pkt_v4_flow_keys = { - .nhoff = 0, - .thoff = 
sizeof(struct iphdr), - .addr_proto = ETH_P_IP, - .ip_proto = IPPROTO_TCP, - .n_proto = __bpf_constant_htons(ETH_P_IP), -}; - -static struct bpf_flow_keys pkt_v6_flow_keys = { - .nhoff = 0, - .thoff = sizeof(struct ipv6hdr), - .addr_proto = ETH_P_IPV6, - .ip_proto = IPPROTO_TCP, - .n_proto = __bpf_constant_htons(ETH_P_IPV6), -}; +struct ipv4_pkt { + struct ethhdr eth; + struct iphdr iph; + struct tcphdr tcp; +} __packed; -#define VLAN_HLEN 4 +struct ipip_pkt { + struct ethhdr eth; + struct iphdr iph; + struct iphdr iph_inner; + struct tcphdr tcp; +} __packed; -static struct { +struct svlan_ipv4_pkt { struct ethhdr eth; __u16 vlan_tci; __u16 vlan_proto; struct iphdr iph; struct tcphdr tcp; -} __packed pkt_vlan_v4 = { - .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q), - .vlan_proto = __bpf_constant_htons(ETH_P_IP), - .iph.ihl = 5, - .iph.protocol = IPPROTO_TCP, - .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), - .tcp.urg_ptr = 123, - .tcp.doff = 5, -}; +} __packed; -static struct bpf_flow_keys pkt_vlan_v4_flow_keys = { - .nhoff = VLAN_HLEN, - .thoff = VLAN_HLEN + sizeof(struct iphdr), - .addr_proto = ETH_P_IP, - .ip_proto = IPPROTO_TCP, - .n_proto = __bpf_constant_htons(ETH_P_IP), -}; +struct ipv6_pkt { + struct ethhdr eth; + struct ipv6hdr iph; + struct tcphdr tcp; +} __packed; -static struct { +struct ipv6_frag_pkt { + struct ethhdr eth; + struct ipv6hdr iph; + struct frag_hdr { + __u8 nexthdr; + __u8 reserved; + __be16 frag_off; + __be32 identification; + } ipf; + struct tcphdr tcp; +} __packed; + +struct dvlan_ipv6_pkt { struct ethhdr eth; __u16 vlan_tci; __u16 vlan_proto; @@ -73,68 +82,447 @@ static struct { __u16 vlan_proto2; struct ipv6hdr iph; struct tcphdr tcp; -} __packed pkt_vlan_v6 = { - .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD), - .vlan_proto = __bpf_constant_htons(ETH_P_8021Q), - .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6), - .iph.nexthdr = IPPROTO_TCP, - .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), - .tcp.urg_ptr = 123, - .tcp.doff = 5, +} __packed; + +struct test { + const char *name; + union { + struct ipv4_pkt ipv4; + struct svlan_ipv4_pkt svlan_ipv4; + struct ipip_pkt ipip; + struct ipv6_pkt ipv6; + struct ipv6_frag_pkt ipv6_frag; + struct dvlan_ipv6_pkt dvlan_ipv6; + } pkt; + struct bpf_flow_keys keys; + __u32 flags; }; -static struct bpf_flow_keys pkt_vlan_v6_flow_keys = { - .nhoff = VLAN_HLEN * 2, - .thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr), - .addr_proto = ETH_P_IPV6, - .ip_proto = IPPROTO_TCP, - .n_proto = __bpf_constant_htons(ETH_P_IPV6), +#define VLAN_HLEN 4 + +struct test tests[] = { + { + .name = "ipv4", + .pkt.ipv4 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_TCP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .sport = 80, + .dport = 8080, + }, + }, + { + .name = "ipv6", + .pkt.ipv6 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_TCP, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .sport = 80, + .dport = 8080, + }, + }, + { + .name = "802.1q-ipv4", + 
.pkt.svlan_ipv4 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q), + .vlan_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_TCP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN + VLAN_HLEN, + .thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .sport = 80, + .dport = 8080, + }, + }, + { + .name = "802.1ad-ipv6", + .pkt.dvlan_ipv6 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD), + .vlan_proto = __bpf_constant_htons(ETH_P_8021Q), + .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_TCP, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN + VLAN_HLEN * 2, + .thoff = ETH_HLEN + VLAN_HLEN * 2 + + sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .sport = 80, + .dport = 8080, + }, + }, + { + .name = "ipv4-frag", + .pkt.ipv4 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_TCP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.frag_off = __bpf_constant_htons(IP_MF), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG, + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .is_frag = true, + .is_first_frag = true, + .sport = 80, + .dport = 8080, + }, + .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG, + }, + { + .name = "ipv4-no-frag", + .pkt.ipv4 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_TCP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.frag_off = __bpf_constant_htons(IP_MF), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .is_frag = true, + .is_first_frag = true, + }, + }, + { + .name = "ipv6-frag", + .pkt.ipv6_frag = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_FRAGMENT, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .ipf.nexthdr = IPPROTO_TCP, + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG, + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr) + + sizeof(struct frag_hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .is_frag = true, + .is_first_frag = true, + .sport = 80, + .dport = 8080, + }, + .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG, + }, + { + .name = "ipv6-no-frag", + .pkt.ipv6_frag = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_FRAGMENT, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .ipf.nexthdr = IPPROTO_TCP, + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr) + + sizeof(struct frag_hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .is_frag = true, + .is_first_frag = true, + }, + }, + { + 
.name = "ipv6-flow-label", + .pkt.ipv6 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_TCP, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.flow_lbl = { 0xb, 0xee, 0xef }, + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .sport = 80, + .dport = 8080, + .flow_label = __bpf_constant_htonl(0xbeeef), + }, + }, + { + .name = "ipv6-no-flow-label", + .pkt.ipv6 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_TCP, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.flow_lbl = { 0xb, 0xee, 0xef }, + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL, + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .flow_label = __bpf_constant_htonl(0xbeeef), + }, + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL, + }, + { + .name = "ipip-encap", + .pkt.ipip = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_IPIP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .iph_inner.ihl = 5, + .iph_inner.protocol = IPPROTO_TCP, + .iph_inner.tot_len = + __bpf_constant_htons(MAGIC_BYTES) - + sizeof(struct iphdr), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr) + + sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .is_encap = true, + .sport = 80, + .dport = 8080, + }, + }, + { + .name = "ipip-no-encap", + .pkt.ipip = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_IPIP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .iph_inner.ihl = 5, + .iph_inner.protocol = IPPROTO_TCP, + .iph_inner.tot_len = + __bpf_constant_htons(MAGIC_BYTES) - + sizeof(struct iphdr), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP, + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_IPIP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .is_encap = true, + }, + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP, + }, }; +static int create_tap(const char *ifname) +{ + struct ifreq ifr = { + .ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS, + }; + int fd, ret; + + strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); + + fd = open("/dev/net/tun", O_RDWR); + if (fd < 0) + return -1; + + ret = ioctl(fd, TUNSETIFF, &ifr); + if (ret) + return -1; + + return fd; +} + +static int tx_tap(int fd, void *pkt, size_t len) +{ + struct iovec iov[] = { + { + .iov_len = len, + .iov_base = pkt, + }, + }; + return writev(fd, iov, ARRAY_SIZE(iov)); +} + +static int ifup(const char *ifname) +{ + struct ifreq ifr = {}; + int sk, ret; + + strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); + + sk = socket(PF_INET, SOCK_DGRAM, 0); + if (sk < 0) + return -1; + + ret = ioctl(sk, SIOCGIFFLAGS, &ifr); + if (ret) { + close(sk); + return -1; + } + + ifr.ifr_flags |= IFF_UP; + ret = ioctl(sk, SIOCSIFFLAGS, &ifr); + if (ret) { + close(sk); + return -1; + } + + close(sk); + return 0; +} + void test_flow_dissector(void) { - 
struct bpf_flow_keys flow_keys; + int i, err, prog_fd, keys_fd = -1, tap_fd; struct bpf_object *obj; - __u32 duration, retval; - int err, prog_fd; - __u32 size; + __u32 duration = 0; err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector", - "jmp_table", &prog_fd); - if (err) { - error_cnt++; + "jmp_table", "last_dissection", &prog_fd, &keys_fd); + if (CHECK_FAIL(err)) return; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + struct bpf_flow_keys flow_keys; + struct bpf_prog_test_run_attr tattr = { + .prog_fd = prog_fd, + .data_in = &tests[i].pkt, + .data_size_in = sizeof(tests[i].pkt), + .data_out = &flow_keys, + }; + static struct bpf_flow_keys ctx = {}; + + if (tests[i].flags) { + tattr.ctx_in = &ctx; + tattr.ctx_size_in = sizeof(ctx); + ctx.flags = tests[i].flags; + } + + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) || + err || tattr.retval != 1, + tests[i].name, + "err %d errno %d retval %d duration %d size %u/%lu\n", + err, errno, tattr.retval, tattr.duration, + tattr.data_size_out, sizeof(flow_keys)); + CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); } - err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4), - &flow_keys, &size, &retval, &duration); - CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv4", - "err %d errno %d retval %d duration %d size %u/%lu\n", - err, errno, retval, duration, size, sizeof(flow_keys)); - CHECK_FLOW_KEYS("ipv4_flow_keys", flow_keys, pkt_v4_flow_keys); - - err = bpf_prog_test_run(prog_fd, 10, &pkt_v6, sizeof(pkt_v6), - &flow_keys, &size, &retval, &duration); - CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv6", - "err %d errno %d retval %d duration %d size %u/%lu\n", - err, errno, retval, duration, size, sizeof(flow_keys)); - CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys); - - err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v4, sizeof(pkt_vlan_v4), - &flow_keys, &size, &retval, &duration); - CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv4", - "err %d errno %d retval %d duration %d size %u/%lu\n", - err, errno, retval, duration, size, sizeof(flow_keys)); - CHECK_FLOW_KEYS("vlan_ipv4_flow_keys", flow_keys, - pkt_vlan_v4_flow_keys); - - err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v6, sizeof(pkt_vlan_v6), - &flow_keys, &size, &retval, &duration); - CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv6", - "err %d errno %d retval %d duration %d size %u/%lu\n", - err, errno, retval, duration, size, sizeof(flow_keys)); - CHECK_FLOW_KEYS("vlan_ipv6_flow_keys", flow_keys, - pkt_vlan_v6_flow_keys); + /* Do the same tests but for skb-less flow dissector. + * We use a known path in the net/tun driver that calls + * eth_get_headlen and we manually export bpf_flow_keys + * via BPF map in this case. + */ + + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno); + + tap_fd = create_tap("tap0"); + CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno); + err = ifup("tap0"); + CHECK(err, "ifup", "err %d errno %d\n", err, errno); + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + /* Keep in sync with 'flags' from eth_get_headlen. 
*/ + __u32 eth_get_headlen_flags = + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG; + struct bpf_prog_test_run_attr tattr = {}; + struct bpf_flow_keys flow_keys = {}; + __u32 key = (__u32)(tests[i].keys.sport) << 16 | + tests[i].keys.dport; + + /* For skb-less case we can't pass input flags; run + * only the tests that have a matching set of flags. + */ + + if (tests[i].flags != eth_get_headlen_flags) + continue; + + err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt)); + CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno); + + err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys); + CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err); + + CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err); + CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); + + err = bpf_map_delete_elem(keys_fd, &key); + CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err); + } + bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR); bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c new file mode 100644 index 000000000000..dc5ef155ec28 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_flow_dissector_load_bytes(void) +{ + struct bpf_flow_keys flow_keys; + __u32 duration = 0, retval, size; + struct bpf_insn prog[] = { + // BPF_REG_1 - 1st argument: context + // BPF_REG_2 - 2nd argument: offset, start at first byte + BPF_MOV64_IMM(BPF_REG_2, 0), + // BPF_REG_3 - 3rd argument: destination, reserve byte on stack + BPF_ALU64_REG(BPF_MOV, BPF_REG_3, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -1), + // BPF_REG_4 - 4th argument: copy one byte + BPF_MOV64_IMM(BPF_REG_4, 1), + // bpf_skb_load_bytes(ctx, sizeof(pkt_v4), ptr, 1) + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_skb_load_bytes), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + // if (ret == 0) return BPF_DROP (2) + BPF_MOV64_IMM(BPF_REG_0, BPF_DROP), + BPF_EXIT_INSN(), + // if (ret != 0) return BPF_OK (0) + BPF_MOV64_IMM(BPF_REG_0, BPF_OK), + BPF_EXIT_INSN(), + }; + int fd, err; + + /* make sure bpf_skb_load_bytes is not allowed from skb-less context + */ + fd = bpf_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog, + ARRAY_SIZE(prog), "GPL", 0, NULL, 0); + CHECK(fd < 0, + "flow_dissector-bpf_skb_load_bytes-load", + "fd %d errno %d\n", + fd, errno); + + err = bpf_prog_test_run(fd, 1, &pkt_v4, sizeof(pkt_v4), + &flow_keys, &size, &retval, &duration); + CHECK(size != sizeof(flow_keys) || err || retval != 1, + "flow_dissector-bpf_skb_load_bytes", + "err %d errno %d retval %d duration %d size %u/%zu\n", + err, errno, retval, duration, size, sizeof(flow_keys)); + + if (fd >= -1) + close(fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c new file mode 100644 index 000000000000..1f51ba66b98b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test that the flow_dissector program can be updated with a single + * syscall by attaching a new program that replaces the existing one. + * + * Corner case - the same program cannot be attached twice. 
+ */ + +#define _GNU_SOURCE +#include <errno.h> +#include <fcntl.h> +#include <sched.h> +#include <stdbool.h> +#include <unistd.h> + +#include <linux/bpf.h> +#include <bpf/bpf.h> + +#include "test_progs.h" + +static bool is_attached(int netns) +{ + __u32 cnt; + int err; + + err = bpf_prog_query(netns, BPF_FLOW_DISSECTOR, 0, NULL, NULL, &cnt); + if (CHECK_FAIL(err)) { + perror("bpf_prog_query"); + return true; /* fail-safe */ + } + + return cnt > 0; +} + +static int load_prog(void) +{ + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_0, BPF_OK), + BPF_EXIT_INSN(), + }; + int fd; + + fd = bpf_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog, + ARRAY_SIZE(prog), "GPL", 0, NULL, 0); + if (CHECK_FAIL(fd < 0)) + perror("bpf_load_program"); + + return fd; +} + +static void do_flow_dissector_reattach(void) +{ + int prog_fd[2] = { -1, -1 }; + int err; + + prog_fd[0] = load_prog(); + if (prog_fd[0] < 0) + return; + + prog_fd[1] = load_prog(); + if (prog_fd[1] < 0) + goto out_close; + + err = bpf_prog_attach(prog_fd[0], 0, BPF_FLOW_DISSECTOR, 0); + if (CHECK_FAIL(err)) { + perror("bpf_prog_attach-0"); + goto out_close; + } + + /* Expect success when attaching a different program */ + err = bpf_prog_attach(prog_fd[1], 0, BPF_FLOW_DISSECTOR, 0); + if (CHECK_FAIL(err)) { + perror("bpf_prog_attach-1"); + goto out_detach; + } + + /* Expect failure when attaching the same program twice */ + err = bpf_prog_attach(prog_fd[1], 0, BPF_FLOW_DISSECTOR, 0); + if (CHECK_FAIL(!err || errno != EINVAL)) + perror("bpf_prog_attach-2"); + +out_detach: + err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR); + if (CHECK_FAIL(err)) + perror("bpf_prog_detach"); + +out_close: + close(prog_fd[1]); + close(prog_fd[0]); +} + +void test_flow_dissector_reattach(void) +{ + int init_net, self_net, err; + + self_net = open("/proc/self/ns/net", O_RDONLY); + if (CHECK_FAIL(self_net < 0)) { + perror("open(/proc/self/ns/net"); + return; + } + + init_net = open("/proc/1/ns/net", O_RDONLY); + if (CHECK_FAIL(init_net < 0)) { + perror("open(/proc/1/ns/net)"); + goto out_close; + } + + err = setns(init_net, CLONE_NEWNET); + if (CHECK_FAIL(err)) { + perror("setns(/proc/1/ns/net)"); + goto out_close; + } + + if (is_attached(init_net)) { + test__skip(); + printf("Can't test with flow dissector attached to init_net\n"); + goto out_setns; + } + + /* First run tests in root network namespace */ + do_flow_dissector_reattach(); + + /* Then repeat tests in a non-root namespace */ + err = unshare(CLONE_NEWNET); + if (CHECK_FAIL(err)) { + perror("unshare(CLONE_NEWNET)"); + goto out_setns; + } + do_flow_dissector_reattach(); + +out_setns: + /* Move back to netns we started in. 
*/ + err = setns(self_net, CLONE_NEWNET); + if (CHECK_FAIL(err)) + perror("setns(/proc/self/ns/net)"); + +out_close: + close(init_net); + close(self_net); +} diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c index d7bb5beb1c57..eba9a970703b 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c @@ -1,8 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <pthread.h> +#include <sched.h> +#include <sys/socket.h> #include <test_progs.h> #define MAX_CNT_RAWTP 10ull #define MAX_STACK_RAWTP 100 + +static int duration = 0; + struct get_stack_trace_t { int pid; int kern_stack_size; @@ -13,7 +20,7 @@ struct get_stack_trace_t { struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP]; }; -static int get_stack_print_output(void *data, int size) +static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size) { bool good_kern_stack = false, good_user_stack = false; const char *nonjit_func = "___bpf_prog_run"; @@ -34,12 +41,12 @@ static int get_stack_print_output(void *data, int size) * just assume it is good if the stack is not empty. * This could be improved in the future. */ - if (jit_enabled) { + if (env.jit_enabled) { found = num_stack > 0; } else { for (i = 0; i < num_stack; i++) { ks = ksym_search(raw_data[i]); - if (strcmp(ks->name, nonjit_func) == 0) { + if (ks && (strcmp(ks->name, nonjit_func) == 0)) { found = true; break; } @@ -51,12 +58,12 @@ static int get_stack_print_output(void *data, int size) } } else { num_stack = e->kern_stack_size / sizeof(__u64); - if (jit_enabled) { + if (env.jit_enabled) { good_kern_stack = num_stack > 0; } else { for (i = 0; i < num_stack; i++) { ks = ksym_search(e->kern_stack[i]); - if (strcmp(ks->name, nonjit_func) == 0) { + if (ks && (strcmp(ks->name, nonjit_func) == 0)) { good_kern_stack = true; break; } @@ -65,75 +72,73 @@ static int get_stack_print_output(void *data, int size) if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0) good_user_stack = true; } - if (!good_kern_stack || !good_user_stack) - return LIBBPF_PERF_EVENT_ERROR; - if (cnt == MAX_CNT_RAWTP) - return LIBBPF_PERF_EVENT_DONE; - - return LIBBPF_PERF_EVENT_CONT; + if (!good_kern_stack) + CHECK(!good_kern_stack, "kern_stack", "corrupted kernel stack\n"); + if (!good_user_stack) + CHECK(!good_user_stack, "user_stack", "corrupted user stack\n"); } void test_get_stack_raw_tp(void) { const char *file = "./test_get_stack_rawtp.o"; - int i, efd, err, prog_fd, pmu_fd, perfmap_fd; - struct perf_event_attr attr = {}; + const char *prog_name = "raw_tracepoint/sys_enter"; + int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP; + struct perf_buffer_opts pb_opts = {}; + struct perf_buffer *pb = NULL; + struct bpf_link *link = NULL; struct timespec tv = {0, 10}; - __u32 key = 0, duration = 0; + struct bpf_program *prog; struct bpf_object *obj; + struct bpf_map *map; + cpu_set_t cpu_set; err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) return; - efd = bpf_raw_tracepoint_open("sys_enter", prog_fd); - if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name)) goto close_prog; - perfmap_fd = bpf_find_map(__func__, obj, "perfmap"); - if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d 
errno %d\n", - perfmap_fd, errno)) + map = bpf_object__find_map_by_name(obj, "perfmap"); + if (CHECK(!map, "bpf_find_map", "not found\n")) goto close_prog; err = load_kallsyms(); if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno)) goto close_prog; - attr.sample_type = PERF_SAMPLE_RAW; - attr.type = PERF_TYPE_SOFTWARE; - attr.config = PERF_COUNT_SW_BPF_OUTPUT; - pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/, - -1/*group_fd*/, 0); - if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd, - errno)) + CPU_ZERO(&cpu_set); + CPU_SET(0, &cpu_set); + err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set); + if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno)) goto close_prog; - err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY); - if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err, - errno)) + link = bpf_program__attach_raw_tracepoint(prog, "sys_enter"); + if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link))) goto close_prog; - err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); - if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n", - err, errno)) - goto close_prog; - - err = perf_event_mmap(pmu_fd); - if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno)) + pb_opts.sample_cb = get_stack_print_output; + pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts); + if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb))) goto close_prog; /* trigger some syscall action */ for (i = 0; i < MAX_CNT_RAWTP; i++) nanosleep(&tv, NULL); - err = perf_event_poller(pmu_fd, get_stack_print_output); - if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno)) - goto close_prog; + while (exp_cnt > 0) { + err = perf_buffer__poll(pb, 100); + if (err < 0 && CHECK(err < 0, "pb__poll", "err %d\n", err)) + goto close_prog; + exp_cnt -= err; + } - goto close_prog_noerr; close_prog: - error_cnt++; -close_prog_noerr: + if (!IS_ERR_OR_NULL(link)) + bpf_link__destroy(link); + if (!IS_ERR_OR_NULL(pb)) + perf_buffer__free(pb); bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c new file mode 100644 index 000000000000..c680926fce73 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/global_data.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +static void test_global_data_number(struct bpf_object *obj, __u32 duration) +{ + int i, err, map_fd; + uint64_t num; + + map_fd = bpf_find_map(__func__, obj, "result_number"); + if (CHECK_FAIL(map_fd < 0)) + return; + + struct { + char *name; + uint32_t key; + uint64_t num; + } tests[] = { + { "relocate .bss reference", 0, 0 }, + { "relocate .data reference", 1, 42 }, + { "relocate .rodata reference", 2, 24 }, + { "relocate .bss reference", 3, 0 }, + { "relocate .data reference", 4, 0xffeeff }, + { "relocate .rodata reference", 5, 0xabab }, + { "relocate .bss reference", 6, 1234 }, + { "relocate .bss reference", 7, 0 }, + { "relocate .rodata reference", 8, 0xab }, + { "relocate .rodata reference", 9, 0x1111111111111111 }, + { "relocate .rodata reference", 10, ~0 }, + }; + + for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num); + CHECK(err || num != tests[i].num, tests[i].name, + "err %d result %lx expected %lx\n", + err, num, tests[i].num); + } +} + +static void test_global_data_string(struct bpf_object *obj, __u32 
duration) +{ + int i, err, map_fd; + char str[32]; + + map_fd = bpf_find_map(__func__, obj, "result_string"); + if (CHECK_FAIL(map_fd < 0)) + return; + + struct { + char *name; + uint32_t key; + char str[32]; + } tests[] = { + { "relocate .rodata reference", 0, "abcdefghijklmnopqrstuvwxyz" }, + { "relocate .data reference", 1, "abcdefghijklmnopqrstuvwxyz" }, + { "relocate .bss reference", 2, "" }, + { "relocate .data reference", 3, "abcdexghijklmnopqrstuvwxyz" }, + { "relocate .bss reference", 4, "\0\0hello" }, + }; + + for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + err = bpf_map_lookup_elem(map_fd, &tests[i].key, str); + CHECK(err || memcmp(str, tests[i].str, sizeof(str)), + tests[i].name, "err %d result \'%s\' expected \'%s\'\n", + err, str, tests[i].str); + } +} + +struct foo { + __u8 a; + __u32 b; + __u64 c; +}; + +static void test_global_data_struct(struct bpf_object *obj, __u32 duration) +{ + int i, err, map_fd; + struct foo val; + + map_fd = bpf_find_map(__func__, obj, "result_struct"); + if (CHECK_FAIL(map_fd < 0)) + return; + + struct { + char *name; + uint32_t key; + struct foo val; + } tests[] = { + { "relocate .rodata reference", 0, { 42, 0xfefeefef, 0x1111111111111111ULL, } }, + { "relocate .bss reference", 1, { } }, + { "relocate .rodata reference", 2, { } }, + { "relocate .data reference", 3, { 41, 0xeeeeefef, 0x2111111111111111ULL, } }, + }; + + for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + err = bpf_map_lookup_elem(map_fd, &tests[i].key, &val); + CHECK(err || memcmp(&val, &tests[i].val, sizeof(val)), + tests[i].name, "err %d result { %u, %u, %llu } expected { %u, %u, %llu }\n", + err, val.a, val.b, val.c, tests[i].val.a, tests[i].val.b, tests[i].val.c); + } +} + +static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration) +{ + int err = -ENOMEM, map_fd, zero = 0; + struct bpf_map *map; + __u8 *buff; + + map = bpf_object__find_map_by_name(obj, "test_glo.rodata"); + if (CHECK_FAIL(!map || !bpf_map__is_internal(map))) + return; + + map_fd = bpf_map__fd(map); + if (CHECK_FAIL(map_fd < 0)) + return; + + buff = malloc(bpf_map__def(map)->value_size); + if (buff) + err = bpf_map_update_elem(map_fd, &zero, buff, 0); + free(buff); + CHECK(!err || errno != EPERM, "test .rodata read-only map", + "err %d errno %d\n", err, errno); +} + +void test_global_data(void) +{ + const char *file = "./test_global_data.o"; + __u32 duration = 0, retval; + struct bpf_object *obj; + int err, prog_fd; + + err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (CHECK(err, "load program", "error %d loading %s\n", err, file)) + return; + + err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "pass global data run", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + test_global_data_number(obj, duration); + test_global_data_string(obj, duration); + test_global_data_struct(obj, duration); + test_global_data_rdonly(obj, duration); + + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c new file mode 100644 index 000000000000..7507c8f689bc --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +struct meta { + int ifindex; + __u32 cb32_0; + __u8 cb8_0; +}; + +static union { + __u32 cb32[5]; + __u8 cb8[20]; +} cb = { + .cb32[0] = 0x81828384, +}; + +static void 
on_sample(void *ctx, int cpu, void *data, __u32 size) +{ + struct meta *meta = (struct meta *)data; + struct ipv6_packet *pkt_v6 = data + sizeof(*meta); + int duration = 0; + + if (CHECK(size != 72 + sizeof(*meta), "check_size", "size %u != %zu\n", + size, 72 + sizeof(*meta))) + return; + if (CHECK(meta->ifindex != 1, "check_meta_ifindex", + "meta->ifindex = %d\n", meta->ifindex)) + /* spurious kfree_skb not on loopback device */ + return; + if (CHECK(meta->cb8_0 != cb.cb8[0], "check_cb8_0", "cb8_0 %x != %x\n", + meta->cb8_0, cb.cb8[0])) + return; + if (CHECK(meta->cb32_0 != cb.cb32[0], "check_cb32_0", + "cb32_0 %x != %x\n", + meta->cb32_0, cb.cb32[0])) + return; + if (CHECK(pkt_v6->eth.h_proto != 0xdd86, "check_eth", + "h_proto %x\n", pkt_v6->eth.h_proto)) + return; + if (CHECK(pkt_v6->iph.nexthdr != 6, "check_ip", + "iph.nexthdr %x\n", pkt_v6->iph.nexthdr)) + return; + if (CHECK(pkt_v6->tcp.doff != 5, "check_tcp", + "tcp.doff %x\n", pkt_v6->tcp.doff)) + return; + + *(bool *)ctx = true; +} + +void test_kfree_skb(void) +{ + struct __sk_buff skb = {}; + struct bpf_prog_test_run_attr tattr = { + .data_in = &pkt_v6, + .data_size_in = sizeof(pkt_v6), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + }; + struct bpf_prog_load_attr attr = { + .file = "./kfree_skb.o", + }; + + struct bpf_link *link = NULL, *link_fentry = NULL, *link_fexit = NULL; + struct bpf_map *perf_buf_map, *global_data; + struct bpf_program *prog, *fentry, *fexit; + struct bpf_object *obj, *obj2 = NULL; + struct perf_buffer_opts pb_opts = {}; + struct perf_buffer *pb = NULL; + int err, kfree_skb_fd; + bool passed = false; + __u32 duration = 0; + const int zero = 0; + bool test_ok[2]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &tattr.prog_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + + err = bpf_prog_load_xattr(&attr, &obj2, &kfree_skb_fd); + if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) + goto close_prog; + + prog = bpf_object__find_program_by_title(obj2, "tp_btf/kfree_skb"); + if (CHECK(!prog, "find_prog", "prog kfree_skb not found\n")) + goto close_prog; + fentry = bpf_object__find_program_by_title(obj2, "fentry/eth_type_trans"); + if (CHECK(!fentry, "find_prog", "prog eth_type_trans not found\n")) + goto close_prog; + fexit = bpf_object__find_program_by_title(obj2, "fexit/eth_type_trans"); + if (CHECK(!fexit, "find_prog", "prog eth_type_trans not found\n")) + goto close_prog; + + global_data = bpf_object__find_map_by_name(obj2, "kfree_sk.bss"); + if (CHECK(!global_data, "find global data", "not found\n")) + goto close_prog; + + link = bpf_program__attach_raw_tracepoint(prog, NULL); + if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link))) + goto close_prog; + link_fentry = bpf_program__attach_trace(fentry); + if (CHECK(IS_ERR(link_fentry), "attach fentry", "err %ld\n", + PTR_ERR(link_fentry))) + goto close_prog; + link_fexit = bpf_program__attach_trace(fexit); + if (CHECK(IS_ERR(link_fexit), "attach fexit", "err %ld\n", + PTR_ERR(link_fexit))) + goto close_prog; + + perf_buf_map = bpf_object__find_map_by_name(obj2, "perf_buf_map"); + if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n")) + goto close_prog; + + /* set up perf buffer */ + pb_opts.sample_cb = on_sample; + pb_opts.ctx = &passed; + pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts); + if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb))) + goto close_prog; + + memcpy(skb.cb, &cb, sizeof(cb)); + err = 
bpf_prog_test_run_xattr(&tattr); + duration = tattr.duration; + CHECK(err || tattr.retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, tattr.retval, duration); + + /* read perf buffer */ + err = perf_buffer__poll(pb, 100); + if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err)) + goto close_prog; + + /* make sure kfree_skb program was triggered + * and it sent expected skb into ring buffer + */ + CHECK_FAIL(!passed); + + err = bpf_map_lookup_elem(bpf_map__fd(global_data), &zero, test_ok); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + CHECK_FAIL(!test_ok[0] || !test_ok[1]); +close_prog: + perf_buffer__free(pb); + if (!IS_ERR_OR_NULL(link)) + bpf_link__destroy(link); + if (!IS_ERR_OR_NULL(link_fentry)) + bpf_link__destroy(link_fentry); + if (!IS_ERR_OR_NULL(link_fexit)) + bpf_link__destroy(link_fexit); + bpf_object__close(obj); + bpf_object__close(obj2); +} diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c index 20ddca830e68..eaf64595be88 100644 --- a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c +++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c @@ -30,10 +30,8 @@ static void test_l4lb(const char *file) u32 *magic = (u32 *)buf; err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); - if (err) { - error_cnt++; + if (CHECK_FAIL(err)) return; - } map_fd = bpf_find_map(__func__, obj, "vip_map"); if (map_fd < 0) @@ -72,10 +70,9 @@ static void test_l4lb(const char *file) bytes += stats[i].bytes; pkts += stats[i].pkts; } - if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { - error_cnt++; + if (CHECK_FAIL(bytes != MAGIC_BYTES * NUM_ITER * 2 || + pkts != NUM_ITER * 2)) printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts); - } out: bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c index ee99368c595c..8f91f1881d11 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c @@ -8,14 +8,12 @@ static void *parallel_map_access(void *arg) for (i = 0; i < 10000; i++) { err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK); - if (err) { + if (CHECK_FAIL(err)) { printf("lookup failed\n"); - error_cnt++; goto out; } - if (vars[0] != 0) { + if (CHECK_FAIL(vars[0] != 0)) { printf("lookup #%d var[0]=%d\n", i, vars[0]); - error_cnt++; goto out; } rnd = vars[1]; @@ -24,7 +22,7 @@ static void *parallel_map_access(void *arg) continue; printf("lookup #%d var[1]=%d var[%d]=%d\n", i, rnd, j, vars[j]); - error_cnt++; + CHECK_FAIL(vars[j] != rnd); goto out; } } @@ -42,34 +40,36 @@ void test_map_lock(void) void *ret; err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); - if (err) { + if (CHECK_FAIL(err)) { printf("test_map_lock:bpf_prog_load errno %d\n", errno); goto close_prog; } map_fd[0] = bpf_find_map(__func__, obj, "hash_map"); - if (map_fd[0] < 0) + if (CHECK_FAIL(map_fd[0] < 0)) goto close_prog; map_fd[1] = bpf_find_map(__func__, obj, "array_map"); - if (map_fd[1] < 0) + if (CHECK_FAIL(map_fd[1] < 0)) goto close_prog; bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK); for (i = 0; i < 4; i++) - assert(pthread_create(&thread_id[i], NULL, - &spin_lock_thread, &prog_fd) == 0); + if (CHECK_FAIL(pthread_create(&thread_id[i], NULL, + &spin_lock_thread, &prog_fd))) + goto close_prog; for (i = 4; i < 6; i++) - assert(pthread_create(&thread_id[i], NULL, - 
&parallel_map_access, &map_fd[i - 4]) == 0); + if (CHECK_FAIL(pthread_create(&thread_id[i], NULL, + &parallel_map_access, + &map_fd[i - 4]))) + goto close_prog; for (i = 0; i < 4; i++) - assert(pthread_join(thread_id[i], &ret) == 0 && - ret == (void *)&prog_fd); + if (CHECK_FAIL(pthread_join(thread_id[i], &ret) || + ret != (void *)&prog_fd)) + goto close_prog; for (i = 4; i < 6; i++) - assert(pthread_join(thread_id[i], &ret) == 0 && - ret == (void *)&map_fd[i - 4]); - goto close_prog_noerr; + if (CHECK_FAIL(pthread_join(thread_id[i], &ret) || + ret != (void *)&map_fd[i - 4])) + goto close_prog; close_prog: - error_cnt++; -close_prog_noerr: bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c new file mode 100644 index 000000000000..051a6d48762c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/mmap.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <sys/mman.h> + +struct map_data { + __u64 val[512 * 4]; +}; + +struct bss_data { + __u64 in_val; + __u64 out_val; +}; + +static size_t roundup_page(size_t sz) +{ + long page_size = sysconf(_SC_PAGE_SIZE); + return (sz + page_size - 1) / page_size * page_size; +} + +void test_mmap(void) +{ + const char *file = "test_mmap.o"; + const char *probe_name = "raw_tracepoint/sys_enter"; + const char *tp_name = "sys_enter"; + const size_t bss_sz = roundup_page(sizeof(struct bss_data)); + const size_t map_sz = roundup_page(sizeof(struct map_data)); + const int zero = 0, one = 1, two = 2, far = 1500; + const long page_size = sysconf(_SC_PAGE_SIZE); + int err, duration = 0, i, data_map_fd; + struct bpf_program *prog; + struct bpf_object *obj; + struct bpf_link *link = NULL; + struct bpf_map *data_map, *bss_map; + void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2; + volatile struct bss_data *bss_data; + volatile struct map_data *map_data; + __u64 val = 0; + + obj = bpf_object__open_file("test_mmap.o", NULL); + if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n", + file, PTR_ERR(obj))) + return; + prog = bpf_object__find_program_by_title(obj, probe_name); + if (CHECK(!prog, "find_probe", "prog '%s' not found\n", probe_name)) + goto cleanup; + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "failed to load prog '%s': %d\n", + probe_name, err)) + goto cleanup; + + bss_map = bpf_object__find_map_by_name(obj, "test_mma.bss"); + if (CHECK(!bss_map, "find_bss_map", ".bss map not found\n")) + goto cleanup; + data_map = bpf_object__find_map_by_name(obj, "data_map"); + if (CHECK(!data_map, "find_data_map", "data_map map not found\n")) + goto cleanup; + data_map_fd = bpf_map__fd(data_map); + + bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED, + bpf_map__fd(bss_map), 0); + if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap", + ".bss mmap failed: %d\n", errno)) { + bss_mmaped = NULL; + goto cleanup; + } + /* map as R/W first */ + map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED, + data_map_fd, 0); + if (CHECK(map_mmaped == MAP_FAILED, "data_mmap", + "data_map mmap failed: %d\n", errno)) { + map_mmaped = NULL; + goto cleanup; + } + + bss_data = bss_mmaped; + map_data = map_mmaped; + + CHECK_FAIL(bss_data->in_val); + CHECK_FAIL(bss_data->out_val); + CHECK_FAIL(map_data->val[0]); + CHECK_FAIL(map_data->val[1]); + CHECK_FAIL(map_data->val[2]); + CHECK_FAIL(map_data->val[far]); + + link = bpf_program__attach_raw_tracepoint(prog, tp_name); + if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", 
PTR_ERR(link))) + goto cleanup; + + bss_data->in_val = 123; + val = 111; + CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0)); + + usleep(1); + + CHECK_FAIL(bss_data->in_val != 123); + CHECK_FAIL(bss_data->out_val != 123); + CHECK_FAIL(map_data->val[0] != 111); + CHECK_FAIL(map_data->val[1] != 222); + CHECK_FAIL(map_data->val[2] != 123); + CHECK_FAIL(map_data->val[far] != 3 * 123); + + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val)); + CHECK_FAIL(val != 111); + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val)); + CHECK_FAIL(val != 222); + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val)); + CHECK_FAIL(val != 123); + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val)); + CHECK_FAIL(val != 3 * 123); + + /* data_map freeze should fail due to R/W mmap() */ + err = bpf_map_freeze(data_map_fd); + if (CHECK(!err || errno != EBUSY, "no_freeze", + "data_map freeze succeeded: err=%d, errno=%d\n", err, errno)) + goto cleanup; + + /* unmap R/W mapping */ + err = munmap(map_mmaped, map_sz); + map_mmaped = NULL; + if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno)) + goto cleanup; + + /* re-map as R/O now */ + map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0); + if (CHECK(map_mmaped == MAP_FAILED, "data_mmap", + "data_map R/O mmap failed: %d\n", errno)) { + map_mmaped = NULL; + goto cleanup; + } + map_data = map_mmaped; + + /* map/unmap in a loop to test ref counting */ + for (i = 0; i < 10; i++) { + int flags = i % 2 ? PROT_READ : PROT_WRITE; + void *p; + + p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0); + if (CHECK_FAIL(p == MAP_FAILED)) + goto cleanup; + err = munmap(p, map_sz); + if (CHECK_FAIL(err)) + goto cleanup; + } + + /* data_map freeze should now succeed due to no R/W mapping */ + err = bpf_map_freeze(data_map_fd); + if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n", + err, errno)) + goto cleanup; + + /* mapping as R/W now should fail */ + tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED, + data_map_fd, 0); + if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) { + munmap(tmp1, map_sz); + goto cleanup; + } + + bss_data->in_val = 321; + usleep(1); + CHECK_FAIL(bss_data->in_val != 321); + CHECK_FAIL(bss_data->out_val != 321); + CHECK_FAIL(map_data->val[0] != 111); + CHECK_FAIL(map_data->val[1] != 222); + CHECK_FAIL(map_data->val[2] != 321); + CHECK_FAIL(map_data->val[far] != 3 * 321); + + /* check some more advanced mmap() manipulations */ + + /* map all but last page: pages 1-3 mapped */ + tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED, + data_map_fd, 0); + if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno)) + goto cleanup; + + /* unmap second page: pages 1, 3 mapped */ + err = munmap(tmp1 + page_size, page_size); + if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) { + munmap(tmp1, map_sz); + goto cleanup; + } + + /* map page 2 back */ + tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ, + MAP_SHARED | MAP_FIXED, data_map_fd, 0); + if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) { + munmap(tmp1, page_size); + munmap(tmp1 + 2*page_size, page_size); + goto cleanup; + } + CHECK(tmp1 + page_size != tmp2, "adv_mmap4", + "tmp1: %p, tmp2: %p\n", tmp1, tmp2); + + /* re-map all 4 pages */ + tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, + data_map_fd, 0); + if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) { + munmap(tmp1, 3 * page_size); /* unmap page 1 */ + goto cleanup; + } + CHECK(tmp1 != 
tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2); + + map_data = tmp2; + CHECK_FAIL(bss_data->in_val != 321); + CHECK_FAIL(bss_data->out_val != 321); + CHECK_FAIL(map_data->val[0] != 111); + CHECK_FAIL(map_data->val[1] != 222); + CHECK_FAIL(map_data->val[2] != 321); + CHECK_FAIL(map_data->val[far] != 3 * 321); + + munmap(tmp2, 4 * page_size); +cleanup: + if (bss_mmaped) + CHECK_FAIL(munmap(bss_mmaped, bss_sz)); + if (map_mmaped) + CHECK_FAIL(munmap(map_mmaped, map_sz)); + if (!IS_ERR_OR_NULL(link)) + bpf_link__destroy(link); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c new file mode 100644 index 000000000000..3003fddc0613 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <pthread.h> +#include <sched.h> +#include <sys/socket.h> +#include <test_progs.h> + +static void on_sample(void *ctx, int cpu, void *data, __u32 size) +{ + int cpu_data = *(int *)data, duration = 0; + cpu_set_t *cpu_seen = ctx; + + if (cpu_data != cpu) + CHECK(cpu_data != cpu, "check_cpu_data", + "cpu_data %d != cpu %d\n", cpu_data, cpu); + + CPU_SET(cpu, cpu_seen); +} + +void test_perf_buffer(void) +{ + int err, prog_fd, nr_cpus, i, duration = 0; + const char *prog_name = "kprobe/sys_nanosleep"; + const char *file = "./test_perf_buffer.o"; + struct perf_buffer_opts pb_opts = {}; + struct bpf_map *perf_buf_map; + cpu_set_t cpu_set, cpu_seen; + struct bpf_program *prog; + struct bpf_object *obj; + struct perf_buffer *pb; + struct bpf_link *link; + + nr_cpus = libbpf_num_possible_cpus(); + if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus)) + return; + + /* load program */ + err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd); + if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) + return; + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name)) + goto out_close; + + /* load map */ + perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map"); + if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n")) + goto out_close; + + /* attach kprobe */ + link = bpf_program__attach_kprobe(prog, false /* retprobe */, + SYS_NANOSLEEP_KPROBE_NAME); + if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link))) + goto out_close; + + /* set up perf buffer */ + pb_opts.sample_cb = on_sample; + pb_opts.ctx = &cpu_seen; + pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts); + if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb))) + goto out_detach; + + /* trigger kprobe on every CPU */ + CPU_ZERO(&cpu_seen); + for (i = 0; i < nr_cpus; i++) { + CPU_ZERO(&cpu_set); + CPU_SET(i, &cpu_set); + + err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), + &cpu_set); + if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", + i, err)) + goto out_detach; + + usleep(1); + } + + /* read perf buffer */ + err = perf_buffer__poll(pb, 100); + if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err)) + goto out_free_pb; + + if (CHECK(CPU_COUNT(&cpu_seen) != nr_cpus, "seen_cpu_cnt", + "expect %d, seen %d\n", nr_cpus, CPU_COUNT(&cpu_seen))) + goto out_free_pb; + +out_free_pb: + perf_buffer__free(pb); +out_detach: + bpf_link__destroy(link); +out_close: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c new file 
mode 100644 index 000000000000..041952524c55 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/pinning.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> +#include <test_progs.h> + +__u32 get_map_id(struct bpf_object *obj, const char *name) +{ + struct bpf_map_info map_info = {}; + __u32 map_info_len, duration = 0; + struct bpf_map *map; + int err; + + map_info_len = sizeof(map_info); + + map = bpf_object__find_map_by_name(obj, name); + if (CHECK(!map, "find map", "NULL map")) + return 0; + + err = bpf_obj_get_info_by_fd(bpf_map__fd(map), + &map_info, &map_info_len); + CHECK(err, "get map info", "err %d errno %d", err, errno); + return map_info.id; +} + +void test_pinning(void) +{ + const char *file_invalid = "./test_pinning_invalid.o"; + const char *custpinpath = "/sys/fs/bpf/custom/pinmap"; + const char *nopinpath = "/sys/fs/bpf/nopinmap"; + const char *nopinpath2 = "/sys/fs/bpf/nopinmap2"; + const char *custpath = "/sys/fs/bpf/custom"; + const char *pinpath = "/sys/fs/bpf/pinmap"; + const char *file = "./test_pinning.o"; + __u32 map_id, map_id2, duration = 0; + struct stat statbuf = {}; + struct bpf_object *obj; + struct bpf_map *map; + int err; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .pin_root_path = custpath, + ); + + /* check that opening fails with invalid pinning value in map def */ + obj = bpf_object__open_file(file_invalid, NULL); + err = libbpf_get_error(obj); + if (CHECK(err != -EINVAL, "invalid open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + /* open the valid object file */ + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + err = bpf_object__load(obj); + if (CHECK(err, "default load", "err %d errno %d\n", err, errno)) + goto out; + + /* check that pinmap was pinned */ + err = stat(pinpath, &statbuf); + if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno)) + goto out; + + /* check that nopinmap was *not* pinned */ + err = stat(nopinpath, &statbuf); + if (CHECK(!err || errno != ENOENT, "stat nopinpath", + "err %d errno %d\n", err, errno)) + goto out; + + /* check that nopinmap2 was *not* pinned */ + err = stat(nopinpath2, &statbuf); + if (CHECK(!err || errno != ENOENT, "stat nopinpath2", + "err %d errno %d\n", err, errno)) + goto out; + + map_id = get_map_id(obj, "pinmap"); + if (!map_id) + goto out; + + bpf_object__close(obj); + + obj = bpf_object__open_file(file, NULL); + if (CHECK_FAIL(libbpf_get_error(obj))) { + obj = NULL; + goto out; + } + + err = bpf_object__load(obj); + if (CHECK(err, "default load", "err %d errno %d\n", err, errno)) + goto out; + + /* check that same map ID was reused for second load */ + map_id2 = get_map_id(obj, "pinmap"); + if (CHECK(map_id != map_id2, "check reuse", + "err %d errno %d id %d id2 %d\n", err, errno, map_id, map_id2)) + goto out; + + /* should be no-op to re-pin same map */ + map = bpf_object__find_map_by_name(obj, "pinmap"); + if (CHECK(!map, "find map", "NULL map")) + goto out; + + err = bpf_map__pin(map, NULL); + if (CHECK(err, "re-pin map", "err %d errno %d\n", err, errno)) + goto out; + + /* but error to pin at different location */ + err = bpf_map__pin(map, "/sys/fs/bpf/other"); + if (CHECK(!err, "pin map different", "err %d errno %d\n", err, errno)) + goto out; + + /* unpin maps with a pin_path set */ + err = bpf_object__unpin_maps(obj, NULL); + if (CHECK(err, "unpin 
maps", "err %d errno %d\n", err, errno)) + goto out; + + /* and re-pin them... */ + err = bpf_object__pin_maps(obj, NULL); + if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno)) + goto out; + + /* set pinning path of other map and re-pin all */ + map = bpf_object__find_map_by_name(obj, "nopinmap"); + if (CHECK(!map, "find map", "NULL map")) + goto out; + + err = bpf_map__set_pin_path(map, custpinpath); + if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno)) + goto out; + + /* should only pin the one unpinned map */ + err = bpf_object__pin_maps(obj, NULL); + if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno)) + goto out; + + /* check that nopinmap was pinned at the custom path */ + err = stat(custpinpath, &statbuf); + if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno)) + goto out; + + /* remove the custom pin path to re-test it with auto-pinning below */ + err = unlink(custpinpath); + if (CHECK(err, "unlink custpinpath", "err %d errno %d\n", err, errno)) + goto out; + + err = rmdir(custpath); + if (CHECK(err, "rmdir custpindir", "err %d errno %d\n", err, errno)) + goto out; + + bpf_object__close(obj); + + /* open the valid object file again */ + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + /* set pin paths so that nopinmap2 will attempt to reuse the map at + * pinpath (which will fail), but not before pinmap has already been + * reused + */ + bpf_object__for_each_map(map, obj) { + if (!strcmp(bpf_map__name(map), "nopinmap")) + err = bpf_map__set_pin_path(map, nopinpath2); + else if (!strcmp(bpf_map__name(map), "nopinmap2")) + err = bpf_map__set_pin_path(map, pinpath); + else + continue; + + if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno)) + goto out; + } + + /* should fail because of map parameter mismatch */ + err = bpf_object__load(obj); + if (CHECK(err != -EINVAL, "param mismatch load", "err %d errno %d\n", err, errno)) + goto out; + + /* nopinmap2 should have been pinned and cleaned up again */ + err = stat(nopinpath2, &statbuf); + if (CHECK(!err || errno != ENOENT, "stat nopinpath2", + "err %d errno %d\n", err, errno)) + goto out; + + /* pinmap should still be there */ + err = stat(pinpath, &statbuf); + if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno)) + goto out; + + bpf_object__close(obj); + + /* test auto-pinning at custom path with open opt */ + obj = bpf_object__open_file(file, &opts); + if (CHECK_FAIL(libbpf_get_error(obj))) { + obj = NULL; + goto out; + } + + err = bpf_object__load(obj); + if (CHECK(err, "custom load", "err %d errno %d\n", err, errno)) + goto out; + + /* check that pinmap was pinned at the custom path */ + err = stat(custpinpath, &statbuf); + if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno)) + goto out; + +out: + unlink(pinpath); + unlink(nopinpath); + unlink(nopinpath2); + unlink(custpinpath); + rmdir(custpath); + if (obj) + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c index 4ecfd721a044..a2537dfa899c 100644 --- a/tools/testing/selftests/bpf/prog_tests/pkt_access.c +++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c @@ -9,10 +9,8 @@ void test_pkt_access(void) int err, prog_fd; err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); - if (err) { - error_cnt++; + if (CHECK_FAIL(err)) return; - } err = 
bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4), NULL, NULL, &retval, &duration); diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c index ac0d43435806..5f7aea605019 100644 --- a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c +++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c @@ -9,10 +9,8 @@ void test_pkt_md_access(void) int err, prog_fd; err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); - if (err) { - error_cnt++; + if (CHECK_FAIL(err)) return; - } err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4), NULL, NULL, &retval, &duration); diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c new file mode 100644 index 000000000000..8a3187dec048 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_probe_user(void) +{ +#define kprobe_name "__sys_connect" + const char *prog_name = "kprobe/" kprobe_name; + const char *obj_file = "./test_probe_user.o"; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, ); + int err, results_map_fd, sock_fd, duration = 0; + struct sockaddr curr, orig, tmp; + struct sockaddr_in *in = (struct sockaddr_in *)&curr; + struct bpf_link *kprobe_link = NULL; + struct bpf_program *kprobe_prog; + struct bpf_object *obj; + static const int zero = 0; + + obj = bpf_object__open_file(obj_file, &opts); + if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) + return; + + kprobe_prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!kprobe_prog, "find_probe", + "prog '%s' not found\n", prog_name)) + goto cleanup; + + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto cleanup; + + results_map_fd = bpf_find_map(__func__, obj, "test_pro.bss"); + if (CHECK(results_map_fd < 0, "find_bss_map", + "err %d\n", results_map_fd)) + goto cleanup; + + kprobe_link = bpf_program__attach_kprobe(kprobe_prog, false, + kprobe_name); + if (CHECK(IS_ERR(kprobe_link), "attach_kprobe", + "err %ld\n", PTR_ERR(kprobe_link))) { + kprobe_link = NULL; + goto cleanup; + } + + memset(&curr, 0, sizeof(curr)); + in->sin_family = AF_INET; + in->sin_port = htons(5555); + in->sin_addr.s_addr = inet_addr("255.255.255.255"); + memcpy(&orig, &curr, sizeof(curr)); + + sock_fd = socket(AF_INET, SOCK_STREAM, 0); + if (CHECK(sock_fd < 0, "create_sock_fd", "err %d\n", sock_fd)) + goto cleanup; + + connect(sock_fd, &curr, sizeof(curr)); + close(sock_fd); + + err = bpf_map_lookup_elem(results_map_fd, &zero, &tmp); + if (CHECK(err, "get_kprobe_res", + "failed to get kprobe res: %d\n", err)) + goto cleanup; + + in = (struct sockaddr_in *)&tmp; + if (CHECK(memcmp(&tmp, &orig, sizeof(orig)), "check_kprobe_res", + "wrong kprobe res from probe read: %s:%u\n", + inet_ntoa(in->sin_addr), ntohs(in->sin_port))) + goto cleanup; + + memset(&tmp, 0xab, sizeof(tmp)); + + in = (struct sockaddr_in *)&curr; + if (CHECK(memcmp(&curr, &tmp, sizeof(tmp)), "check_kprobe_res", + "wrong kprobe res from probe write: %s:%u\n", + inet_ntoa(in->sin_addr), ntohs(in->sin_port))) + goto cleanup; +cleanup: + bpf_link__destroy(kprobe_link); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c index e60cd5ff1f55..faccc66f4e39 100644 --- a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c 
+++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c @@ -27,10 +27,8 @@ static void test_queue_stack_map_by_type(int type) return; err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); - if (err) { - error_cnt++; + if (CHECK_FAIL(err)) return; - } map_in_fd = bpf_find_map(__func__, obj, "map_in"); if (map_in_fd < 0) @@ -43,10 +41,8 @@ static void test_queue_stack_map_by_type(int type) /* Push 32 elements to the input map */ for (i = 0; i < MAP_SIZE; i++) { err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0); - if (err) { - error_cnt++; + if (CHECK_FAIL(err)) goto out; - } } /* The eBPF program pushes iph.saddr in the output map, diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c new file mode 100644 index 000000000000..9807336a3016 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include <linux/nbd.h> + +void test_raw_tp_writable_reject_nbd_invalid(void) +{ + __u32 duration = 0; + char error[4096]; + int bpf_fd = -1, tp_fd = -1; + + const struct bpf_insn program[] = { + /* r6 is our tp buffer */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + /* one byte beyond the end of the nbd_request struct */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, + sizeof(struct nbd_request)), + BPF_EXIT_INSN(), + }; + + struct bpf_load_program_attr load_attr = { + .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, + .license = "GPL v2", + .insns = program, + .insns_cnt = sizeof(program) / sizeof(struct bpf_insn), + .log_level = 2, + }; + + bpf_fd = bpf_load_program_xattr(&load_attr, error, sizeof(error)); + if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable load", + "failed: %d errno %d\n", bpf_fd, errno)) + return; + + tp_fd = bpf_raw_tracepoint_open("nbd_send_request", bpf_fd); + if (CHECK(tp_fd >= 0, "bpf_raw_tracepoint_writable open", + "erroneously succeeded\n")) + goto out_bpffd; + + close(tp_fd); +out_bpffd: + close(bpf_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c new file mode 100644 index 000000000000..5c45424cac5f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include <linux/nbd.h> + +void test_raw_tp_writable_test_run(void) +{ + __u32 duration = 0; + char error[4096]; + + const struct bpf_insn trace_program[] = { + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + struct bpf_load_program_attr load_attr = { + .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, + .license = "GPL v2", + .insns = trace_program, + .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn), + .log_level = 2, + }; + + int bpf_fd = bpf_load_program_xattr(&load_attr, error, sizeof(error)); + if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable loaded", + "failed: %d errno %d\n", bpf_fd, errno)) + return; + + const struct bpf_insn skb_program[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + struct bpf_load_program_attr skb_load_attr = { + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .license = "GPL v2", + .insns = skb_program, + .insns_cnt = sizeof(skb_program) / 
sizeof(struct bpf_insn), + }; + + int filter_fd = + bpf_load_program_xattr(&skb_load_attr, error, sizeof(error)); + if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n", + filter_fd, errno)) + goto out_bpffd; + + int tp_fd = bpf_raw_tracepoint_open("bpf_test_finish", bpf_fd); + if (CHECK(tp_fd < 0, "bpf_raw_tracepoint_writable opened", + "failed: %d errno %d\n", tp_fd, errno)) + goto out_filterfd; + + char test_skb[128] = { + 0, + }; + + __u32 prog_ret; + int err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, + 0, &prog_ret, 0); + CHECK(err != 42, "test_run", + "tracepoint did not modify return value\n"); + CHECK(prog_ret != 0, "test_run_ret", + "socket_filter did not return 0\n"); + + close(tp_fd); + + err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, 0, + &prog_ret, 0); + CHECK(err != 0, "test_run_notrace", + "test_run failed with %d errno %d\n", err, errno); + CHECK(prog_ret != 0, "test_run_ret_notrace", + "socket_filter did not return 0\n"); + +out_filterfd: + close(filter_fd); +out_bpffd: + close(bpf_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c new file mode 100644 index 000000000000..d90acc13d1ec --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +struct bss { + unsigned did_run; + unsigned iters; + unsigned sum; +}; + +struct rdonly_map_subtest { + const char *subtest_name; + const char *prog_name; + unsigned exp_iters; + unsigned exp_sum; +}; + +void test_rdonly_maps(void) +{ + const char *prog_name_skip_loop = "raw_tracepoint/sys_enter:skip_loop"; + const char *prog_name_part_loop = "raw_tracepoint/sys_enter:part_loop"; + const char *prog_name_full_loop = "raw_tracepoint/sys_enter:full_loop"; + const char *file = "test_rdonly_maps.o"; + struct rdonly_map_subtest subtests[] = { + { "skip loop", prog_name_skip_loop, 0, 0 }, + { "part loop", prog_name_part_loop, 3, 2 + 3 + 4 }, + { "full loop", prog_name_full_loop, 4, 2 + 3 + 4 + 5 }, + }; + int i, err, zero = 0, duration = 0; + struct bpf_link *link = NULL; + struct bpf_program *prog; + struct bpf_map *bss_map; + struct bpf_object *obj; + struct bss bss; + + obj = bpf_object__open_file(file, NULL); + if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj))) + return; + + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) + goto cleanup; + + bss_map = bpf_object__find_map_by_name(obj, "test_rdo.bss"); + if (CHECK(!bss_map, "find_bss_map", "failed\n")) + goto cleanup; + + for (i = 0; i < ARRAY_SIZE(subtests); i++) { + const struct rdonly_map_subtest *t = &subtests[i]; + + if (!test__start_subtest(t->subtest_name)) + continue; + + prog = bpf_object__find_program_by_title(obj, t->prog_name); + if (CHECK(!prog, "find_prog", "prog '%s' not found\n", + t->prog_name)) + goto cleanup; + + memset(&bss, 0, sizeof(bss)); + err = bpf_map_update_elem(bpf_map__fd(bss_map), &zero, &bss, 0); + if (CHECK(err, "set_bss", "failed to set bss data: %d\n", err)) + goto cleanup; + + link = bpf_program__attach_raw_tracepoint(prog, "sys_enter"); + if (CHECK(IS_ERR(link), "attach_prog", "prog '%s', err %ld\n", + t->prog_name, PTR_ERR(link))) { + link = NULL; + goto cleanup; + } + + /* trigger probe */ + usleep(1); + + bpf_link__destroy(link); + link = NULL; + + err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, &bss); + if (CHECK(err, "get_bss", "failed to get bss data: 
%d\n", err)) + goto cleanup; + if (CHECK(bss.did_run == 0, "check_run", + "prog '%s' didn't run?\n", t->prog_name)) + goto cleanup; + if (CHECK(bss.iters != t->exp_iters, "check_iters", + "prog '%s' iters: %d, expected: %d\n", + t->prog_name, bss.iters, t->exp_iters)) + goto cleanup; + if (CHECK(bss.sum != t->exp_sum, "check_sum", + "prog '%s' sum: %d, expected: %d\n", + t->prog_name, bss.sum, t->exp_sum)) + goto cleanup; + } + +cleanup: + bpf_link__destroy(link); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c index 5633be43828f..fc0d7f4f02cf 100644 --- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c +++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c @@ -1,28 +1,27 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> -static int libbpf_debug_print(enum libbpf_print_level level, - const char *format, va_list args) -{ - if (level == LIBBPF_DEBUG) - return 0; - - return vfprintf(stderr, format, args); -} - void test_reference_tracking(void) { - const char *file = "./test_sk_lookup_kern.o"; + const char *file = "test_sk_lookup_kern.o"; + const char *obj_name = "ref_track"; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts, + .object_name = obj_name, + .relaxed_maps = true, + ); struct bpf_object *obj; struct bpf_program *prog; __u32 duration = 0; int err = 0; - obj = bpf_object__open(file); - if (IS_ERR(obj)) { - error_cnt++; + obj = bpf_object__open_file(file, &open_opts); + if (CHECK_FAIL(IS_ERR(obj))) return; - } + + if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name", + "wrong obj name '%s', expected '%s'\n", + bpf_object__name(obj), obj_name)) + goto cleanup; bpf_object__for_each_program(prog, obj) { const char *title; @@ -32,17 +31,22 @@ void test_reference_tracking(void) if (strstr(title, ".text") != NULL) continue; - bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS); + if (!test__start_subtest(title)) + continue; /* Expect verifier failure if test name has 'fail' */ if (strstr(title, "fail") != NULL) { - libbpf_set_print(NULL); + libbpf_print_fn_t old_print_fn; + + old_print_fn = libbpf_set_print(NULL); err = !bpf_program__load(prog, "GPL", 0); - libbpf_set_print(libbpf_debug_print); + libbpf_set_print(old_print_fn); } else { err = bpf_program__load(prog, "GPL", 0); } CHECK(err, title, "\n"); } + +cleanup: bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/section_names.c b/tools/testing/selftests/bpf/prog_tests/section_names.c new file mode 100644 index 000000000000..9d9351dc2ded --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/section_names.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook +#include <test_progs.h> + +static int duration = 0; + +struct sec_name_test { + const char sec_name[32]; + struct { + int rc; + enum bpf_prog_type prog_type; + enum bpf_attach_type expected_attach_type; + } expected_load; + struct { + int rc; + enum bpf_attach_type attach_type; + } expected_attach; +}; + +static struct sec_name_test tests[] = { + {"InvAliD", {-ESRCH, 0, 0}, {-EINVAL, 0} }, + {"cgroup", {-ESRCH, 0, 0}, {-EINVAL, 0} }, + {"socket", {0, BPF_PROG_TYPE_SOCKET_FILTER, 0}, {-EINVAL, 0} }, + {"kprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} }, + {"uprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} }, + {"kretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} }, + {"uretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} }, + 
{"classifier", {0, BPF_PROG_TYPE_SCHED_CLS, 0}, {-EINVAL, 0} }, + {"action", {0, BPF_PROG_TYPE_SCHED_ACT, 0}, {-EINVAL, 0} }, + {"tracepoint/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} }, + {"tp/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} }, + { + "raw_tracepoint/", + {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0}, + {-EINVAL, 0}, + }, + {"raw_tp/", {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0}, {-EINVAL, 0} }, + {"xdp", {0, BPF_PROG_TYPE_XDP, 0}, {-EINVAL, 0} }, + {"perf_event", {0, BPF_PROG_TYPE_PERF_EVENT, 0}, {-EINVAL, 0} }, + {"lwt_in", {0, BPF_PROG_TYPE_LWT_IN, 0}, {-EINVAL, 0} }, + {"lwt_out", {0, BPF_PROG_TYPE_LWT_OUT, 0}, {-EINVAL, 0} }, + {"lwt_xmit", {0, BPF_PROG_TYPE_LWT_XMIT, 0}, {-EINVAL, 0} }, + {"lwt_seg6local", {0, BPF_PROG_TYPE_LWT_SEG6LOCAL, 0}, {-EINVAL, 0} }, + { + "cgroup_skb/ingress", + {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, + {0, BPF_CGROUP_INET_INGRESS}, + }, + { + "cgroup_skb/egress", + {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, + {0, BPF_CGROUP_INET_EGRESS}, + }, + {"cgroup/skb", {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, {-EINVAL, 0} }, + { + "cgroup/sock", + {0, BPF_PROG_TYPE_CGROUP_SOCK, 0}, + {0, BPF_CGROUP_INET_SOCK_CREATE}, + }, + { + "cgroup/post_bind4", + {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND}, + {0, BPF_CGROUP_INET4_POST_BIND}, + }, + { + "cgroup/post_bind6", + {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND}, + {0, BPF_CGROUP_INET6_POST_BIND}, + }, + { + "cgroup/dev", + {0, BPF_PROG_TYPE_CGROUP_DEVICE, 0}, + {0, BPF_CGROUP_DEVICE}, + }, + {"sockops", {0, BPF_PROG_TYPE_SOCK_OPS, 0}, {0, BPF_CGROUP_SOCK_OPS} }, + { + "sk_skb/stream_parser", + {0, BPF_PROG_TYPE_SK_SKB, 0}, + {0, BPF_SK_SKB_STREAM_PARSER}, + }, + { + "sk_skb/stream_verdict", + {0, BPF_PROG_TYPE_SK_SKB, 0}, + {0, BPF_SK_SKB_STREAM_VERDICT}, + }, + {"sk_skb", {0, BPF_PROG_TYPE_SK_SKB, 0}, {-EINVAL, 0} }, + {"sk_msg", {0, BPF_PROG_TYPE_SK_MSG, 0}, {0, BPF_SK_MSG_VERDICT} }, + {"lirc_mode2", {0, BPF_PROG_TYPE_LIRC_MODE2, 0}, {0, BPF_LIRC_MODE2} }, + { + "flow_dissector", + {0, BPF_PROG_TYPE_FLOW_DISSECTOR, 0}, + {0, BPF_FLOW_DISSECTOR}, + }, + { + "cgroup/bind4", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND}, + {0, BPF_CGROUP_INET4_BIND}, + }, + { + "cgroup/bind6", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND}, + {0, BPF_CGROUP_INET6_BIND}, + }, + { + "cgroup/connect4", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT}, + {0, BPF_CGROUP_INET4_CONNECT}, + }, + { + "cgroup/connect6", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT}, + {0, BPF_CGROUP_INET6_CONNECT}, + }, + { + "cgroup/sendmsg4", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG}, + {0, BPF_CGROUP_UDP4_SENDMSG}, + }, + { + "cgroup/sendmsg6", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG}, + {0, BPF_CGROUP_UDP6_SENDMSG}, + }, + { + "cgroup/recvmsg4", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG}, + {0, BPF_CGROUP_UDP4_RECVMSG}, + }, + { + "cgroup/recvmsg6", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG}, + {0, BPF_CGROUP_UDP6_RECVMSG}, + }, + { + "cgroup/sysctl", + {0, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL}, + {0, BPF_CGROUP_SYSCTL}, + }, + { + "cgroup/getsockopt", + {0, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT}, + {0, BPF_CGROUP_GETSOCKOPT}, + }, + { + "cgroup/setsockopt", + {0, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT}, + {0, BPF_CGROUP_SETSOCKOPT}, + }, +}; + +static void test_prog_type_by_name(const struct sec_name_test *test) +{ + enum bpf_attach_type expected_attach_type; + enum 
bpf_prog_type prog_type; + int rc; + + rc = libbpf_prog_type_by_name(test->sec_name, &prog_type, + &expected_attach_type); + + CHECK(rc != test->expected_load.rc, "check_code", + "prog: unexpected rc=%d for %s", rc, test->sec_name); + + if (rc) + return; + + CHECK(prog_type != test->expected_load.prog_type, "check_prog_type", + "prog: unexpected prog_type=%d for %s", + prog_type, test->sec_name); + + CHECK(expected_attach_type != test->expected_load.expected_attach_type, + "check_attach_type", "prog: unexpected expected_attach_type=%d for %s", + expected_attach_type, test->sec_name); +} + +static void test_attach_type_by_name(const struct sec_name_test *test) +{ + enum bpf_attach_type attach_type; + int rc; + + rc = libbpf_attach_type_by_name(test->sec_name, &attach_type); + + CHECK(rc != test->expected_attach.rc, "check_ret", + "attach: unexpected rc=%d for %s", rc, test->sec_name); + + if (rc) + return; + + CHECK(attach_type != test->expected_attach.attach_type, + "check_attach_type", "attach: unexpected attach_type=%d for %s", + attach_type, test->sec_name); +} + +void test_section_names(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tests); ++i) { + struct sec_name_test *test = &tests[i]; + + test_prog_type_by_name(test); + test_attach_type_by_name(test); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c new file mode 100644 index 000000000000..b607112c64e7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +static volatile int sigusr1_received = 0; + +static void sigusr1_handler(int signum) +{ + sigusr1_received++; +} + +static void test_send_signal_common(struct perf_event_attr *attr, + int prog_type, + const char *test_name) +{ + int err = -1, pmu_fd, prog_fd, info_map_fd, status_map_fd; + const char *file = "./test_send_signal_kern.o"; + struct bpf_object *obj = NULL; + int pipe_c2p[2], pipe_p2c[2]; + __u32 key = 0, duration = 0; + char buf[256]; + pid_t pid; + __u64 val; + + if (CHECK(pipe(pipe_c2p), test_name, + "pipe pipe_c2p error: %s\n", strerror(errno))) + return; + + if (CHECK(pipe(pipe_p2c), test_name, + "pipe pipe_p2c error: %s\n", strerror(errno))) { + close(pipe_c2p[0]); + close(pipe_c2p[1]); + return; + } + + pid = fork(); + if (CHECK(pid < 0, test_name, "fork error: %s\n", strerror(errno))) { + close(pipe_c2p[0]); + close(pipe_c2p[1]); + close(pipe_p2c[0]); + close(pipe_p2c[1]); + return; + } + + if (pid == 0) { + /* install signal handler and notify parent */ + signal(SIGUSR1, sigusr1_handler); + + close(pipe_c2p[0]); /* close read */ + close(pipe_p2c[1]); /* close write */ + + /* notify parent signal handler is installed */ + write(pipe_c2p[1], buf, 1); + + /* make sure parent enabled bpf program to send_signal */ + read(pipe_p2c[0], buf, 1); + + /* wait a little for signal handler */ + sleep(1); + + if (sigusr1_received) + write(pipe_c2p[1], "2", 1); + else + write(pipe_c2p[1], "0", 1); + + /* wait for parent notification and exit */ + read(pipe_p2c[0], buf, 1); + + close(pipe_c2p[1]); + close(pipe_p2c[0]); + exit(0); + } + + close(pipe_c2p[1]); /* close write */ + close(pipe_p2c[0]); /* close read */ + + err = bpf_prog_load(file, prog_type, &obj, &prog_fd); + if (CHECK(err < 0, test_name, "bpf_prog_load error: %s\n", + strerror(errno))) + goto prog_load_failure; + + pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1, + -1 /* group id */, 0 /* flags */); + if (CHECK(pmu_fd < 0, 
test_name, "perf_event_open error: %s\n", + strerror(errno))) { + err = -1; + goto close_prog; + } + + err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); + if (CHECK(err < 0, test_name, "ioctl perf_event_ioc_enable error: %s\n", + strerror(errno))) + goto disable_pmu; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); + if (CHECK(err < 0, test_name, "ioctl perf_event_ioc_set_bpf error: %s\n", + strerror(errno))) + goto disable_pmu; + + err = -1; + info_map_fd = bpf_object__find_map_fd_by_name(obj, "info_map"); + if (CHECK(info_map_fd < 0, test_name, "find map %s error\n", "info_map")) + goto disable_pmu; + + status_map_fd = bpf_object__find_map_fd_by_name(obj, "status_map"); + if (CHECK(status_map_fd < 0, test_name, "find map %s error\n", "status_map")) + goto disable_pmu; + + /* wait until child signal handler installed */ + read(pipe_c2p[0], buf, 1); + + /* trigger the bpf send_signal */ + key = 0; + val = (((__u64)(SIGUSR1)) << 32) | pid; + bpf_map_update_elem(info_map_fd, &key, &val, 0); + + /* notify child that bpf program can send_signal now */ + write(pipe_p2c[1], buf, 1); + + /* wait for result */ + err = read(pipe_c2p[0], buf, 1); + if (CHECK(err < 0, test_name, "reading pipe error: %s\n", strerror(errno))) + goto disable_pmu; + if (CHECK(err == 0, test_name, "reading pipe error: size 0\n")) { + err = -1; + goto disable_pmu; + } + + CHECK(buf[0] != '2', test_name, "incorrect result\n"); + + /* notify child safe to exit */ + write(pipe_p2c[1], buf, 1); + +disable_pmu: + close(pmu_fd); +close_prog: + bpf_object__close(obj); +prog_load_failure: + close(pipe_c2p[0]); + close(pipe_p2c[1]); + wait(NULL); +} + +static void test_send_signal_tracepoint(void) +{ + const char *id_path = "/sys/kernel/debug/tracing/events/syscalls/sys_enter_nanosleep/id"; + struct perf_event_attr attr = { + .type = PERF_TYPE_TRACEPOINT, + .sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN, + .sample_period = 1, + .wakeup_events = 1, + }; + __u32 duration = 0; + int bytes, efd; + char buf[256]; + + efd = open(id_path, O_RDONLY, 0); + if (CHECK(efd < 0, "tracepoint", + "open syscalls/sys_enter_nanosleep/id failure: %s\n", + strerror(errno))) + return; + + bytes = read(efd, buf, sizeof(buf)); + close(efd); + if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "tracepoint", + "read syscalls/sys_enter_nanosleep/id failure: %s\n", + strerror(errno))) + return; + + attr.config = strtol(buf, NULL, 0); + + test_send_signal_common(&attr, BPF_PROG_TYPE_TRACEPOINT, "tracepoint"); +} + +static void test_send_signal_perf(void) +{ + struct perf_event_attr attr = { + .sample_period = 1, + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_CPU_CLOCK, + }; + + test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT, + "perf_sw_event"); +} + +static void test_send_signal_nmi(void) +{ + struct perf_event_attr attr = { + .sample_freq = 50, + .freq = 1, + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + }; + int pmu_fd; + + /* Some setups (e.g. virtual machines) might run with hardware + * perf events disabled. If this is the case, skip this test. 
+ */ + pmu_fd = syscall(__NR_perf_event_open, &attr, 0 /* pid */, + -1 /* cpu */, -1 /* group_fd */, 0 /* flags */); + if (pmu_fd == -1) { + if (errno == ENOENT) { + printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", + __func__); + test__skip(); + return; + } + /* Let the test fail with a more informative message */ + } else { + close(pmu_fd); + } + + test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT, + "perf_hw_event"); +} + +void test_send_signal(void) +{ + if (test__start_subtest("send_signal_tracepoint")) + test_send_signal_tracepoint(); + if (test__start_subtest("send_signal_perf")) + test_send_signal_perf(); + if (test__start_subtest("send_signal_nmi")) + test_send_signal_nmi(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c new file mode 100644 index 000000000000..a2eb8db8dafb --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_skb_ctx(void) +{ + struct __sk_buff skb = { + .cb[0] = 1, + .cb[1] = 2, + .cb[2] = 3, + .cb[3] = 4, + .cb[4] = 5, + .priority = 6, + .tstamp = 7, + }; + struct bpf_prog_test_run_attr tattr = { + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + .ctx_out = &skb, + .ctx_size_out = sizeof(skb), + }; + struct bpf_object *obj; + int err; + int i; + + err = bpf_prog_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &tattr.prog_fd); + if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) + return; + + /* ctx_in != NULL, ctx_size_in == 0 */ + + tattr.ctx_size_in = 0; + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err == 0, "ctx_size_in", "err %d errno %d\n", err, errno); + tattr.ctx_size_in = sizeof(skb); + + /* ctx_out != NULL, ctx_size_out == 0 */ + + tattr.ctx_size_out = 0; + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err == 0, "ctx_size_out", "err %d errno %d\n", err, errno); + tattr.ctx_size_out = sizeof(skb); + + /* non-zero [len, tc_index] fields should be rejected*/ + + skb.len = 1; + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err == 0, "len", "err %d errno %d\n", err, errno); + skb.len = 0; + + skb.tc_index = 1; + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err == 0, "tc_index", "err %d errno %d\n", err, errno); + skb.tc_index = 0; + + /* non-zero [hash, sk] fields should be rejected */ + + skb.hash = 1; + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err == 0, "hash", "err %d errno %d\n", err, errno); + skb.hash = 0; + + skb.sk = (struct bpf_sock *)1; + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err == 0, "sk", "err %d errno %d\n", err, errno); + skb.sk = 0; + + err = bpf_prog_test_run_xattr(&tattr); + CHECK_ATTR(err != 0 || tattr.retval, + "run", + "err %d errno %d retval %d\n", + err, errno, tattr.retval); + + CHECK_ATTR(tattr.ctx_size_out != sizeof(skb), + "ctx_size_out", + "incorrect output size, want %lu have %u\n", + sizeof(skb), tattr.ctx_size_out); + + for (i = 0; i < 5; i++) + CHECK_ATTR(skb.cb[i] != i + 2, + "ctx_out_cb", + "skb->cb[i] == %d, expected %d\n", + skb.cb[i], i + 2); + CHECK_ATTR(skb.priority != 7, + "ctx_out_priority", + "skb->priority == %d, expected %d\n", + skb.priority, 7); + CHECK_ATTR(skb.tstamp != 8, + "ctx_out_tstamp", + "skb->tstamp == %lld, expected %d\n", + skb.tstamp, 8); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt.c b/tools/testing/selftests/bpf/prog_tests/sockopt.c new file mode 100644 index 
000000000000..3e8517a8395a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockopt.c @@ -0,0 +1,985 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "cgroup_helpers.h" + +static char bpf_log_buf[4096]; +static bool verbose; + +enum sockopt_test_error { + OK = 0, + DENY_LOAD, + DENY_ATTACH, + EPERM_GETSOCKOPT, + EFAULT_GETSOCKOPT, + EPERM_SETSOCKOPT, + EFAULT_SETSOCKOPT, +}; + +static struct sockopt_test { + const char *descr; + const struct bpf_insn insns[64]; + enum bpf_attach_type attach_type; + enum bpf_attach_type expected_attach_type; + + int set_optname; + int set_level; + const char set_optval[64]; + socklen_t set_optlen; + + int get_optname; + int get_level; + const char get_optval[64]; + socklen_t get_optlen; + socklen_t get_optlen_ret; + + enum sockopt_test_error error; +} tests[] = { + + /* ==================== getsockopt ==================== */ + + { + .descr = "getsockopt: no expected_attach_type", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = 0, + .error = DENY_LOAD, + }, + { + .descr = "getsockopt: wrong expected_attach_type", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + .error = DENY_ATTACH, + }, + { + .descr = "getsockopt: bypass bpf hook", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_level = SOL_IP, + .set_level = SOL_IP, + + .get_optname = IP_TOS, + .set_optname = IP_TOS, + + .set_optval = { 1 << 3 }, + .set_optlen = 1, + + .get_optval = { 1 << 3 }, + .get_optlen = 1, + }, + { + .descr = "getsockopt: return EPERM from bpf hook", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_level = SOL_IP, + .get_optname = IP_TOS, + + .get_optlen = 1, + .error = EPERM_GETSOCKOPT, + }, + { + .descr = "getsockopt: no optval bounds check, deny loading", + .insns = { + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + + /* ctx->optval[0] = 0x80 */ + BPF_MOV64_IMM(BPF_REG_0, 0x80), + BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_0, 0), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + .error = DENY_LOAD, + }, + { + .descr = "getsockopt: read ctx->level", + .insns = { + /* r6 = ctx->level */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, level)), + + /* if (ctx->level == 123) { */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 123, 4), + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } else { */ + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_level = 123, + + .get_optlen = 1, + }, + { + .descr = "getsockopt: deny writing to ctx->level", + .insns = { + /* ctx->level = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, level)), + BPF_EXIT_INSN(), + 
}, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "getsockopt: read ctx->optname", + .insns = { + /* r6 = ctx->optname */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optname)), + + /* if (ctx->optname == 123) { */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 123, 4), + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } else { */ + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_optname = 123, + + .get_optlen = 1, + }, + { + .descr = "getsockopt: read ctx->retval", + .insns = { + /* r6 = ctx->retval */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, retval)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_level = SOL_IP, + .get_optname = IP_TOS, + .get_optlen = 1, + }, + { + .descr = "getsockopt: deny writing to ctx->optname", + .insns = { + /* ctx->optname = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optname)), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "getsockopt: read ctx->optlen", + .insns = { + /* r6 = ctx->optlen */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optlen)), + + /* if (ctx->optlen == 64) { */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 64, 4), + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } else { */ + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_optlen = 64, + }, + { + .descr = "getsockopt: deny bigger ctx->optlen", + .insns = { + /* ctx->optlen = 65 */ + BPF_MOV64_IMM(BPF_REG_0, 65), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_optlen = 64, + + .error = EFAULT_GETSOCKOPT, + }, + { + .descr = "getsockopt: deny arbitrary ctx->retval", + .insns = { + /* ctx->retval = 123 */ + BPF_MOV64_IMM(BPF_REG_0, 123), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_optlen = 64, + + .error = EFAULT_GETSOCKOPT, + }, + { + .descr = "getsockopt: support smaller ctx->optlen", + .insns = { + /* ctx->optlen = 32 */ + BPF_MOV64_IMM(BPF_REG_0, 32), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 
+ offsetof(struct bpf_sockopt, retval)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_optlen = 64, + .get_optlen_ret = 32, + }, + { + .descr = "getsockopt: deny writing to ctx->optval", + .insns = { + /* ctx->optval = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optval)), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "getsockopt: deny writing to ctx->optval_end", + .insns = { + /* ctx->optval_end = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optval_end)), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "getsockopt: rewrite value", + .insns = { + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + /* r2 = ctx->optval */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), + /* r6 = ctx->optval + 1 */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + + /* r7 = ctx->optval_end */ + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, + offsetof(struct bpf_sockopt, optval_end)), + + /* if (ctx->optval + 1 <= ctx->optval_end) { */ + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1), + /* ctx->optval[0] = 0xF0 */ + BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xF0), + /* } */ + + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + + /* return 1*/ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_level = SOL_IP, + .get_optname = IP_TOS, + + .get_optval = { 0xF0 }, + .get_optlen = 1, + }, + + /* ==================== setsockopt ==================== */ + + { + .descr = "setsockopt: no expected_attach_type", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = 0, + .error = DENY_LOAD, + }, + { + .descr = "setsockopt: wrong expected_attach_type", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + .error = DENY_ATTACH, + }, + { + .descr = "setsockopt: bypass bpf hook", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .get_level = SOL_IP, + .set_level = SOL_IP, + + .get_optname = IP_TOS, + .set_optname = IP_TOS, + + .set_optval = { 1 << 3 }, + .set_optlen = 1, + + .get_optval = { 1 << 3 }, + .get_optlen = 1, + }, + { + .descr = "setsockopt: return EPERM from bpf hook", + .insns = { + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_level = SOL_IP, + .set_optname = IP_TOS, + + .set_optlen = 1, + .error = EPERM_SETSOCKOPT, + }, + { + .descr = "setsockopt: no optval bounds check, deny loading", + .insns = { + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + + /* r0 = ctx->optval[0] */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, 
BPF_REG_6, 0), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + .error = DENY_LOAD, + }, + { + .descr = "setsockopt: read ctx->level", + .insns = { + /* r6 = ctx->level */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, level)), + + /* if (ctx->level == 123) { */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 123, 4), + /* ctx->optlen = -1 */ + BPF_MOV64_IMM(BPF_REG_0, -1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } else { */ + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_level = 123, + + .set_optlen = 1, + }, + { + .descr = "setsockopt: allow changing ctx->level", + .insns = { + /* ctx->level = SOL_IP */ + BPF_MOV64_IMM(BPF_REG_0, SOL_IP), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, level)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .get_level = SOL_IP, + .set_level = 234, /* should be rewritten to SOL_IP */ + + .get_optname = IP_TOS, + .set_optname = IP_TOS, + + .set_optval = { 1 << 3 }, + .set_optlen = 1, + .get_optval = { 1 << 3 }, + .get_optlen = 1, + }, + { + .descr = "setsockopt: read ctx->optname", + .insns = { + /* r6 = ctx->optname */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optname)), + + /* if (ctx->optname == 123) { */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 123, 4), + /* ctx->optlen = -1 */ + BPF_MOV64_IMM(BPF_REG_0, -1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } else { */ + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_optname = 123, + + .set_optlen = 1, + }, + { + .descr = "setsockopt: allow changing ctx->optname", + .insns = { + /* ctx->optname = IP_TOS */ + BPF_MOV64_IMM(BPF_REG_0, IP_TOS), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optname)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .get_level = SOL_IP, + .set_level = SOL_IP, + + .get_optname = IP_TOS, + .set_optname = 456, /* should be rewritten to IP_TOS */ + + .set_optval = { 1 << 3 }, + .set_optlen = 1, + .get_optval = { 1 << 3 }, + .get_optlen = 1, + }, + { + .descr = "setsockopt: read ctx->optlen", + .insns = { + /* r6 = ctx->optlen */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optlen)), + + /* if (ctx->optlen == 64) { */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 64, 4), + /* ctx->optlen = -1 */ + BPF_MOV64_IMM(BPF_REG_0, -1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } else { */ + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_optlen = 64, + }, + { + .descr = "setsockopt: ctx->optlen == -1 is ok", + .insns = { + /* 
ctx->optlen = -1 */ + BPF_MOV64_IMM(BPF_REG_0, -1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_optlen = 64, + }, + { + .descr = "setsockopt: deny ctx->optlen < 0 (except -1)", + .insns = { + /* ctx->optlen = -2 */ + BPF_MOV64_IMM(BPF_REG_0, -2), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_optlen = 4, + + .error = EFAULT_SETSOCKOPT, + }, + { + .descr = "setsockopt: deny ctx->optlen > input optlen", + .insns = { + /* ctx->optlen = 65 */ + BPF_MOV64_IMM(BPF_REG_0, 65), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_optlen = 64, + + .error = EFAULT_SETSOCKOPT, + }, + { + .descr = "setsockopt: allow changing ctx->optlen within bounds", + .insns = { + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + /* r2 = ctx->optval */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), + /* r6 = ctx->optval + 1 */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + + /* r7 = ctx->optval_end */ + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, + offsetof(struct bpf_sockopt, optval_end)), + + /* if (ctx->optval + 1 <= ctx->optval_end) { */ + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1), + /* ctx->optval[0] = 1 << 3 */ + BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 1 << 3), + /* } */ + + /* ctx->optlen = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + + /* return 1*/ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .get_level = SOL_IP, + .set_level = SOL_IP, + + .get_optname = IP_TOS, + .set_optname = IP_TOS, + + .set_optval = { 1, 1, 1, 1 }, + .set_optlen = 4, + .get_optval = { 1 << 3 }, + .get_optlen = 1, + }, + { + .descr = "setsockopt: deny write ctx->retval", + .insns = { + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "setsockopt: deny read ctx->retval", + .insns = { + /* r6 = ctx->retval */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, retval)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "setsockopt: deny writing to ctx->optval", + .insns = { + /* ctx->optval = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optval)), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "setsockopt: deny writing to ctx->optval_end", + .insns = { + /* ctx->optval_end = 1 */ + 
BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optval_end)), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "setsockopt: allow IP_TOS <= 128", + .insns = { + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + /* r7 = ctx->optval + 1 */ + BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1), + + /* r8 = ctx->optval_end */ + BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_1, + offsetof(struct bpf_sockopt, optval_end)), + + /* if (ctx->optval + 1 <= ctx->optval_end) { */ + BPF_JMP_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 4), + + /* r9 = ctx->optval[0] */ + BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_6, 0), + + /* if (ctx->optval[0] < 128) */ + BPF_JMP_IMM(BPF_JGT, BPF_REG_9, 128, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } */ + + /* } else { */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .get_level = SOL_IP, + .set_level = SOL_IP, + + .get_optname = IP_TOS, + .set_optname = IP_TOS, + + .set_optval = { 0x80 }, + .set_optlen = 1, + .get_optval = { 0x80 }, + .get_optlen = 1, + }, + { + .descr = "setsockopt: deny IP_TOS > 128", + .insns = { + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + /* r7 = ctx->optval + 1 */ + BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1), + + /* r8 = ctx->optval_end */ + BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_1, + offsetof(struct bpf_sockopt, optval_end)), + + /* if (ctx->optval + 1 <= ctx->optval_end) { */ + BPF_JMP_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 4), + + /* r9 = ctx->optval[0] */ + BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_6, 0), + + /* if (ctx->optval[0] < 128) */ + BPF_JMP_IMM(BPF_JGT, BPF_REG_9, 128, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } */ + + /* } else { */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .get_level = SOL_IP, + .set_level = SOL_IP, + + .get_optname = IP_TOS, + .set_optname = IP_TOS, + + .set_optval = { 0x81 }, + .set_optlen = 1, + .get_optval = { 0x00 }, + .get_optlen = 1, + + .error = EPERM_SETSOCKOPT, + }, +}; + +static int load_prog(const struct bpf_insn *insns, + enum bpf_attach_type expected_attach_type) +{ + struct bpf_load_program_attr attr = { + .prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT, + .expected_attach_type = expected_attach_type, + .insns = insns, + .license = "GPL", + .log_level = 2, + }; + int fd; + + for (; + insns[attr.insns_cnt].code != (BPF_JMP | BPF_EXIT); + attr.insns_cnt++) { + } + attr.insns_cnt++; + + fd = bpf_load_program_xattr(&attr, bpf_log_buf, sizeof(bpf_log_buf)); + if (verbose && fd < 0) + fprintf(stderr, "%s\n", bpf_log_buf); + + return fd; +} + +static int run_test(int cgroup_fd, struct sockopt_test *test) +{ + int sock_fd, err, prog_fd; + void *optval = NULL; + int ret = 0; + + prog_fd = load_prog(test->insns, test->expected_attach_type); + if (prog_fd < 0) { + if (test->error == DENY_LOAD) + return 0; + + log_err("Failed to load BPF program"); + return -1; + } + + err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0); + if (err < 0) { + if (test->error == DENY_ATTACH) + goto close_prog_fd; + + log_err("Failed to attach BPF program"); 
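+		/* reaching this point means bpf_prog_attach() failed even
+		 * though this test case did not expect DENY_ATTACH, so record
+		 * it as a real failure before falling through to cleanup
+		 */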
+ ret = -1; + goto close_prog_fd; + } + + sock_fd = socket(AF_INET, SOCK_STREAM, 0); + if (sock_fd < 0) { + log_err("Failed to create AF_INET socket"); + ret = -1; + goto detach_prog; + } + + if (test->set_optlen) { + err = setsockopt(sock_fd, test->set_level, test->set_optname, + test->set_optval, test->set_optlen); + if (err) { + if (errno == EPERM && test->error == EPERM_SETSOCKOPT) + goto close_sock_fd; + if (errno == EFAULT && test->error == EFAULT_SETSOCKOPT) + goto free_optval; + + log_err("Failed to call setsockopt"); + ret = -1; + goto close_sock_fd; + } + } + + if (test->get_optlen) { + optval = malloc(test->get_optlen); + socklen_t optlen = test->get_optlen; + socklen_t expected_get_optlen = test->get_optlen_ret ?: + test->get_optlen; + + err = getsockopt(sock_fd, test->get_level, test->get_optname, + optval, &optlen); + if (err) { + if (errno == EPERM && test->error == EPERM_GETSOCKOPT) + goto free_optval; + if (errno == EFAULT && test->error == EFAULT_GETSOCKOPT) + goto free_optval; + + log_err("Failed to call getsockopt"); + ret = -1; + goto free_optval; + } + + if (optlen != expected_get_optlen) { + errno = 0; + log_err("getsockopt returned unexpected optlen"); + ret = -1; + goto free_optval; + } + + if (memcmp(optval, test->get_optval, optlen) != 0) { + errno = 0; + log_err("getsockopt returned unexpected optval"); + ret = -1; + goto free_optval; + } + } + + ret = test->error != OK; + +free_optval: + free(optval); +close_sock_fd: + close(sock_fd); +detach_prog: + bpf_prog_detach2(prog_fd, cgroup_fd, test->attach_type); +close_prog_fd: + close(prog_fd); + return ret; +} + +void test_sockopt(void) +{ + int cgroup_fd, i; + + cgroup_fd = test__join_cgroup("/sockopt"); + if (CHECK_FAIL(cgroup_fd < 0)) + return; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + test__start_subtest(tests[i].descr); + CHECK_FAIL(run_test(cgroup_fd, &tests[i])); + } + + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c new file mode 100644 index 000000000000..8547ecbdc61f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "cgroup_helpers.h" + +#define SOL_CUSTOM 0xdeadbeef +#define CUSTOM_INHERIT1 0 +#define CUSTOM_INHERIT2 1 +#define CUSTOM_LISTENER 2 + +static int connect_to_server(int server_fd) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + int fd; + + fd = socket(AF_INET, SOCK_STREAM, 0); + if (fd < 0) { + log_err("Failed to create client socket"); + return -1; + } + + if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) { + log_err("Failed to get server addr"); + goto out; + } + + if (connect(fd, (const struct sockaddr *)&addr, len) < 0) { + log_err("Fail to connect to server"); + goto out; + } + + return fd; + +out: + close(fd); + return -1; +} + +static int verify_sockopt(int fd, int optname, const char *msg, char expected) +{ + socklen_t optlen = 1; + char buf = 0; + int err; + + err = getsockopt(fd, SOL_CUSTOM, optname, &buf, &optlen); + if (err) { + log_err("%s: failed to call getsockopt", msg); + return 1; + } + + printf("%s %d: got=0x%x ? 
expected=0x%x\n", msg, optname, buf, expected); + + if (buf != expected) { + log_err("%s: unexpected getsockopt value %d != %d", msg, + buf, expected); + return 1; + } + + return 0; +} + +static pthread_mutex_t server_started_mtx = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t server_started = PTHREAD_COND_INITIALIZER; + +static void *server_thread(void *arg) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + int fd = *(int *)arg; + int client_fd; + int err = 0; + + err = listen(fd, 1); + + pthread_mutex_lock(&server_started_mtx); + pthread_cond_signal(&server_started); + pthread_mutex_unlock(&server_started_mtx); + + if (CHECK_FAIL(err < 0)) { + perror("Failed to listed on socket"); + return NULL; + } + + err += verify_sockopt(fd, CUSTOM_INHERIT1, "listen", 1); + err += verify_sockopt(fd, CUSTOM_INHERIT2, "listen", 1); + err += verify_sockopt(fd, CUSTOM_LISTENER, "listen", 1); + + client_fd = accept(fd, (struct sockaddr *)&addr, &len); + if (CHECK_FAIL(client_fd < 0)) { + perror("Failed to accept client"); + return NULL; + } + + err += verify_sockopt(client_fd, CUSTOM_INHERIT1, "accept", 1); + err += verify_sockopt(client_fd, CUSTOM_INHERIT2, "accept", 1); + err += verify_sockopt(client_fd, CUSTOM_LISTENER, "accept", 0); + + close(client_fd); + + return (void *)(long)err; +} + +static int start_server(void) +{ + struct sockaddr_in addr = { + .sin_family = AF_INET, + .sin_addr.s_addr = htonl(INADDR_LOOPBACK), + }; + char buf; + int err; + int fd; + int i; + + fd = socket(AF_INET, SOCK_STREAM, 0); + if (fd < 0) { + log_err("Failed to create server socket"); + return -1; + } + + for (i = CUSTOM_INHERIT1; i <= CUSTOM_LISTENER; i++) { + buf = 0x01; + err = setsockopt(fd, SOL_CUSTOM, i, &buf, 1); + if (err) { + log_err("Failed to call setsockopt(%d)", i); + close(fd); + return -1; + } + } + + if (bind(fd, (const struct sockaddr *)&addr, sizeof(addr)) < 0) { + log_err("Failed to bind socket"); + close(fd); + return -1; + } + + return fd; +} + +static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title) +{ + enum bpf_attach_type attach_type; + enum bpf_prog_type prog_type; + struct bpf_program *prog; + int err; + + err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); + if (err) { + log_err("Failed to deduct types for %s BPF program", title); + return -1; + } + + prog = bpf_object__find_program_by_title(obj, title); + if (!prog) { + log_err("Failed to find %s BPF program", title); + return -1; + } + + err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, + attach_type, 0); + if (err) { + log_err("Failed to attach %s BPF program", title); + return -1; + } + + return 0; +} + +static void run_test(int cgroup_fd) +{ + struct bpf_prog_load_attr attr = { + .file = "./sockopt_inherit.o", + }; + int server_fd = -1, client_fd; + struct bpf_object *obj; + void *server_err; + pthread_t tid; + int ignored; + int err; + + err = bpf_prog_load_xattr(&attr, &obj, &ignored); + if (CHECK_FAIL(err)) + return; + + err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt"); + if (CHECK_FAIL(err)) + goto close_bpf_object; + + err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt"); + if (CHECK_FAIL(err)) + goto close_bpf_object; + + server_fd = start_server(); + if (CHECK_FAIL(server_fd < 0)) + goto close_bpf_object; + + if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread, + (void *)&server_fd))) + goto close_server_fd; + + pthread_mutex_lock(&server_started_mtx); + pthread_cond_wait(&server_started, &server_started_mtx); + 
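+	/* the server thread signals server_started right after calling
+	 * listen(), so once this wait returns the listening socket should
+	 * be ready for connect_to_server() below
+	 */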
pthread_mutex_unlock(&server_started_mtx); + + client_fd = connect_to_server(server_fd); + if (CHECK_FAIL(client_fd < 0)) + goto close_server_fd; + + CHECK_FAIL(verify_sockopt(client_fd, CUSTOM_INHERIT1, "connect", 0)); + CHECK_FAIL(verify_sockopt(client_fd, CUSTOM_INHERIT2, "connect", 0)); + CHECK_FAIL(verify_sockopt(client_fd, CUSTOM_LISTENER, "connect", 0)); + + pthread_join(tid, &server_err); + + err = (int)(long)server_err; + CHECK_FAIL(err); + + close(client_fd); + +close_server_fd: + close(server_fd); +close_bpf_object: + bpf_object__close(obj); +} + +void test_sockopt_inherit(void) +{ + int cgroup_fd; + + cgroup_fd = test__join_cgroup("/sockopt_inherit"); + if (CHECK_FAIL(cgroup_fd < 0)) + return; + + run_test(cgroup_fd); + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c new file mode 100644 index 000000000000..29188d6f5c8d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "cgroup_helpers.h" + +static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title) +{ + enum bpf_attach_type attach_type; + enum bpf_prog_type prog_type; + struct bpf_program *prog; + int err; + + err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); + if (err) { + log_err("Failed to deduct types for %s BPF program", title); + return -1; + } + + prog = bpf_object__find_program_by_title(obj, title); + if (!prog) { + log_err("Failed to find %s BPF program", title); + return -1; + } + + err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, + attach_type, BPF_F_ALLOW_MULTI); + if (err) { + log_err("Failed to attach %s BPF program", title); + return -1; + } + + return 0; +} + +static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title) +{ + enum bpf_attach_type attach_type; + enum bpf_prog_type prog_type; + struct bpf_program *prog; + int err; + + err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); + if (err) + return -1; + + prog = bpf_object__find_program_by_title(obj, title); + if (!prog) + return -1; + + err = bpf_prog_detach2(bpf_program__fd(prog), cgroup_fd, + attach_type); + if (err) + return -1; + + return 0; +} + +static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, + int cg_child, int sock_fd) +{ + socklen_t optlen; + __u8 buf; + int err; + + /* Set IP_TOS to the expected value (0x80). 
*/ + + buf = 0x80; + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (err < 0) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0x80) { + log_err("Unexpected getsockopt 0x%x != 0x80 without BPF", buf); + err = -1; + goto detach; + } + + /* Attach child program and make sure it returns new value: + * - kernel: -> 0x80 + * - child: 0x80 -> 0x90 + */ + + err = prog_attach(obj, cg_child, "cgroup/getsockopt/child"); + if (err) + goto detach; + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0x90) { + log_err("Unexpected getsockopt 0x%x != 0x90", buf); + err = -1; + goto detach; + } + + /* Attach parent program and make sure it returns new value: + * - kernel: -> 0x80 + * - child: 0x80 -> 0x90 + * - parent: 0x90 -> 0xA0 + */ + + err = prog_attach(obj, cg_parent, "cgroup/getsockopt/parent"); + if (err) + goto detach; + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0xA0) { + log_err("Unexpected getsockopt 0x%x != 0xA0", buf); + err = -1; + goto detach; + } + + /* Setting unexpected initial sockopt should return EPERM: + * - kernel: -> 0x40 + * - child: unexpected 0x40, EPERM + * - parent: unexpected 0x40, EPERM + */ + + buf = 0x40; + if (setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1) < 0) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (!err) { + log_err("Unexpected success from getsockopt(IP_TOS)"); + goto detach; + } + + /* Detach child program and make sure we still get EPERM: + * - kernel: -> 0x40 + * - parent: unexpected 0x40, EPERM + */ + + err = prog_detach(obj, cg_child, "cgroup/getsockopt/child"); + if (err) { + log_err("Failed to detach child program"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (!err) { + log_err("Unexpected success from getsockopt(IP_TOS)"); + goto detach; + } + + /* Set initial value to the one the parent program expects: + * - kernel: -> 0x90 + * - parent: 0x90 -> 0xA0 + */ + + buf = 0x90; + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (err < 0) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0xA0) { + log_err("Unexpected getsockopt 0x%x != 0xA0", buf); + err = -1; + goto detach; + } + +detach: + prog_detach(obj, cg_child, "cgroup/getsockopt/child"); + prog_detach(obj, cg_parent, "cgroup/getsockopt/parent"); + + return err; +} + +static int run_setsockopt_test(struct bpf_object *obj, int cg_parent, + int cg_child, int sock_fd) +{ + socklen_t optlen; + __u8 buf; + int err; + + /* Set IP_TOS to the expected value (0x80). 
*/ + + buf = 0x80; + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (err < 0) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0x80) { + log_err("Unexpected getsockopt 0x%x != 0x80 without BPF", buf); + err = -1; + goto detach; + } + + /* Attach child program and make sure it adds 0x10. */ + + err = prog_attach(obj, cg_child, "cgroup/setsockopt"); + if (err) + goto detach; + + buf = 0x80; + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (err < 0) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0x80 + 0x10) { + log_err("Unexpected getsockopt 0x%x != 0x80 + 0x10", buf); + err = -1; + goto detach; + } + + /* Attach parent program and make sure it adds another 0x10. */ + + err = prog_attach(obj, cg_parent, "cgroup/setsockopt"); + if (err) + goto detach; + + buf = 0x80; + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (err < 0) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0x80 + 2 * 0x10) { + log_err("Unexpected getsockopt 0x%x != 0x80 + 2 * 0x10", buf); + err = -1; + goto detach; + } + +detach: + prog_detach(obj, cg_child, "cgroup/setsockopt"); + prog_detach(obj, cg_parent, "cgroup/setsockopt"); + + return err; +} + +void test_sockopt_multi(void) +{ + struct bpf_prog_load_attr attr = { + .file = "./sockopt_multi.o", + }; + int cg_parent = -1, cg_child = -1; + struct bpf_object *obj = NULL; + int sock_fd = -1; + int err = -1; + int ignored; + + cg_parent = test__join_cgroup("/parent"); + if (CHECK_FAIL(cg_parent < 0)) + goto out; + + cg_child = test__join_cgroup("/parent/child"); + if (CHECK_FAIL(cg_child < 0)) + goto out; + + err = bpf_prog_load_xattr(&attr, &obj, &ignored); + if (CHECK_FAIL(err)) + goto out; + + sock_fd = socket(AF_INET, SOCK_STREAM, 0); + if (CHECK_FAIL(sock_fd < 0)) + goto out; + + CHECK_FAIL(run_getsockopt_test(obj, cg_parent, cg_child, sock_fd)); + CHECK_FAIL(run_setsockopt_test(obj, cg_parent, cg_child, sock_fd)); + +out: + close(sock_fd); + bpf_object__close(obj); + close(cg_child); + close(cg_parent); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c new file mode 100644 index 000000000000..2061a6beac0f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "cgroup_helpers.h" + +#define SOL_CUSTOM 0xdeadbeef + +static int getsetsockopt(void) +{ + int fd, err; + union { + char u8[4]; + __u32 u32; + char cc[16]; /* TCP_CA_NAME_MAX */ + } buf = {}; + socklen_t optlen; + + fd = socket(AF_INET, SOCK_STREAM, 0); + if (fd < 0) { + log_err("Failed to create socket"); + return -1; + } + + /* IP_TOS - BPF bypass */ + + buf.u8[0] = 0x08; + err = setsockopt(fd, SOL_IP, IP_TOS, &buf, 1); + if (err) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto err; + } + + buf.u8[0] = 0x00; + optlen = 1; + err = getsockopt(fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + 
log_err("Failed to call getsockopt(IP_TOS)"); + goto err; + } + + if (buf.u8[0] != 0x08) { + log_err("Unexpected getsockopt(IP_TOS) buf[0] 0x%02x != 0x08", + buf.u8[0]); + goto err; + } + + /* IP_TTL - EPERM */ + + buf.u8[0] = 1; + err = setsockopt(fd, SOL_IP, IP_TTL, &buf, 1); + if (!err || errno != EPERM) { + log_err("Unexpected success from setsockopt(IP_TTL)"); + goto err; + } + + /* SOL_CUSTOM - handled by BPF */ + + buf.u8[0] = 0x01; + err = setsockopt(fd, SOL_CUSTOM, 0, &buf, 1); + if (err) { + log_err("Failed to call setsockopt"); + goto err; + } + + buf.u32 = 0x00; + optlen = 4; + err = getsockopt(fd, SOL_CUSTOM, 0, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt"); + goto err; + } + + if (optlen != 1) { + log_err("Unexpected optlen %d != 1", optlen); + goto err; + } + if (buf.u8[0] != 0x01) { + log_err("Unexpected buf[0] 0x%02x != 0x01", buf.u8[0]); + goto err; + } + + /* SO_SNDBUF is overwritten */ + + buf.u32 = 0x01010101; + err = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buf, 4); + if (err) { + log_err("Failed to call setsockopt(SO_SNDBUF)"); + goto err; + } + + buf.u32 = 0x00; + optlen = 4; + err = getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(SO_SNDBUF)"); + goto err; + } + + if (buf.u32 != 0x55AA*2) { + log_err("Unexpected getsockopt(SO_SNDBUF) 0x%x != 0x55AA*2", + buf.u32); + goto err; + } + + /* TCP_CONGESTION can extend the string */ + + strcpy(buf.cc, "nv"); + err = setsockopt(fd, SOL_TCP, TCP_CONGESTION, &buf, strlen("nv")); + if (err) { + log_err("Failed to call setsockopt(TCP_CONGESTION)"); + goto err; + } + + + optlen = sizeof(buf.cc); + err = getsockopt(fd, SOL_TCP, TCP_CONGESTION, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(TCP_CONGESTION)"); + goto err; + } + + if (strcmp(buf.cc, "cubic") != 0) { + log_err("Unexpected getsockopt(TCP_CONGESTION) %s != %s", + buf.cc, "cubic"); + goto err; + } + + close(fd); + return 0; +err: + close(fd); + return -1; +} + +static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title) +{ + enum bpf_attach_type attach_type; + enum bpf_prog_type prog_type; + struct bpf_program *prog; + int err; + + err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); + if (err) { + log_err("Failed to deduct types for %s BPF program", title); + return -1; + } + + prog = bpf_object__find_program_by_title(obj, title); + if (!prog) { + log_err("Failed to find %s BPF program", title); + return -1; + } + + err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, + attach_type, 0); + if (err) { + log_err("Failed to attach %s BPF program", title); + return -1; + } + + return 0; +} + +static void run_test(int cgroup_fd) +{ + struct bpf_prog_load_attr attr = { + .file = "./sockopt_sk.o", + }; + struct bpf_object *obj; + int ignored; + int err; + + err = bpf_prog_load_xattr(&attr, &obj, &ignored); + if (CHECK_FAIL(err)) + return; + + err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt"); + if (CHECK_FAIL(err)) + goto close_bpf_object; + + err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt"); + if (CHECK_FAIL(err)) + goto close_bpf_object; + + CHECK_FAIL(getsetsockopt()); + +close_bpf_object: + bpf_object__close(obj); +} + +void test_sockopt_sk(void) +{ + int cgroup_fd; + + cgroup_fd = test__join_cgroup("/sockopt_sk"); + if (CHECK_FAIL(cgroup_fd < 0)) + return; + + run_test(cgroup_fd); + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c index 
114ebe6a438e..1ae00cd3174e 100644 --- a/tools/testing/selftests/bpf/prog_tests/spinlock.c +++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c @@ -11,19 +11,19 @@ void test_spinlock(void) void *ret; err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); - if (err) { + if (CHECK_FAIL(err)) { printf("test_spin_lock:bpf_prog_load errno %d\n", errno); goto close_prog; } for (i = 0; i < 4; i++) - assert(pthread_create(&thread_id[i], NULL, - &spin_lock_thread, &prog_fd) == 0); + if (CHECK_FAIL(pthread_create(&thread_id[i], NULL, + &spin_lock_thread, &prog_fd))) + goto close_prog; + for (i = 0; i < 4; i++) - assert(pthread_join(thread_id[i], &ret) == 0 && - ret == (void *)&prog_fd); - goto close_prog_noerr; + if (CHECK_FAIL(pthread_join(thread_id[i], &ret) || + ret != (void *)&prog_fd)) + goto close_prog; close_prog: - error_cnt++; -close_prog_noerr: bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c index 3aab2b083c71..d841dced971f 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c @@ -4,11 +4,13 @@ void test_stacktrace_build_id(void) { int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; + const char *prog_name = "tracepoint/random/urandom_read"; const char *file = "./test_stacktrace_build_id.o"; - int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len; - struct perf_event_attr attr = {}; + int err, prog_fd, stack_trace_len; __u32 key, previous_key, val, duration = 0; + struct bpf_program *prog; struct bpf_object *obj; + struct bpf_link *link = NULL; char buf[256]; int i, j; struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; @@ -18,44 +20,16 @@ void test_stacktrace_build_id(void) retry: err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) - goto out; + return; - /* Get the ID for the sched/sched_switch tracepoint */ - snprintf(buf, sizeof(buf), - "/sys/kernel/debug/tracing/events/random/urandom_read/id"); - efd = open(buf, O_RDONLY, 0); - if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name)) goto close_prog; - bytes = read(efd, buf, sizeof(buf)); - close(efd); - if (CHECK(bytes <= 0 || bytes >= sizeof(buf), - "read", "bytes %d errno %d\n", bytes, errno)) + link = bpf_program__attach_tracepoint(prog, "random", "urandom_read"); + if (CHECK(IS_ERR(link), "attach_tp", "err %ld\n", PTR_ERR(link))) goto close_prog; - /* Open the perf event and attach bpf progrram */ - attr.config = strtol(buf, NULL, 0); - attr.type = PERF_TYPE_TRACEPOINT; - attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; - attr.sample_period = 1; - attr.wakeup_events = 1; - pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, - 0 /* cpu 0 */, -1 /* group id */, - 0 /* flags */); - if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", - pmu_fd, errno)) - goto close_prog; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); - if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", - err, errno)) - goto close_pmu; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); - if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", - err, errno)) - goto disable_pmu; - /* find map fds */ control_map_fd = bpf_find_map(__func__, obj, "control_map"); if 
(CHECK(control_map_fd < 0, "bpf_find_map control_map", @@ -77,9 +51,10 @@ retry: "err %d errno %d\n", err, errno)) goto disable_pmu; - assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") - == 0); - assert(system("./urandom_read") == 0); + if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null"))) + goto disable_pmu; + if (CHECK_FAIL(system("./urandom_read"))) + goto disable_pmu; /* disable stack trace collection */ key = 0; val = 1; @@ -133,8 +108,7 @@ retry: * try it one more time. */ if (build_id_matches < 1 && retry--) { - ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); - close(pmu_fd); + bpf_link__destroy(link); bpf_object__close(obj); printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", __func__); @@ -152,14 +126,8 @@ retry: "err %d errno %d\n", err, errno); disable_pmu: - ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); - -close_pmu: - close(pmu_fd); + bpf_link__destroy(link); close_prog: bpf_object__close(obj); - -out: - return; } diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c index 8a114bb1c379..f62aa0eb959b 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c @@ -1,30 +1,51 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +static __u64 read_perf_max_sample_freq(void) +{ + __u64 sample_freq = 5000; /* fallback to 5000 on error */ + FILE *f; + + f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r"); + if (f == NULL) + return sample_freq; + fscanf(f, "%llu", &sample_freq); + fclose(f); + return sample_freq; +} + void test_stacktrace_build_id_nmi(void) { int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; + const char *prog_name = "tracepoint/random/urandom_read"; const char *file = "./test_stacktrace_build_id.o"; int err, pmu_fd, prog_fd; struct perf_event_attr attr = { - .sample_freq = 5000, .freq = 1, .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, }; __u32 key, previous_key, val, duration = 0; + struct bpf_program *prog; struct bpf_object *obj; + struct bpf_link *link; char buf[256]; int i, j; struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; int build_id_matches = 0; int retry = 1; + attr.sample_freq = read_perf_max_sample_freq(); + retry: err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) return; + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name)) + goto close_prog; + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu 0 */, -1 /* group id */, 0 /* flags */); @@ -33,15 +54,12 @@ retry: pmu_fd, errno)) goto close_prog; - err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); - if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", - err, errno)) - goto close_pmu; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); - if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", - err, errno)) - goto disable_pmu; + link = bpf_program__attach_perf_event(prog, pmu_fd); + if (CHECK(IS_ERR(link), "attach_perf_event", + "err %ld\n", PTR_ERR(link))) { + close(pmu_fd); + goto close_prog; + } /* find map fds */ control_map_fd = bpf_find_map(__func__, obj, "control_map"); @@ -64,9 +82,10 @@ retry: "err %d errno %d\n", err, errno)) goto disable_pmu; - assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") - == 0); - 
assert(system("taskset 0x1 ./urandom_read 100000") == 0); + if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null"))) + goto disable_pmu; + if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000"))) + goto disable_pmu; /* disable stack trace collection */ key = 0; val = 1; @@ -120,8 +139,7 @@ retry: * try it one more time. */ if (build_id_matches < 1 && retry--) { - ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); - close(pmu_fd); + bpf_link__destroy(link); bpf_object__close(obj); printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", __func__); @@ -140,11 +158,7 @@ retry: */ disable_pmu: - ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); - -close_pmu: - close(pmu_fd); - + bpf_link__destroy(link); close_prog: bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c index 2bfd50a0d6d1..37269d23df93 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c @@ -4,65 +4,41 @@ void test_stacktrace_map(void) { int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; + const char *prog_name = "tracepoint/sched/sched_switch"; + int err, prog_fd, stack_trace_len; const char *file = "./test_stacktrace_map.o"; - int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len; - struct perf_event_attr attr = {}; __u32 key, val, duration = 0; + struct bpf_program *prog; struct bpf_object *obj; - char buf[256]; + struct bpf_link *link; err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) return; - /* Get the ID for the sched/sched_switch tracepoint */ - snprintf(buf, sizeof(buf), - "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); - efd = open(buf, O_RDONLY, 0); - if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name)) goto close_prog; - bytes = read(efd, buf, sizeof(buf)); - close(efd); - if (bytes <= 0 || bytes >= sizeof(buf)) + link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch"); + if (CHECK(IS_ERR(link), "attach_tp", "err %ld\n", PTR_ERR(link))) goto close_prog; - /* Open the perf event and attach bpf progrram */ - attr.config = strtol(buf, NULL, 0); - attr.type = PERF_TYPE_TRACEPOINT; - attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; - attr.sample_period = 1; - attr.wakeup_events = 1; - pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, - 0 /* cpu 0 */, -1 /* group id */, - 0 /* flags */); - if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", - pmu_fd, errno)) - goto close_prog; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); - if (err) - goto disable_pmu; - - err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); - if (err) - goto disable_pmu; - /* find map fds */ control_map_fd = bpf_find_map(__func__, obj, "control_map"); - if (control_map_fd < 0) + if (CHECK_FAIL(control_map_fd < 0)) goto disable_pmu; stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); - if (stackid_hmap_fd < 0) + if (CHECK_FAIL(stackid_hmap_fd < 0)) goto disable_pmu; stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); - if (stackmap_fd < 0) + if (CHECK_FAIL(stackmap_fd < 0)) goto disable_pmu; stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); - if (stack_amap_fd < 0) + if (CHECK_FAIL(stack_amap_fd < 0)) goto disable_pmu; /* give some time for bpf program 
run */ @@ -79,25 +55,21 @@ void test_stacktrace_map(void) err = compare_map_keys(stackid_hmap_fd, stackmap_fd); if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", "err %d errno %d\n", err, errno)) - goto disable_pmu_noerr; + goto disable_pmu; err = compare_map_keys(stackmap_fd, stackid_hmap_fd); if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", "err %d errno %d\n", err, errno)) - goto disable_pmu_noerr; + goto disable_pmu; stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64); err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap", "err %d errno %d\n", err, errno)) - goto disable_pmu_noerr; + goto disable_pmu; - goto disable_pmu_noerr; disable_pmu: - error_cnt++; -disable_pmu_noerr: - ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); - close(pmu_fd); + bpf_link__destroy(link); close_prog: bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c index 1f8387d80fd7..404a5498e1a3 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c @@ -3,31 +3,38 @@ void test_stacktrace_map_raw_tp(void) { + const char *prog_name = "tracepoint/sched/sched_switch"; int control_map_fd, stackid_hmap_fd, stackmap_fd; const char *file = "./test_stacktrace_map.o"; - int efd, err, prog_fd; __u32 key, val, duration = 0; + int err, prog_fd; + struct bpf_program *prog; struct bpf_object *obj; + struct bpf_link *link = NULL; err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) return; - efd = bpf_raw_tracepoint_open("sched_switch", prog_fd); - if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name)) + goto close_prog; + + link = bpf_program__attach_raw_tracepoint(prog, "sched_switch"); + if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link))) goto close_prog; /* find map fds */ control_map_fd = bpf_find_map(__func__, obj, "control_map"); - if (control_map_fd < 0) + if (CHECK_FAIL(control_map_fd < 0)) goto close_prog; stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); - if (stackid_hmap_fd < 0) + if (CHECK_FAIL(stackid_hmap_fd < 0)) goto close_prog; stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); - if (stackmap_fd < 0) + if (CHECK_FAIL(stackmap_fd < 0)) goto close_prog; /* give some time for bpf program run */ @@ -51,9 +58,8 @@ void test_stacktrace_map_raw_tp(void) "err %d errno %d\n", err, errno)) goto close_prog; - goto close_prog_noerr; close_prog: - error_cnt++; -close_prog_noerr: + if (!IS_ERR_OR_NULL(link)) + bpf_link__destroy(link); bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c new file mode 100644 index 000000000000..bb8fe646dd9f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -0,0 +1,487 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +/* test_tailcall_1 checks basic functionality by patching multiple locations + * in a single program for a single tail call slot with nop->jmp, jmp->nop + * and jmp->jmp rewrites. Also checks for nop->nop. 
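For orientation, here is a rough sketch of what the BPF side in tailcall1.o might look like, inferred from the expectations encoded in the test below: a prog array named jmp_table, per-slot "classifier/%i" programs that return their own index, and a main "classifier" program that returns 3 when no tail call is taken. The section names and map layout are assumptions, not taken from this patch.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 3);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

/* One target per slot; each simply reports which slot ran. */
SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb) { return 0; }

SEC("classifier/1")
int bpf_func_1(struct __sk_buff *skb) { return 1; }

SEC("classifier/2")
int bpf_func_2(struct __sk_buff *skb) { return 2; }

/* Entry point: try each slot in order; returning 3 means no tail
 * call was taken, which is what the test expects once the slots
 * have been emptied again.
 */
SEC("classifier")
int entry(struct __sk_buff *skb)
{
        bpf_tail_call(skb, &jmp_table, 0);
        bpf_tail_call(skb, &jmp_table, 1);
        bpf_tail_call(skb, &jmp_table, 2);
        return 3;
}

char __license[] SEC("license") = "GPL";

With a layout like this the userspace checks line up: whichever populated slot is reached first determines the return value, and an empty table falls through to 3.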
+ */ +static void test_tailcall_1(void) +{ + int err, map_fd, prog_fd, main_fd, i, j; + struct bpf_map *prog_array; + struct bpf_program *prog; + struct bpf_object *obj; + __u32 retval, duration; + char prog_name[32]; + char buff[128] = {}; + + err = bpf_prog_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_title(obj, "classifier"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != i, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + j = bpf_map__def(prog_array)->max_entries - 1 - i; + snprintf(prog_name, sizeof(prog_name), "classifier/%i", j); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + j = bpf_map__def(prog_array)->max_entries - 1 - i; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != j, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err >= 0 || errno != ENOENT)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || 
retval != 3, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + } + +out: + bpf_object__close(obj); +} + +/* test_tailcall_2 checks that patching multiple programs for a single + * tail call slot works. It also jumps through several programs and tests + * the tail call limit counter. + */ +static void test_tailcall_2(void) +{ + int err, map_fd, prog_fd, main_fd, i; + struct bpf_map *prog_array; + struct bpf_program *prog; + struct bpf_object *obj; + __u32 retval, duration; + char prog_name[32]; + char buff[128] = {}; + + err = bpf_prog_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_title(obj, "classifier"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 2, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + i = 2; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + i = 0; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); +out: + bpf_object__close(obj); +} + +/* test_tailcall_3 checks that the count value of the tail call limit + * enforcement matches with expectations. 
+ */ +static void test_tailcall_3(void) +{ + int err, map_fd, prog_fd, main_fd, data_fd, i, val; + struct bpf_map *prog_array, *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + __u32 retval, duration; + char buff[128] = {}; + + err = bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_title(obj, "classifier"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + prog = bpf_object__find_program_by_title(obj, "classifier/0"); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + i = 0; + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); + + data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); + if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) + return; + + data_fd = bpf_map__fd(data_map); + if (CHECK_FAIL(map_fd < 0)) + return; + + i = 0; + err = bpf_map_lookup_elem(data_fd, &i, &val); + CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n", + err, errno, val); + + i = 0; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n", + err, errno, retval); +out: + bpf_object__close(obj); +} + +/* test_tailcall_4 checks that the kernel properly selects indirect jump + * for the case where the key is not known. Latter is passed via global + * data to select different targets we can compare return value of. 
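Assuming the same jmp_table and per-slot return-my-index programs as in the earlier sketch, the distinctive part of tailcall4.o is presumably just that the tail call index comes from a global variable which the test overwrites through the "tailcall.bss" map, so the index is not a known constant at verification time. A hedged sketch of that entry program:

int selector = 0;       /* overwritten by the test via the .bss map */

SEC("classifier")
int entry(struct __sk_buff *skb)
{
        /* The index is only known at run time, so the JIT has to use
         * an indirect jump instead of patching a direct one.
         */
        bpf_tail_call(skb, &jmp_table, selector);
        return 3;
}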
+ */ +static void test_tailcall_4(void) +{ + int err, map_fd, prog_fd, main_fd, data_fd, i; + struct bpf_map *prog_array, *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + __u32 retval, duration; + static const int zero = 0; + char buff[128] = {}; + char prog_name[32]; + + err = bpf_prog_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_title(obj, "classifier"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); + if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) + return; + + data_fd = bpf_map__fd(data_map); + if (CHECK_FAIL(map_fd < 0)) + return; + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != i, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + } + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 3, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + } +out: + bpf_object__close(obj); +} + +/* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel generates + * an indirect jump when the keys are const but different from different branches. 
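tailcall5.o presumably differs only in how the index is chosen: every branch passes a constant, but the constants differ, which is why the test drives it with the values 1111, 1234 and 5678 and still expects an indirect jump. A sketch, again assuming the shared jmp_table and per-slot programs from above:

int selector = 0;       /* the test writes 1111, 1234 or 5678 here */

SEC("classifier")
int entry(struct __sk_buff *skb)
{
        switch (selector) {
        case 1111:
                bpf_tail_call(skb, &jmp_table, 0);
                break;
        case 1234:
                bpf_tail_call(skb, &jmp_table, 1);
                break;
        case 5678:
                bpf_tail_call(skb, &jmp_table, 2);
                break;
        }
        return 3;
}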
+ */ +static void test_tailcall_5(void) +{ + int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 }; + struct bpf_map *prog_array, *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + __u32 retval, duration; + static const int zero = 0; + char buff[128] = {}; + char prog_name[32]; + + err = bpf_prog_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_title(obj, "classifier"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); + if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) + return; + + data_fd = bpf_map__fd(data_map); + if (CHECK_FAIL(map_fd < 0)) + return; + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + snprintf(prog_name, sizeof(prog_name), "classifier/%i", i); + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != i, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + } + + for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, + &duration, &retval, NULL); + CHECK(err || retval != 3, "tailcall", + "err %d errno %d retval %d\n", err, errno, retval); + } +out: + bpf_object__close(obj); +} + +void test_tailcalls(void) +{ + if (test__start_subtest("tailcall_1")) + test_tailcall_1(); + if (test__start_subtest("tailcall_2")) + test_tailcall_2(); + if (test__start_subtest("tailcall_3")) + test_tailcall_3(); + if (test__start_subtest("tailcall_4")) + test_tailcall_4(); + if (test__start_subtest("tailcall_5")) + test_tailcall_5(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c index 958a3d88de99..1bdc1d86a50c 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c +++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c @@ -70,9 +70,6 @@ void test_task_fd_query_rawtp(void) if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) goto close_prog; - goto close_prog_noerr; close_prog: - error_cnt++; -close_prog_noerr: bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c index d636a4f39476..3f131b8fe328 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c @@ -9,7 +9,7 @@ static void test_task_fd_query_tp_core(const char *probe_name, struct perf_event_attr attr = 
{}; __u64 probe_offset, probe_addr; __u32 len, prog_id, fd_type; - struct bpf_object *obj; + struct bpf_object *obj = NULL; __u32 duration = 0; char buf[256]; @@ -62,14 +62,9 @@ static void test_task_fd_query_tp_core(const char *probe_name, fd_type, buf)) goto close_pmu; - close(pmu_fd); - goto close_prog_noerr; - close_pmu: close(pmu_fd); close_prog: - error_cnt++; -close_prog_noerr: bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c index bb8759d69099..594307dffd13 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c @@ -10,10 +10,8 @@ void test_tcp_estats(void) err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); CHECK(err, "", "err %d errno %d\n", err, errno); - if (err) { - error_cnt++; + if (err) return; - } bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c new file mode 100644 index 000000000000..f4cd60d6fba2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "cgroup_helpers.h" + +struct tcp_rtt_storage { + __u32 invoked; + __u32 dsack_dups; + __u32 delivered; + __u32 delivered_ce; + __u32 icsk_retransmits; +}; + +static void send_byte(int fd) +{ + char b = 0x55; + + if (CHECK_FAIL(write(fd, &b, sizeof(b)) != 1)) + perror("Failed to send single byte"); +} + +static int wait_for_ack(int fd, int retries) +{ + struct tcp_info info; + socklen_t optlen; + int i, err; + + for (i = 0; i < retries; i++) { + optlen = sizeof(info); + err = getsockopt(fd, SOL_TCP, TCP_INFO, &info, &optlen); + if (err < 0) { + log_err("Failed to lookup TCP stats"); + return err; + } + + if (info.tcpi_unacked == 0) + return 0; + + usleep(10); + } + + log_err("Did not receive ACK"); + return -1; +} + +static int verify_sk(int map_fd, int client_fd, const char *msg, __u32 invoked, + __u32 dsack_dups, __u32 delivered, __u32 delivered_ce, + __u32 icsk_retransmits) +{ + int err = 0; + struct tcp_rtt_storage val; + + if (CHECK_FAIL(bpf_map_lookup_elem(map_fd, &client_fd, &val) < 0)) { + perror("Failed to read socket storage"); + return -1; + } + + if (val.invoked != invoked) { + log_err("%s: unexpected bpf_tcp_sock.invoked %d != %d", + msg, val.invoked, invoked); + err++; + } + + if (val.dsack_dups != dsack_dups) { + log_err("%s: unexpected bpf_tcp_sock.dsack_dups %d != %d", + msg, val.dsack_dups, dsack_dups); + err++; + } + + if (val.delivered != delivered) { + log_err("%s: unexpected bpf_tcp_sock.delivered %d != %d", + msg, val.delivered, delivered); + err++; + } + + if (val.delivered_ce != delivered_ce) { + log_err("%s: unexpected bpf_tcp_sock.delivered_ce %d != %d", + msg, val.delivered_ce, delivered_ce); + err++; + } + + if (val.icsk_retransmits != icsk_retransmits) { + log_err("%s: unexpected bpf_tcp_sock.icsk_retransmits %d != %d", + msg, val.icsk_retransmits, icsk_retransmits); + err++; + } + + return err; +} + +static int connect_to_server(int server_fd) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + int fd; + + fd = socket(AF_INET, SOCK_STREAM, 0); + if (fd < 0) { + log_err("Failed to create client socket"); + return -1; + } + + if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) { + log_err("Failed to get server addr"); + goto out; + } + + if (connect(fd, (const struct sockaddr *)&addr, len) < 0) { + 
log_err("Fail to connect to server"); + goto out; + } + + return fd; + +out: + close(fd); + return -1; +} + +static int run_test(int cgroup_fd, int server_fd) +{ + struct bpf_prog_load_attr attr = { + .prog_type = BPF_PROG_TYPE_SOCK_OPS, + .file = "./tcp_rtt.o", + .expected_attach_type = BPF_CGROUP_SOCK_OPS, + }; + struct bpf_object *obj; + struct bpf_map *map; + int client_fd; + int prog_fd; + int map_fd; + int err; + + err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); + if (err) { + log_err("Failed to load BPF object"); + return -1; + } + + map = bpf_map__next(NULL, obj); + map_fd = bpf_map__fd(map); + + err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0); + if (err) { + log_err("Failed to attach BPF program"); + goto close_bpf_object; + } + + client_fd = connect_to_server(server_fd); + if (client_fd < 0) { + err = -1; + goto close_bpf_object; + } + + err += verify_sk(map_fd, client_fd, "syn-ack", + /*invoked=*/1, + /*dsack_dups=*/0, + /*delivered=*/1, + /*delivered_ce=*/0, + /*icsk_retransmits=*/0); + + send_byte(client_fd); + if (wait_for_ack(client_fd, 100) < 0) { + err = -1; + goto close_client_fd; + } + + + err += verify_sk(map_fd, client_fd, "first payload byte", + /*invoked=*/2, + /*dsack_dups=*/0, + /*delivered=*/2, + /*delivered_ce=*/0, + /*icsk_retransmits=*/0); + +close_client_fd: + close(client_fd); + +close_bpf_object: + bpf_object__close(obj); + return err; +} + +static int start_server(void) +{ + struct sockaddr_in addr = { + .sin_family = AF_INET, + .sin_addr.s_addr = htonl(INADDR_LOOPBACK), + }; + int fd; + + fd = socket(AF_INET, SOCK_STREAM, 0); + if (fd < 0) { + log_err("Failed to create server socket"); + return -1; + } + + if (bind(fd, (const struct sockaddr *)&addr, sizeof(addr)) < 0) { + log_err("Failed to bind socket"); + close(fd); + return -1; + } + + return fd; +} + +static pthread_mutex_t server_started_mtx = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t server_started = PTHREAD_COND_INITIALIZER; + +static void *server_thread(void *arg) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + int fd = *(int *)arg; + int client_fd; + int err; + + err = listen(fd, 1); + + pthread_mutex_lock(&server_started_mtx); + pthread_cond_signal(&server_started); + pthread_mutex_unlock(&server_started_mtx); + + if (CHECK_FAIL(err < 0)) { + perror("Failed to listed on socket"); + return NULL; + } + + client_fd = accept(fd, (struct sockaddr *)&addr, &len); + if (CHECK_FAIL(client_fd < 0)) { + perror("Failed to accept client"); + return NULL; + } + + /* Wait for the next connection (that never arrives) + * to keep this thread alive to prevent calling + * close() on client_fd. 
+ */ + if (CHECK_FAIL(accept(fd, (struct sockaddr *)&addr, &len) >= 0)) { + perror("Unexpected success in second accept"); + return NULL; + } + + close(client_fd); + + return NULL; +} + +void test_tcp_rtt(void) +{ + int server_fd, cgroup_fd; + pthread_t tid; + + cgroup_fd = test__join_cgroup("/tcp_rtt"); + if (CHECK_FAIL(cgroup_fd < 0)) + return; + + server_fd = start_server(); + if (CHECK_FAIL(server_fd < 0)) + goto close_cgroup_fd; + + if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread, + (void *)&server_fd))) + goto close_server_fd; + + pthread_mutex_lock(&server_started_mtx); + pthread_cond_wait(&server_started, &server_started_mtx); + pthread_mutex_unlock(&server_started_mtx); + + CHECK_FAIL(run_test(cgroup_fd, server_fd)); +close_server_fd: + close(server_fd); +close_cgroup_fd: + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c new file mode 100644 index 000000000000..c32aa28bd93f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2019 Facebook */ +#define _GNU_SOURCE +#include <sched.h> +#include <test_progs.h> + +#define MAX_CNT 100000 + +static __u64 time_get_ns(void) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + return ts.tv_sec * 1000000000ull + ts.tv_nsec; +} + +static int test_task_rename(const char *prog) +{ + int i, fd, duration = 0, err; + char buf[] = "test\n"; + __u64 start_time; + + fd = open("/proc/self/comm", O_WRONLY|O_TRUNC); + if (CHECK(fd < 0, "open /proc", "err %d", errno)) + return -1; + start_time = time_get_ns(); + for (i = 0; i < MAX_CNT; i++) { + err = write(fd, buf, sizeof(buf)); + if (err < 0) { + CHECK(err < 0, "task rename", "err %d", errno); + close(fd); + return -1; + } + } + printf("task_rename %s\t%lluK events per sec\n", prog, + MAX_CNT * 1000000ll / (time_get_ns() - start_time)); + close(fd); + return 0; +} + +static void test_run(const char *prog) +{ + test_task_rename(prog); +} + +static void setaffinity(void) +{ + cpu_set_t cpuset; + int cpu = 0; + + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + sched_setaffinity(0, sizeof(cpuset), &cpuset); +} + +void test_test_overhead(void) +{ + const char *kprobe_name = "kprobe/__set_task_comm"; + const char *kretprobe_name = "kretprobe/__set_task_comm"; + const char *raw_tp_name = "raw_tp/task_rename"; + const char *fentry_name = "fentry/__set_task_comm"; + const char *fexit_name = "fexit/__set_task_comm"; + const char *kprobe_func = "__set_task_comm"; + struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog; + struct bpf_program *fentry_prog, *fexit_prog; + struct bpf_object *obj; + struct bpf_link *link; + int err, duration = 0; + + obj = bpf_object__open_file("./test_overhead.o", NULL); + if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) + return; + + kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name); + if (CHECK(!kprobe_prog, "find_probe", + "prog '%s' not found\n", kprobe_name)) + goto cleanup; + kretprobe_prog = bpf_object__find_program_by_title(obj, kretprobe_name); + if (CHECK(!kretprobe_prog, "find_probe", + "prog '%s' not found\n", kretprobe_name)) + goto cleanup; + raw_tp_prog = bpf_object__find_program_by_title(obj, raw_tp_name); + if (CHECK(!raw_tp_prog, "find_probe", + "prog '%s' not found\n", raw_tp_name)) + goto cleanup; + fentry_prog = bpf_object__find_program_by_title(obj, fentry_name); + if (CHECK(!fentry_prog, "find_probe", + 
"prog '%s' not found\n", fentry_name)) + goto cleanup; + fexit_prog = bpf_object__find_program_by_title(obj, fexit_name); + if (CHECK(!fexit_prog, "find_probe", + "prog '%s' not found\n", fexit_name)) + goto cleanup; + + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto cleanup; + + setaffinity(); + + /* base line run */ + test_run("base"); + + /* attach kprobe */ + link = bpf_program__attach_kprobe(kprobe_prog, false /* retprobe */, + kprobe_func); + if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link))) + goto cleanup; + test_run("kprobe"); + bpf_link__destroy(link); + + /* attach kretprobe */ + link = bpf_program__attach_kprobe(kretprobe_prog, true /* retprobe */, + kprobe_func); + if (CHECK(IS_ERR(link), "attach kretprobe", "err %ld\n", PTR_ERR(link))) + goto cleanup; + test_run("kretprobe"); + bpf_link__destroy(link); + + /* attach raw_tp */ + link = bpf_program__attach_raw_tracepoint(raw_tp_prog, "task_rename"); + if (CHECK(IS_ERR(link), "attach fentry", "err %ld\n", PTR_ERR(link))) + goto cleanup; + test_run("raw_tp"); + bpf_link__destroy(link); + + /* attach fentry */ + link = bpf_program__attach_trace(fentry_prog); + if (CHECK(IS_ERR(link), "attach fentry", "err %ld\n", PTR_ERR(link))) + goto cleanup; + test_run("fentry"); + bpf_link__destroy(link); + + /* attach fexit */ + link = bpf_program__attach_trace(fexit_prog); + if (CHECK(IS_ERR(link), "attach fexit", "err %ld\n", PTR_ERR(link))) + goto cleanup; + test_run("fexit"); + bpf_link__destroy(link); +cleanup: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c index a2f476f91637..fb095e5cd9af 100644 --- a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c +++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c @@ -13,6 +13,9 @@ void test_tp_attach_query(void) struct bpf_prog_info prog_info; char buf[256]; + for (i = 0; i < num_progs; i++) + obj[i] = NULL; + snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); efd = open(buf, O_RDONLY, 0); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c index a74167289545..dcb5ecac778e 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp.c @@ -16,10 +16,8 @@ void test_xdp(void) int err, prog_fd, map_fd; err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); - if (err) { - error_cnt++; + if (CHECK_FAIL(err)) return; - } map_fd = bpf_find_map(__func__, obj, "vip2tnl"); if (map_fd < 0) diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index 922aa0a19764..3744196d7cba 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -10,10 +10,8 @@ void test_xdp_adjust_tail(void) int err, prog_fd; err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); - if (err) { - error_cnt++; + if (CHECK_FAIL(err)) return; - } err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), buf, &size, &retval, &duration); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c index 09e6b46f5515..c9404e6b226e 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c @@ -31,10 +31,8 @@ void test_xdp_noinline(void) u32 
*magic = (u32 *)buf; err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); - if (err) { - error_cnt++; + if (CHECK_FAIL(err)) return; - } map_fd = bpf_find_map(__func__, obj, "vip_map"); if (map_fd < 0) @@ -73,9 +71,10 @@ void test_xdp_noinline(void) bytes += stats[i].bytes; pkts += stats[i].pkts; } - if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { - error_cnt++; - printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts); + if (CHECK_FAIL(bytes != MAGIC_BYTES * NUM_ITER * 2 || + pkts != NUM_ITER * 2)) { + printf("test_xdp_noinline:FAIL:stats %lld %lld\n", + bytes, pkts); } out: bpf_object__close(obj); |
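One closing observation on the CHECK_FAIL() conversions that run through most of the hunks above: unlike CHECK(), it takes no name or format string; it evaluates a condition, records the failure with the test harness, and hands the truth value back so callers can branch to their cleanup labels, which is what lets the old error_cnt++ / *_noerr label pairs disappear. A simplified model follows (not the actual test_progs.h definition; test__fail() is assumed to be the harness hook that marks the running test as failed):

#include <errno.h>
#include <stdio.h>

#define CHECK_FAIL(condition) ({                                \
        int __ret = !!(condition);                              \
        int __save_errno = errno;                               \
        if (__ret) {                                            \
                test__fail();   /* mark the running test failed */ \
                fprintf(stdout, "%s:FAIL:%d\n",                 \
                        __func__, __LINE__);                    \
        }                                                       \
        errno = __save_errno;   /* keep errno for later messages */ \
        __ret;                                                  \
})

Usage then reads exactly as in the hunks above: if (CHECK_FAIL(err)) goto close_prog;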