Diffstat (limited to 'tools/testing/selftests/bpf/progs')
71 files changed, 3975 insertions(+), 227 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c index bb0acd79d28a..40dd57fca5cc 100644 --- a/tools/testing/selftests/bpf/progs/arena_atomics.c +++ b/tools/testing/selftests/bpf/progs/arena_atomics.c @@ -4,6 +4,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <stdbool.h> +#include <stdatomic.h> #include "bpf_arena_common.h" struct { @@ -77,8 +78,13 @@ int sub(const void *ctx) return 0; } +#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING +_Atomic __u64 __arena_global and64_value = (0x110ull << 32); +_Atomic __u32 __arena_global and32_value = 0x110; +#else __u64 __arena_global and64_value = (0x110ull << 32); __u32 __arena_global and32_value = 0x110; +#endif SEC("raw_tp/sys_enter") int and(const void *ctx) @@ -86,16 +92,25 @@ int and(const void *ctx) if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; #ifdef ENABLE_ATOMICS_TESTS - +#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING + __c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed); + __c11_atomic_fetch_and(&and32_value, 0x011, memory_order_relaxed); +#else __sync_fetch_and_and(&and64_value, 0x011ull << 32); __sync_fetch_and_and(&and32_value, 0x011); #endif +#endif return 0; } +#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING +_Atomic __u32 __arena_global or32_value = 0x110; +_Atomic __u64 __arena_global or64_value = (0x110ull << 32); +#else __u32 __arena_global or32_value = 0x110; __u64 __arena_global or64_value = (0x110ull << 32); +#endif SEC("raw_tp/sys_enter") int or(const void *ctx) @@ -103,15 +118,25 @@ int or(const void *ctx) if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; #ifdef ENABLE_ATOMICS_TESTS +#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING + __c11_atomic_fetch_or(&or64_value, 0x011ull << 32, memory_order_relaxed); + __c11_atomic_fetch_or(&or32_value, 0x011, memory_order_relaxed); +#else __sync_fetch_and_or(&or64_value, 0x011ull << 32); __sync_fetch_and_or(&or32_value, 0x011); #endif +#endif return 0; } +#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING +_Atomic __u64 __arena_global xor64_value = (0x110ull << 32); +_Atomic __u32 __arena_global xor32_value = 0x110; +#else __u64 __arena_global xor64_value = (0x110ull << 32); __u32 __arena_global xor32_value = 0x110; +#endif SEC("raw_tp/sys_enter") int xor(const void *ctx) @@ -119,9 +144,14 @@ int xor(const void *ctx) if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; #ifdef ENABLE_ATOMICS_TESTS +#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING + __c11_atomic_fetch_xor(&xor64_value, 0x011ull << 32, memory_order_relaxed); + __c11_atomic_fetch_xor(&xor32_value, 0x011, memory_order_relaxed); +#else __sync_fetch_and_xor(&xor64_value, 0x011ull << 32); __sync_fetch_and_xor(&xor32_value, 0x011); #endif +#endif return 0; } diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c index d665b8a15cc4..f089faa97ae6 100644 --- a/tools/testing/selftests/bpf/progs/bpf_cubic.c +++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only -/* WARNING: This implemenation is not necessarily the same +/* WARNING: This implementation is not necessarily the same * as the tcp_cubic.c. The purpose is mainly for testing * the kernel BPF logic. 
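Note: the arena_atomics.c hunks above switch between Clang's __sync_* builtins and C11 atomics depending on whether the compiler defines __BPF_FEATURE_ATOMIC_MEM_ORDERING. A minimal standalone sketch of that pattern, with illustrative variable and function names that are not part of the patch:

    #include <stdatomic.h>

    #ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
    _Atomic unsigned long long flags_word;

    static inline void clear_flags(unsigned long long keep_mask)
    {
            /* relaxed ordering is expected to lower to the same plain
             * BPF atomic insn that __sync_fetch_and_and() emits */
            __c11_atomic_fetch_and(&flags_word, keep_mask, memory_order_relaxed);
    }
    #else
    unsigned long long flags_word;

    static inline void clear_flags(unsigned long long keep_mask)
    {
            __sync_fetch_and_and(&flags_word, keep_mask);
    }
    #endif

Both branches perform the same atomic read-modify-write; the C11 form just makes the (relaxed) memory ordering explicit, which is what the new feature macro advertises.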
* @@ -314,7 +314,7 @@ static void bictcp_update(struct bpf_bictcp *ca, __u32 cwnd, __u32 acked) * (so time^3 is done by using 64 bit) * and without the support of division of 64bit numbers * (so all divisions are done by using 32 bit) - * also NOTE the unit of those veriables + * also NOTE the unit of those variables * time = (t - K) / 2^bictcp_HZ * c = bic_scale >> 10 * rtt = (srtt >> 3) / HZ @@ -507,7 +507,7 @@ void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample) __u32 delay; bpf_cubic_acked_called = 1; - /* Some calls are for duplicates without timetamps */ + /* Some calls are for duplicates without timestamps */ if (sample->rtt_us < 0) return; diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c index 02f552e7fd4d..7cd73e75f52a 100644 --- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c +++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c @@ -26,7 +26,7 @@ static bool before(__u32 seq1, __u32 seq2) char _license[] SEC("license") = "GPL"; -volatile const char fallback[TCP_CA_NAME_MAX]; +volatile const char fallback_cc[TCP_CA_NAME_MAX]; const char bpf_dctcp[] = "bpf_dctcp"; const char tcp_cdg[] = "cdg"; char cc_res[TCP_CA_NAME_MAX]; @@ -71,10 +71,10 @@ void BPF_PROG(bpf_dctcp_init, struct sock *sk) struct bpf_dctcp *ca = inet_csk_ca(sk); int *stg; - if (!(tp->ecn_flags & TCP_ECN_OK) && fallback[0]) { + if (!(tp->ecn_flags & TCP_ECN_OK) && fallback_cc[0]) { /* Switch to fallback */ if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION, - (void *)fallback, sizeof(fallback)) == -EBUSY) + (void *)fallback_cc, sizeof(fallback_cc)) == -EBUSY) ebusy_cnt++; /* Switch back to myself and the recurred bpf_dctcp_init() @@ -87,7 +87,7 @@ void BPF_PROG(bpf_dctcp_init, struct sock *sk) /* Switch back to fallback */ if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION, - (void *)fallback, sizeof(fallback)) == -EBUSY) + (void *)fallback_cc, sizeof(fallback_cc)) == -EBUSY) ebusy_cnt++; /* Expecting -ENOTSUPP for tcp_cdg_res */ diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 81097a3f15eb..eccaf955e394 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -2,6 +2,9 @@ #ifndef __BPF_MISC_H__ #define __BPF_MISC_H__ +#define XSTR(s) STR(s) +#define STR(s) #s + /* This set of attributes controls behavior of the * test_loader.c:test_loader__run_subtests(). * @@ -22,10 +25,49 @@ * * __msg Message expected to be found in the verifier log. * Multiple __msg attributes could be specified. + * To match a regular expression use "{{" "}}" brackets, + * e.g. "foo{{[0-9]+}}" matches strings like "foo007". + * Extended POSIX regular expression syntax is allowed + * inside the brackets. * __msg_unpriv Same as __msg but for unprivileged mode. * - * __regex Same as __msg, but using a regular expression. - * __regex_unpriv Same as __msg_unpriv but using a regular expression. + * __xlated Expect a line in a disassembly log after verifier applies rewrites. + * Multiple __xlated attributes could be specified. + * Regular expressions could be specified same way as in __msg. + * __xlated_unpriv Same as __xlated but for unprivileged mode. + * + * __jited Match a line in a disassembly of the jited BPF program. + * Has to be used after __arch_* macro. + * For example: + * + * __arch_x86_64 + * __jited(" endbr64") + * __jited(" nopl (%rax,%rax)") + * __jited(" xorq %rax, %rax") + * ... + * __naked void some_test(void) + * { + * asm volatile (... 
::: __clobber_all); + * } + * + * Regular expressions could be included in patterns same way + * as in __msg. + * + * By default assume that each pattern has to be matched on the + * next consecutive line of disassembly, e.g.: + * + * __jited(" endbr64") # matched on line N + * __jited(" nopl (%rax,%rax)") # matched on line N+1 + * + * If match occurs on a wrong line an error is reported. + * To override this behaviour use literal "...", e.g.: + * + * __jited(" endbr64") # matched on line N + * __jited("...") # not matched + * __jited(" nopl (%rax,%rax)") # matched on any line >= N + * + * __jited_unpriv Same as __jited but for unprivileged mode. + * * * __success Expect program load success in privileged mode. * __success_unpriv Expect program load success in unprivileged mode. @@ -60,14 +102,20 @@ * __auxiliary Annotated program is not a separate test, but used as auxiliary * for some other test cases and should always be loaded. * __auxiliary_unpriv Same, but load program in unprivileged mode. + * + * __arch_* Specify on which architecture the test case should be tested. + * Several __arch_* annotations could be specified at once. + * When test case is not run on current arch it is marked as skipped. */ -#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg))) -#define __regex(regex) __attribute__((btf_decl_tag("comment:test_expect_regex=" regex))) +#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" XSTR(__COUNTER__) "=" msg))) +#define __xlated(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated=" XSTR(__COUNTER__) "=" msg))) +#define __jited(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg))) #define __failure __attribute__((btf_decl_tag("comment:test_expect_failure"))) #define __success __attribute__((btf_decl_tag("comment:test_expect_success"))) #define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc))) -#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg))) -#define __regex_unpriv(regex) __attribute__((btf_decl_tag("comment:test_expect_regex_unpriv=" regex))) +#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" XSTR(__COUNTER__) "=" msg))) +#define __xlated_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated_unpriv=" XSTR(__COUNTER__) "=" msg))) +#define __jited_unpriv(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg))) #define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv"))) #define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv"))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) @@ -77,6 +125,10 @@ #define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary"))) #define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv"))) #define __btf_path(path) __attribute__((btf_decl_tag("comment:test_btf_path=" path))) +#define __arch(arch) __attribute__((btf_decl_tag("comment:test_arch=" arch))) +#define __arch_x86_64 __arch("X86_64") +#define __arch_arm64 __arch("ARM64") +#define __arch_riscv64 __arch("RISCV64") /* Convenience macro for use with 'asm volatile' blocks */ #define __naked __attribute__((naked)) diff --git a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c index 1a476d8ed354..9e7d9674ce2a 100644 --- 
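Note: to make the new bpf_misc.h annotations concrete, here is a hedged sketch of how they compose in a test program. The test bodies are illustrative only; the __jited lines are copied from the documentation example above:

    SEC("?raw_tp")
    __failure __msg("R{{[0-9]+}} invalid mem access 'scalar'")
    __naked void msg_regex_example(void)
    {
            /* r1 holds a scalar, so the load must be rejected */
            asm volatile ("r1 = 42; r0 = *(u64 *)(r1 +0); exit;" ::: __clobber_all);
    }

    __arch_x86_64                      /* skipped (not failed) on other archs */
    __jited(" endbr64")
    __jited("...")                     /* "..." allows a gap before the next match */
    __jited(" xorq %rax, %rax")
    __success
    SEC("tc")
    __naked void jited_match_example(void)
    {
            asm volatile ("r0 = 0; exit;" ::: __clobber_all);
    }

The XSTR(__COUNTER__) prefix now embedded in each decl_tag is what lets test_loader.c recover the source order of these expectations, since BTF decl tags attached to a function carry no ordering of their own.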
a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c +++ b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c @@ -43,9 +43,7 @@ int BPF_KPROBE(handle_sys_prctl) /* test for PT_REGS_PARM */ -#if !defined(bpf_target_arm64) && !defined(bpf_target_s390) bpf_probe_read_kernel(&tmp, sizeof(tmp), &PT_REGS_PARM1_SYSCALL(real_regs)); -#endif arg1 = tmp; bpf_probe_read_kernel(&arg2, sizeof(arg2), &PT_REGS_PARM2_SYSCALL(real_regs)); bpf_probe_read_kernel(&arg3, sizeof(arg3), &PT_REGS_PARM3_SYSCALL(real_regs)); diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi.h b/tools/testing/selftests/bpf/progs/cg_storage_multi.h index a0778fe7857a..41d59f0ee606 100644 --- a/tools/testing/selftests/bpf/progs/cg_storage_multi.h +++ b/tools/testing/selftests/bpf/progs/cg_storage_multi.h @@ -3,8 +3,6 @@ #ifndef __PROGS_CG_STORAGE_MULTI_H #define __PROGS_CG_STORAGE_MULTI_H -#include <asm/types.h> - struct cgroup_value { __u32 egress_pkts; __u32 ingress_pkts; diff --git a/tools/testing/selftests/bpf/progs/cgroup_ancestor.c b/tools/testing/selftests/bpf/progs/cgroup_ancestor.c new file mode 100644 index 000000000000..8c2deb4fc493 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_ancestor.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_tracing_net.h" +#define NUM_CGROUP_LEVELS 4 + +__u64 cgroup_ids[NUM_CGROUP_LEVELS]; +__u16 dport; + +static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level) +{ + /* [1] &level passed to external function that may change it, it's + * incompatible with loop unroll. + */ + cgroup_ids[level] = bpf_skb_ancestor_cgroup_id(skb, level); +} + +SEC("tc") +int log_cgroup_id(struct __sk_buff *skb) +{ + struct sock *sk = (void *)skb->sk; + + if (!sk) + return TC_ACT_OK; + + sk = bpf_core_cast(sk, struct sock); + if (sk->sk_protocol == IPPROTO_UDP && sk->sk_dport == dport) { + log_nth_level(skb, 0); + log_nth_level(skb, 1); + log_nth_level(skb, 2); + log_nth_level(skb, 3); + } + + return TC_ACT_OK; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/cgroup_storage.c b/tools/testing/selftests/bpf/progs/cgroup_storage.c new file mode 100644 index 000000000000..db1e4d2d3281 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_storage.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); + __type(key, struct bpf_cgroup_storage_key); + __type(value, __u64); +} cgroup_storage SEC(".maps"); + +SEC("cgroup_skb/egress") +int bpf_prog(struct __sk_buff *skb) +{ + __u64 *counter; + + counter = bpf_get_local_storage(&cgroup_storage, 0); + __sync_fetch_and_add(counter, 1); + + /* Drop one out of every two packets */ + return (*counter & 1); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/dev_cgroup.c b/tools/testing/selftests/bpf/progs/dev_cgroup.c index 79b54a4fa244..c1dfbd2b56fc 100644 --- a/tools/testing/selftests/bpf/progs/dev_cgroup.c +++ b/tools/testing/selftests/bpf/progs/dev_cgroup.c @@ -41,14 +41,14 @@ int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx) bpf_trace_printk(fmt, sizeof(fmt), ctx->major, ctx->minor); #endif - /* Allow access to /dev/zero and /dev/random. + /* Allow access to /dev/null and /dev/urandom. * Forbid everything else. 
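Note: cgroup_storage.c above is a complete egress filter, but the loader side is not part of this diff. A hedged libbpf sketch of attaching it to a cgroup; the object file name is assumed and error handling is trimmed:

    #include <fcntl.h>
    #include <bpf/libbpf.h>

    int attach_cgroup_storage(const char *cgroup_path)
    {
            struct bpf_object *obj;
            struct bpf_program *prog;
            int cg_fd;

            obj = bpf_object__open_file("cgroup_storage.bpf.o", NULL);
            if (!obj || bpf_object__load(obj))
                    return -1;

            prog = bpf_object__find_program_by_name(obj, "bpf_prog");
            cg_fd = open(cgroup_path, O_RDONLY | O_DIRECTORY);
            if (!prog || cg_fd < 0)
                    return -1;

            /* SEC("cgroup_skb/egress") implies BPF_CGROUP_INET_EGRESS */
            return bpf_program__attach_cgroup(prog, cg_fd) ? 0 : -1;
    }

Once attached, the per-cgroup counter's low bit alternates on every packet, so the program returns 0 for every other packet and drops half of that cgroup's egress traffic, as the "Drop one out of every two packets" comment says.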
*/ if (ctx->major != 1 || type != BPF_DEVCG_DEV_CHAR) return 0; switch (ctx->minor) { - case 5: /* 1:5 /dev/zero */ + case 3: /* 1:3 /dev/null */ case 9: /* 1:9 /dev/urandom */ return 1; } diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index c3bc186af21e..8f36c9de7591 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -965,7 +965,7 @@ int dynptr_invalidate_slice_reinit(void *ctx) * mem_or_null pointers. */ SEC("?raw_tp") -__failure __regex("R[0-9]+ type=scalar expected=percpu_ptr_") +__failure __msg("R{{[0-9]+}} type=scalar expected=percpu_ptr_") int dynptr_invalidate_slice_or_null(void *ctx) { struct bpf_dynptr ptr; @@ -983,7 +983,7 @@ int dynptr_invalidate_slice_or_null(void *ctx) /* Destruction of dynptr should also any slices obtained from it */ SEC("?raw_tp") -__failure __regex("R[0-9]+ invalid mem access 'scalar'") +__failure __msg("R{{[0-9]+}} invalid mem access 'scalar'") int dynptr_invalidate_slice_failure(void *ctx) { struct bpf_dynptr ptr1; @@ -1070,7 +1070,7 @@ int dynptr_read_into_slot(void *ctx) /* bpf_dynptr_slice()s are read-only and cannot be written to */ SEC("?tc") -__failure __regex("R[0-9]+ cannot write into rdonly_mem") +__failure __msg("R{{[0-9]+}} cannot write into rdonly_mem") int skb_invalid_slice_write(struct __sk_buff *skb) { struct bpf_dynptr ptr; diff --git a/tools/testing/selftests/bpf/progs/epilogue_exit.c b/tools/testing/selftests/bpf/progs/epilogue_exit.c new file mode 100644 index 000000000000..33d3a57bee90 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/epilogue_exit.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +__success +/* save __u64 *ctx to stack */ +__xlated("0: *(u64 *)(r10 -8) = r1") +/* main prog */ +__xlated("1: r1 = *(u64 *)(r1 +0)") +__xlated("2: r2 = *(u64 *)(r1 +0)") +__xlated("3: r3 = 0") +__xlated("4: r4 = 1") +__xlated("5: if r2 == 0x0 goto pc+10") +__xlated("6: r0 = 0") +__xlated("7: *(u64 *)(r1 +0) = r3") +/* epilogue */ +__xlated("8: r1 = *(u64 *)(r10 -8)") +__xlated("9: r1 = *(u64 *)(r1 +0)") +__xlated("10: r6 = *(u64 *)(r1 +0)") +__xlated("11: r6 += 10000") +__xlated("12: *(u64 *)(r1 +0) = r6") +__xlated("13: r0 = r6") +__xlated("14: r0 *= 2") +__xlated("15: exit") +/* 2nd part of the main prog after the first exit */ +__xlated("16: *(u64 *)(r1 +0) = r4") +__xlated("17: r0 = 1") +/* Clear the r1 to ensure it does not have + * off-by-1 error and ensure it jumps back to the + * beginning of epilogue which initializes + * the r1 with the ctx ptr. 
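Note: the __xlated() lines in epilogue_exit.c above (and in pro_epilogue.c further down) match the post-verifier instruction stream. Outside the test harness, the same stream can be fetched through prog info; a hedged sketch, assuming a privileged caller (kernels hide the instructions from unprivileged ones):

    #include <bpf/bpf.h>

    /* Fetch the xlated instructions of a loaded program -- the same
     * bytes the test harness disassembles for __xlated() matching.
     */
    int fetch_xlated(int prog_fd, struct bpf_insn *buf, __u32 buf_bytes)
    {
            struct bpf_prog_info info = {};
            __u32 len = sizeof(info);

            info.xlated_prog_insns = (__u64)(unsigned long)buf;
            info.xlated_prog_len = buf_bytes;
            if (bpf_prog_get_info_by_fd(prog_fd, &info, &len))
                    return -1;
            return info.xlated_prog_len;    /* actual size in bytes */
    }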
+ */ +__xlated("18: r1 = 0") +__xlated("19: gotol pc-12") +SEC("struct_ops/test_epilogue_exit") +__naked int test_epilogue_exit(void) +{ + asm volatile ( + "r1 = *(u64 *)(r1 +0);" + "r2 = *(u64 *)(r1 +0);" + "r3 = 0;" + "r4 = 1;" + "if r2 == 0 goto +3;" + "r0 = 0;" + "*(u64 *)(r1 + 0) = r3;" + "exit;" + "*(u64 *)(r1 + 0) = r4;" + "r0 = 1;" + "r1 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC(".struct_ops.link") +struct bpf_testmod_st_ops epilogue_exit = { + .test_epilogue = (void *)test_epilogue_exit, +}; + +SEC("syscall") +__retval(20000) +int syscall_epilogue_exit0(void *ctx) +{ + struct st_ops_args args = { .a = 1 }; + + return bpf_kfunc_st_ops_test_epilogue(&args); +} + +SEC("syscall") +__retval(20002) +int syscall_epilogue_exit1(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_epilogue(&args); +} diff --git a/tools/testing/selftests/bpf/progs/epilogue_tailcall.c b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c new file mode 100644 index 000000000000..7275dd594de0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +static __noinline __used int subprog(struct st_ops_args *args) +{ + args->a += 1; + return args->a; +} + +SEC("struct_ops/test_epilogue_subprog") +int BPF_PROG(test_epilogue_subprog, struct st_ops_args *args) +{ + subprog(args); + return args->a; +} + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); + __array(values, void (void)); +} epilogue_map SEC(".maps") = { + .values = { + [0] = (void *)&test_epilogue_subprog, + } +}; + +SEC("struct_ops/test_epilogue_tailcall") +int test_epilogue_tailcall(unsigned long long *ctx) +{ + bpf_tail_call(ctx, &epilogue_map, 0); + return 0; +} + +SEC(".struct_ops.link") +struct bpf_testmod_st_ops epilogue_tailcall = { + .test_epilogue = (void *)test_epilogue_tailcall, +}; + +SEC(".struct_ops.link") +struct bpf_testmod_st_ops epilogue_subprog = { + .test_epilogue = (void *)test_epilogue_subprog, +}; + +SEC("syscall") +int syscall_epilogue_tailcall(struct st_ops_args *args) +{ + return bpf_kfunc_st_ops_test_epilogue(args); +} diff --git a/tools/testing/selftests/bpf/progs/err.h b/tools/testing/selftests/bpf/progs/err.h index d66d283d9e59..38529779a236 100644 --- a/tools/testing/selftests/bpf/progs/err.h +++ b/tools/testing/selftests/bpf/progs/err.h @@ -5,6 +5,16 @@ #define MAX_ERRNO 4095 #define IS_ERR_VALUE(x) (unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO +#define __STR(x) #x + +#define set_if_not_errno_or_zero(x, y) \ +({ \ + asm volatile ("if %0 s< -4095 goto +1\n" \ + "if %0 s<= 0 goto +1\n" \ + "%0 = " __STR(y) "\n" \ + : "+r"(x)); \ +}) + static inline int IS_ERR_OR_NULL(const void *ptr) { return !ptr || IS_ERR_VALUE((unsigned long)ptr); diff --git a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c index 68587b1de34e..30fd504856c7 100644 --- a/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c +++ b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c @@ -4,34 +4,16 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - 
__uint(max_entries, 1); - __type(key, __u32); - __type(value, __u64); -} cg_ids SEC(".maps"); - -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __uint(max_entries, 1); - __type(key, __u32); - __type(value, __u32); -} pidmap SEC(".maps"); +__u64 cg_id; +__u64 expected_pid; SEC("tracepoint/syscalls/sys_enter_nanosleep") int trace(void *ctx) { __u32 pid = bpf_get_current_pid_tgid(); - __u32 key = 0, *expected_pid; - __u64 *val; - - expected_pid = bpf_map_lookup_elem(&pidmap, &key); - if (!expected_pid || *expected_pid != pid) - return 0; - val = bpf_map_lookup_elem(&cg_ids, &key); - if (val) - *val = bpf_get_current_cgroup_id(); + if (expected_pid == pid) + cg_id = bpf_get_current_cgroup_id(); return 0; } diff --git a/tools/testing/selftests/bpf/progs/iters_testmod.c b/tools/testing/selftests/bpf/progs/iters_testmod.c new file mode 100644 index 000000000000..df1d3db60b1b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_testmod.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include "bpf_experimental.h" +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +SEC("raw_tp/sys_enter") +__success +int iter_next_trusted(const void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct bpf_iter_task_vma vma_it; + struct vm_area_struct *vma_ptr; + + bpf_iter_task_vma_new(&vma_it, cur_task, 0); + + vma_ptr = bpf_iter_task_vma_next(&vma_it); + if (vma_ptr == NULL) + goto out; + + bpf_kfunc_trusted_vma_test(vma_ptr); +out: + bpf_iter_task_vma_destroy(&vma_it); + return 0; +} + +SEC("raw_tp/sys_enter") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +int iter_next_trusted_or_null(const void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct bpf_iter_task_vma vma_it; + struct vm_area_struct *vma_ptr; + + bpf_iter_task_vma_new(&vma_it, cur_task, 0); + + vma_ptr = bpf_iter_task_vma_next(&vma_it); + + bpf_kfunc_trusted_vma_test(vma_ptr); + + bpf_iter_task_vma_destroy(&vma_it); + return 0; +} + +SEC("raw_tp/sys_enter") +__success +int iter_next_rcu(const void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct bpf_iter_task task_it; + struct task_struct *task_ptr; + + bpf_iter_task_new(&task_it, cur_task, 0); + + task_ptr = bpf_iter_task_next(&task_it); + if (task_ptr == NULL) + goto out; + + bpf_kfunc_rcu_task_test(task_ptr); +out: + bpf_iter_task_destroy(&task_it); + return 0; +} + +SEC("raw_tp/sys_enter") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +int iter_next_rcu_or_null(const void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct bpf_iter_task task_it; + struct task_struct *task_ptr; + + bpf_iter_task_new(&task_it, cur_task, 0); + + task_ptr = bpf_iter_task_next(&task_it); + + bpf_kfunc_rcu_task_test(task_ptr); + + bpf_iter_task_destroy(&task_it); + return 0; +} + +SEC("raw_tp/sys_enter") +__failure __msg("R1 must be referenced or trusted") +int iter_next_rcu_not_trusted(const void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct bpf_iter_task task_it; + struct task_struct *task_ptr; + + bpf_iter_task_new(&task_it, cur_task, 0); + + task_ptr = bpf_iter_task_next(&task_it); + if (task_ptr == NULL) + goto out; + + bpf_kfunc_trusted_task_test(task_ptr); +out: + bpf_iter_task_destroy(&task_it); + return 0; +} + +SEC("raw_tp/sys_enter") +__failure __msg("R1 cannot write into rdonly_mem") +/* Message 
should not be 'R1 cannot write into rdonly_trusted_mem' */ +int iter_next_ptr_mem_not_trusted(const void *ctx) +{ + struct bpf_iter_num num_it; + int *num_ptr; + + bpf_iter_num_new(&num_it, 0, 10); + + num_ptr = bpf_iter_num_next(&num_it); + if (num_ptr == NULL) + goto out; + + bpf_kfunc_trusted_num_test(num_ptr); +out: + bpf_iter_num_destroy(&num_it); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c index 3873fb6c292a..4a176e6aede8 100644 --- a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c +++ b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c @@ -12,6 +12,7 @@ struct bpf_iter_testmod_seq { extern int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) __ksym; extern s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it) __ksym; +extern s64 bpf_iter_testmod_seq_value(int blah, struct bpf_iter_testmod_seq *it) __ksym; extern void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) __ksym; const volatile __s64 exp_empty = 0 + 1; @@ -76,4 +77,53 @@ int testmod_seq_truncated(const void *ctx) return 0; } +SEC("?raw_tp") +__failure +__msg("expected an initialized iter_testmod_seq as arg #2") +int testmod_seq_getter_before_bad(const void *ctx) +{ + struct bpf_iter_testmod_seq it; + + return bpf_iter_testmod_seq_value(0, &it); +} + +SEC("?raw_tp") +__failure +__msg("expected an initialized iter_testmod_seq as arg #2") +int testmod_seq_getter_after_bad(const void *ctx) +{ + struct bpf_iter_testmod_seq it; + s64 sum = 0, *v; + + bpf_iter_testmod_seq_new(&it, 100, 100); + + while ((v = bpf_iter_testmod_seq_next(&it))) { + sum += *v; + } + + bpf_iter_testmod_seq_destroy(&it); + + return sum + bpf_iter_testmod_seq_value(0, &it); +} + +SEC("?socket") +__success __retval(1000000) +int testmod_seq_getter_good(const void *ctx) +{ + struct bpf_iter_testmod_seq it; + s64 sum = 0, *v; + + bpf_iter_testmod_seq_new(&it, 100, 100); + + while ((v = bpf_iter_testmod_seq_next(&it))) { + sum += *v; + } + + sum *= bpf_iter_testmod_seq_value(0, &it); + + bpf_iter_testmod_seq_destroy(&it); + + return sum; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c index 4b0b7b79cdfb..08fae306539c 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c @@ -150,4 +150,11 @@ int kfunc_call_test_mem_acquire_fail(struct __sk_buff *skb) return ret; } +SEC("?tc") +int kfunc_call_test_pointer_arg_type_mismatch(struct __sk_buff *skb) +{ + bpf_kfunc_call_test_pass_ctx((void *)10); + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c index 75043ffc5dad..b092a72b2c9d 100644 --- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c +++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c @@ -8,9 +8,12 @@ #include "../bpf_experimental.h" #include "../bpf_testmod/bpf_testmod_kfunc.h" +struct plain_local; + struct node_data { long key; long data; + struct plain_local __kptr * stashed_in_local_kptr; struct bpf_rb_node node; }; @@ -85,6 +88,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) static int create_and_stash(int idx, int val) { + struct plain_local *inner_local_kptr; struct map_value *mapval; struct node_data *res; @@ -92,11 +96,25 @@ static int create_and_stash(int 
idx, int val) if (!mapval) return 1; + inner_local_kptr = bpf_obj_new(typeof(*inner_local_kptr)); + if (!inner_local_kptr) + return 2; + res = bpf_obj_new(typeof(*res)); - if (!res) - return 1; + if (!res) { + bpf_obj_drop(inner_local_kptr); + return 3; + } res->key = val; + inner_local_kptr = bpf_kptr_xchg(&res->stashed_in_local_kptr, inner_local_kptr); + if (inner_local_kptr) { + /* Should never happen, we just obj_new'd res */ + bpf_obj_drop(inner_local_kptr); + bpf_obj_drop(res); + return 4; + } + res = bpf_kptr_xchg(&mapval->node, res); if (res) bpf_obj_drop(res); @@ -169,6 +187,7 @@ long stash_local_with_root(void *ctx) SEC("tc") long unstash_rb_node(void *ctx) { + struct plain_local *inner_local_kptr = NULL; struct map_value *mapval; struct node_data *res; long retval; @@ -180,6 +199,13 @@ long unstash_rb_node(void *ctx) res = bpf_kptr_xchg(&mapval->node, NULL); if (res) { + inner_local_kptr = bpf_kptr_xchg(&res->stashed_in_local_kptr, inner_local_kptr); + if (!inner_local_kptr) { + bpf_obj_drop(res); + return 1; + } + bpf_obj_drop(inner_local_kptr); + retval = res->key; bpf_obj_drop(res); return retval; diff --git a/tools/testing/selftests/bpf/progs/lsm_tailcall.c b/tools/testing/selftests/bpf/progs/lsm_tailcall.c new file mode 100644 index 000000000000..49c075ce2d4c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/lsm_tailcall.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Huawei Technologies Co., Ltd */ + +#include "vmlinux.h" +#include <errno.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +SEC("lsm/file_permission") +int lsm_file_permission_prog(void *ctx) +{ + return 0; +} + +SEC("lsm/file_alloc_security") +int lsm_file_alloc_security_prog(void *ctx) +{ + return 0; +} + +SEC("lsm/file_alloc_security") +int lsm_file_alloc_security_entry(void *ctx) +{ + bpf_tail_call_static(ctx, &jmp_table, 0); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/mmap_inner_array.c b/tools/testing/selftests/bpf/progs/mmap_inner_array.c new file mode 100644 index 000000000000..90aacbc2938a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/mmap_inner_array.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +struct inner_array_type { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(map_flags, BPF_F_MMAPABLE); + __type(key, __u32); + __type(value, __u64); + __uint(max_entries, 1); +} inner_array SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(key_size, 4); + __uint(value_size, 4); + __uint(max_entries, 1); + __array(values, struct inner_array_type); +} outer_map SEC(".maps"); + +int pid = 0; +__u64 match_value = 0x13572468; +bool done = false; +bool pid_match = false; +bool outer_map_match = false; + +SEC("fentry/" SYS_PREFIX "sys_nanosleep") +int add_to_list_in_inner_array(void *ctx) +{ + __u32 curr_pid, zero = 0; + struct bpf_map *map; + __u64 *value; + + curr_pid = (u32)bpf_get_current_pid_tgid(); + if (done || curr_pid != pid) + return 0; + + pid_match = true; + map = bpf_map_lookup_elem(&outer_map, &curr_pid); + if (!map) + return 0; + + outer_map_match = true; + value = bpf_map_lookup_elem(map, &zero); + if (!value) + return 0; + + *value = match_value; + done = true; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/nested_acquire.c b/tools/testing/selftests/bpf/progs/nested_acquire.c new file mode 100644 index 000000000000..8e521a21d995 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/nested_acquire.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +SEC("tp_btf/tcp_probe") +__success +int BPF_PROG(test_nested_acquire_nonzero, struct sock *sk, struct sk_buff *skb) +{ + struct sk_buff *ptr; + + ptr = bpf_kfunc_nested_acquire_nonzero_offset_test(&sk->sk_write_queue); + + bpf_kfunc_nested_release_test(ptr); + return 0; +} + +SEC("tp_btf/tcp_probe") +__success +int BPF_PROG(test_nested_acquire_zero, struct sock *sk, struct sk_buff *skb) +{ + struct sk_buff *ptr; + + ptr = bpf_kfunc_nested_acquire_zero_offset_test(&sk->__sk_common); + + bpf_kfunc_nested_release_test(ptr); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue.c b/tools/testing/selftests/bpf/progs/pro_epilogue.c new file mode 100644 index 000000000000..44bc3f06b4b6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pro_epilogue.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
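Note: mmap_inner_array.c above exercises only the BPF side; the point of the test is that an inner BPF_F_MMAPABLE array stays usable via mmap() after being placed in the outer hash-of-maps. A hedged userspace sketch, with sizes matching inner_array above (key 4, value 8, 1 entry) and error checks trimmed:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <bpf/bpf.h>

    __u64 *mmap_inner(int outer_map_fd, __u32 test_pid)
    {
            LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
            int inner_fd;
            void *mem;

            inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner_array",
                                      4, 8, 1, &opts);
            /* the BPF program looks the inner map up by pid */
            bpf_map_update_elem(outer_map_fd, &test_pid, &inner_fd, BPF_ANY);

            mem = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                       MAP_SHARED, inner_fd, 0);
            /* *mem becomes match_value once the program runs */
            return mem == MAP_FAILED ? NULL : mem;
    }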
*/ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +void __kfunc_btf_root(void) +{ + bpf_kfunc_st_ops_inc10(NULL); +} + +static __noinline __used int subprog(struct st_ops_args *args) +{ + args->a += 1; + return args->a; +} + +__success +/* prologue */ +__xlated("0: r6 = *(u64 *)(r1 +0)") +__xlated("1: r7 = *(u64 *)(r6 +0)") +__xlated("2: r7 += 1000") +__xlated("3: *(u64 *)(r6 +0) = r7") +/* main prog */ +__xlated("4: r1 = *(u64 *)(r1 +0)") +__xlated("5: r6 = r1") +__xlated("6: call kernel-function") +__xlated("7: r1 = r6") +__xlated("8: call pc+1") +__xlated("9: exit") +SEC("struct_ops/test_prologue") +__naked int test_prologue(void) +{ + asm volatile ( + "r1 = *(u64 *)(r1 +0);" + "r6 = r1;" + "call %[bpf_kfunc_st_ops_inc10];" + "r1 = r6;" + "call subprog;" + "exit;" + : + : __imm(bpf_kfunc_st_ops_inc10) + : __clobber_all); +} + +__success +/* save __u64 *ctx to stack */ +__xlated("0: *(u64 *)(r10 -8) = r1") +/* main prog */ +__xlated("1: r1 = *(u64 *)(r1 +0)") +__xlated("2: r6 = r1") +__xlated("3: call kernel-function") +__xlated("4: r1 = r6") +__xlated("5: call pc+") +/* epilogue */ +__xlated("6: r1 = *(u64 *)(r10 -8)") +__xlated("7: r1 = *(u64 *)(r1 +0)") +__xlated("8: r6 = *(u64 *)(r1 +0)") +__xlated("9: r6 += 10000") +__xlated("10: *(u64 *)(r1 +0) = r6") +__xlated("11: r0 = r6") +__xlated("12: r0 *= 2") +__xlated("13: exit") +SEC("struct_ops/test_epilogue") +__naked int test_epilogue(void) +{ + asm volatile ( + "r1 = *(u64 *)(r1 +0);" + "r6 = r1;" + "call %[bpf_kfunc_st_ops_inc10];" + "r1 = r6;" + "call subprog;" + "exit;" + : + : __imm(bpf_kfunc_st_ops_inc10) + : __clobber_all); +} + +__success +/* prologue */ +__xlated("0: r6 = *(u64 *)(r1 +0)") +__xlated("1: r7 = *(u64 *)(r6 +0)") +__xlated("2: r7 += 1000") +__xlated("3: *(u64 *)(r6 +0) = r7") +/* save __u64 *ctx to stack */ +__xlated("4: *(u64 *)(r10 -8) = r1") +/* main prog */ +__xlated("5: r1 = *(u64 *)(r1 +0)") +__xlated("6: r6 = r1") +__xlated("7: call kernel-function") +__xlated("8: r1 = r6") +__xlated("9: call pc+") +/* epilogue */ +__xlated("10: r1 = *(u64 *)(r10 -8)") +__xlated("11: r1 = *(u64 *)(r1 +0)") +__xlated("12: r6 = *(u64 *)(r1 +0)") +__xlated("13: r6 += 10000") +__xlated("14: *(u64 *)(r1 +0) = r6") +__xlated("15: r0 = r6") +__xlated("16: r0 *= 2") +__xlated("17: exit") +SEC("struct_ops/test_pro_epilogue") +__naked int test_pro_epilogue(void) +{ + asm volatile ( + "r1 = *(u64 *)(r1 +0);" + "r6 = r1;" + "call %[bpf_kfunc_st_ops_inc10];" + "r1 = r6;" + "call subprog;" + "exit;" + : + : __imm(bpf_kfunc_st_ops_inc10) + : __clobber_all); +} + +SEC("syscall") +__retval(1011) /* PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] */ +int syscall_prologue(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_prologue(&args); +} + +SEC("syscall") +__retval(20022) /* (KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */ +int syscall_epilogue(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_epilogue(&args); +} + +SEC("syscall") +__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */ +int syscall_pro_epilogue(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_pro_epilogue(&args); +} + +SEC(".struct_ops.link") +struct bpf_testmod_st_ops pro_epilogue = { + .test_prologue = (void *)test_prologue, + .test_epilogue = (void *)test_epilogue, + 
.test_pro_epilogue = (void *)test_pro_epilogue, +}; diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c new file mode 100644 index 000000000000..3529e53be355 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +__success +/* prologue */ +__xlated("0: r6 = *(u64 *)(r1 +0)") +__xlated("1: r7 = *(u64 *)(r6 +0)") +__xlated("2: r7 += 1000") +__xlated("3: *(u64 *)(r6 +0) = r7") +/* main prog */ +__xlated("4: if r1 == 0x0 goto pc+5") +__xlated("5: if r1 == 0x1 goto pc+2") +__xlated("6: r1 = 1") +__xlated("7: goto pc-3") +__xlated("8: r1 = 0") +__xlated("9: goto pc-6") +__xlated("10: r0 = 0") +__xlated("11: exit") +SEC("struct_ops/test_prologue_goto_start") +__naked int test_prologue_goto_start(void) +{ + asm volatile ( + "if r1 == 0 goto +5;" + "if r1 == 1 goto +2;" + "r1 = 1;" + "goto -3;" + "r1 = 0;" + "goto -6;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +__success +/* save __u64 *ctx to stack */ +__xlated("0: *(u64 *)(r10 -8) = r1") +/* main prog */ +__xlated("1: if r1 == 0x0 goto pc+5") +__xlated("2: if r1 == 0x1 goto pc+2") +__xlated("3: r1 = 1") +__xlated("4: goto pc-3") +__xlated("5: r1 = 0") +__xlated("6: goto pc-6") +__xlated("7: r0 = 0") +/* epilogue */ +__xlated("8: r1 = *(u64 *)(r10 -8)") +__xlated("9: r1 = *(u64 *)(r1 +0)") +__xlated("10: r6 = *(u64 *)(r1 +0)") +__xlated("11: r6 += 10000") +__xlated("12: *(u64 *)(r1 +0) = r6") +__xlated("13: r0 = r6") +__xlated("14: r0 *= 2") +__xlated("15: exit") +SEC("struct_ops/test_epilogue_goto_start") +__naked int test_epilogue_goto_start(void) +{ + asm volatile ( + "if r1 == 0 goto +5;" + "if r1 == 1 goto +2;" + "r1 = 1;" + "goto -3;" + "r1 = 0;" + "goto -6;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +__success +/* prologue */ +__xlated("0: r6 = *(u64 *)(r1 +0)") +__xlated("1: r7 = *(u64 *)(r6 +0)") +__xlated("2: r7 += 1000") +__xlated("3: *(u64 *)(r6 +0) = r7") +/* save __u64 *ctx to stack */ +__xlated("4: *(u64 *)(r10 -8) = r1") +/* main prog */ +__xlated("5: if r1 == 0x0 goto pc+5") +__xlated("6: if r1 == 0x1 goto pc+2") +__xlated("7: r1 = 1") +__xlated("8: goto pc-3") +__xlated("9: r1 = 0") +__xlated("10: goto pc-6") +__xlated("11: r0 = 0") +/* epilogue */ +__xlated("12: r1 = *(u64 *)(r10 -8)") +__xlated("13: r1 = *(u64 *)(r1 +0)") +__xlated("14: r6 = *(u64 *)(r1 +0)") +__xlated("15: r6 += 10000") +__xlated("16: *(u64 *)(r1 +0) = r6") +__xlated("17: r0 = r6") +__xlated("18: r0 *= 2") +__xlated("19: exit") +SEC("struct_ops/test_pro_epilogue_goto_start") +__naked int test_pro_epilogue_goto_start(void) +{ + asm volatile ( + "if r1 == 0 goto +5;" + "if r1 == 1 goto +2;" + "r1 = 1;" + "goto -3;" + "r1 = 0;" + "goto -6;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC(".struct_ops.link") +struct bpf_testmod_st_ops epilogue_goto_start = { + .test_prologue = (void *)test_prologue_goto_start, + .test_epilogue = (void *)test_epilogue_goto_start, + .test_pro_epilogue = (void *)test_pro_epilogue_goto_start, +}; + +SEC("syscall") +__retval(0) +int syscall_prologue_goto_start(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_prologue(&args); +} + +SEC("syscall") 
+__retval(20000) /* (EPILOGUE_A [10000]) * 2 */ +int syscall_epilogue_goto_start(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_epilogue(&args); +} + +SEC("syscall") +__retval(22000) /* (PROLOGUE_A [1000] + EPILOGUE_A [10000]) * 2 */ +int syscall_pro_epilogue_goto_start(void *ctx) +{ + struct st_ops_args args = {}; + + return bpf_kfunc_st_ops_test_pro_epilogue(&args); +} diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c index b722a1e1ddef..dbd5eee8e25e 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_fail.c +++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c @@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx) } SEC("?tc") -__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+") +__failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}") long rbtree_api_remove_no_drop(void *ctx) { struct bpf_rb_node *res; diff --git a/tools/testing/selftests/bpf/progs/read_vsyscall.c b/tools/testing/selftests/bpf/progs/read_vsyscall.c index 986f96687ae1..39ebef430059 100644 --- a/tools/testing/selftests/bpf/progs/read_vsyscall.c +++ b/tools/testing/selftests/bpf/progs/read_vsyscall.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2024. Huawei Technologies Co., Ltd */ +#include "vmlinux.h" #include <linux/types.h> #include <bpf/bpf_helpers.h> @@ -7,10 +8,15 @@ int target_pid = 0; void *user_ptr = 0; -int read_ret[8]; +int read_ret[9]; char _license[] SEC("license") = "GPL"; +/* + * This is the only kfunc, the others are helpers + */ +int bpf_copy_from_user_str(void *dst, u32, const void *, u64) __weak __ksym; + SEC("fentry/" SYS_PREFIX "sys_nanosleep") int do_probe_read(void *ctx) { @@ -40,6 +46,7 @@ int do_copy_from_user(void *ctx) read_ret[6] = bpf_copy_from_user(buf, sizeof(buf), user_ptr); read_ret[7] = bpf_copy_from_user_task(buf, sizeof(buf), user_ptr, bpf_get_current_task_btf(), 0); + read_ret[8] = bpf_copy_from_user_str((char *)buf, sizeof(buf), user_ptr, 0); return 0; } diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c index f8d4b7cfcd68..836c8ab7b908 100644 --- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c @@ -32,7 +32,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) } SEC("?tc") -__failure __regex("Unreleased reference id=4 alloc_insn=[0-9]+") +__failure __msg("Unreleased reference id=4 alloc_insn={{[0-9]+}}") long rbtree_refcounted_node_ref_escapes(void *ctx) { struct node_acquire *n, *m; @@ -73,7 +73,7 @@ long refcount_acquire_maybe_null(void *ctx) } SEC("?tc") -__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+") +__failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}") long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx) { struct node_acquire *n, *m; diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index f74459eead26..a5c74d31a244 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -373,7 +373,7 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg, len = bpf_probe_read_user_str(&data->payload[off], STROBE_MAX_STR_LEN, value->ptr); /* * if bpf_probe_read_user_str returns error (<0), due to casting to - * unsinged int, it will become big number, so next check is + * unsigned int, it will 
become big number, so next check is * sufficient to check for errors AND prove to BPF verifier, that * bpf_probe_read_user_str won't return anything bigger than * STROBE_MAX_STR_LEN @@ -557,7 +557,7 @@ static void *read_strobe_meta(struct task_struct *task, return NULL; payload_off = ctx.payload_off; - /* this should not really happen, here only to satisfy verifer */ + /* this should not really happen, here only to satisfy verifier */ if (payload_off > sizeof(data->payload)) payload_off = sizeof(data->payload); #else diff --git a/tools/testing/selftests/bpf/progs/syscall.c b/tools/testing/selftests/bpf/progs/syscall.c index 3d3cafdebe72..0f4dfb770c32 100644 --- a/tools/testing/selftests/bpf/progs/syscall.c +++ b/tools/testing/selftests/bpf/progs/syscall.c @@ -8,6 +8,7 @@ #include <linux/btf.h> #include <string.h> #include <errno.h> +#include "bpf_misc.h" char _license[] SEC("license") = "GPL"; @@ -119,7 +120,7 @@ int load_prog(struct args *ctx) static __u64 value = 34; static union bpf_attr prog_load_attr = { .prog_type = BPF_PROG_TYPE_XDP, - .insn_cnt = sizeof(insns) / sizeof(insns[0]), + .insn_cnt = ARRAY_SIZE(insns), }; int ret; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c new file mode 100644 index 000000000000..327ca395e860 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_legacy.h" + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +int count = 0; + +static __noinline +int subprog_tail(struct __sk_buff *skb) +{ + bpf_tail_call_static(skb, &jmp_table, 0); + return 0; +} + +SEC("tc") +int entry(struct __sk_buff *skb) +{ + int ret = 1; + + count++; + subprog_tail(skb); + subprog_tail(skb); + + return ret; +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c new file mode 100644 index 000000000000..72fd0d577506 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +int classifier_0(struct __sk_buff *skb); +int classifier_1(struct __sk_buff *skb); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 2); + __uint(key_size, sizeof(__u32)); + __array(values, void (void)); +} jmp_table SEC(".maps") = { + .values = { + [0] = (void *) &classifier_0, + [1] = (void *) &classifier_1, + }, +}; + +int count0 = 0; +int count1 = 0; + +static __noinline +int subprog_tail0(struct __sk_buff *skb) +{ + bpf_tail_call_static(skb, &jmp_table, 0); + return 0; +} + +__auxiliary +SEC("tc") +int classifier_0(struct __sk_buff *skb) +{ + count0++; + subprog_tail0(skb); + return 0; +} + +static __noinline +int subprog_tail1(struct __sk_buff *skb) +{ + bpf_tail_call_static(skb, &jmp_table, 1); + return 0; +} + +__auxiliary +SEC("tc") +int classifier_1(struct __sk_buff *skb) +{ + count1++; + subprog_tail1(skb); + return 0; +} + +__success +__retval(33) +SEC("tc") +int tailcall_bpf2bpf_hierarchy_2(struct __sk_buff *skb) +{ + int ret = 0; + + subprog_tail0(skb); + subprog_tail1(skb); + + __sink(ret); + return (count1 << 16) | 
count0; +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c new file mode 100644 index 000000000000..a7fb91cb05b7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +int classifier_0(struct __sk_buff *skb); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __array(values, void (void)); +} jmp_table0 SEC(".maps") = { + .values = { + [0] = (void *) &classifier_0, + }, +}; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __array(values, void (void)); +} jmp_table1 SEC(".maps") = { + .values = { + [0] = (void *) &classifier_0, + }, +}; + +int count = 0; + +static __noinline +int subprog_tail(struct __sk_buff *skb, void *jmp_table) +{ + bpf_tail_call_static(skb, jmp_table, 0); + return 0; +} + +__auxiliary +SEC("tc") +int classifier_0(struct __sk_buff *skb) +{ + count++; + subprog_tail(skb, &jmp_table0); + subprog_tail(skb, &jmp_table1); + return count; +} + +__success +__retval(33) +SEC("tc") +int tailcall_bpf2bpf_hierarchy_3(struct __sk_buff *skb) +{ + int ret = 0; + + bpf_tail_call_static(skb, &jmp_table0, 0); + + __sink(ret); + return ret; +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c new file mode 100644 index 000000000000..c87f9ca982d3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Leon Hwang */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +int count = 0; + +static __noinline +int subprog_tail(void *ctx) +{ + bpf_tail_call_static(ctx, &jmp_table, 0); + return 0; +} + +SEC("fentry/dummy") +int BPF_PROG(fentry, struct sk_buff *skb) +{ + count++; + subprog_tail(ctx); + subprog_tail(ctx); + + return 0; +} + + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tailcall_freplace.c b/tools/testing/selftests/bpf/progs/tailcall_freplace.c new file mode 100644 index 000000000000..6713b809df44 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_freplace.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +int count = 0; + +SEC("freplace") +int entry_freplace(struct __sk_buff *skb) +{ + count++; + bpf_tail_call_static(skb, &jmp_table, 0); + return count; +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c index 70df695312dc..a55149015063 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c @@ -5,6 +5,7 @@ #include 
<bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> +#include "../bpf_experimental.h" #include "task_kfunc_common.h" char _license[] SEC("license") = "GPL"; @@ -142,8 +143,9 @@ int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone SEC("tp_btf/task_newtask") int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags) { - struct task_struct *kptr; - struct __tasks_kfunc_map_value *v; + struct task_struct *kptr, *acquired; + struct __tasks_kfunc_map_value *v, *local; + int refcnt, refcnt_after_drop; long status; if (!is_test_kfunc_task()) @@ -167,6 +169,56 @@ int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags) return 0; } + local = bpf_obj_new(typeof(*local)); + if (!local) { + err = 4; + bpf_task_release(kptr); + return 0; + } + + kptr = bpf_kptr_xchg(&local->task, kptr); + if (kptr) { + err = 5; + bpf_obj_drop(local); + bpf_task_release(kptr); + return 0; + } + + kptr = bpf_kptr_xchg(&local->task, NULL); + if (!kptr) { + err = 6; + bpf_obj_drop(local); + return 0; + } + + /* Stash a copy into local kptr and check if it is released recursively */ + acquired = bpf_task_acquire(kptr); + if (!acquired) { + err = 7; + bpf_obj_drop(local); + bpf_task_release(kptr); + return 0; + } + bpf_probe_read_kernel(&refcnt, sizeof(refcnt), &acquired->rcu_users); + + acquired = bpf_kptr_xchg(&local->task, acquired); + if (acquired) { + err = 8; + bpf_obj_drop(local); + bpf_task_release(kptr); + bpf_task_release(acquired); + return 0; + } + + bpf_obj_drop(local); + + bpf_probe_read_kernel(&refcnt_after_drop, sizeof(refcnt_after_drop), &kptr->rcu_users); + if (refcnt != refcnt_after_drop + 1) { + err = 9; + bpf_task_release(kptr); + return 0; + } + bpf_task_release(kptr); return 0; diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c new file mode 100644 index 000000000000..8a0632c37839 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +__noinline +int subprog(struct __sk_buff *skb) +{ + int ret = 1; + + __sink(ret); + return ret; +} + +SEC("tc") +int entry_tc(struct __sk_buff *skb) +{ + return subprog(skb); +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tc_dummy.c b/tools/testing/selftests/bpf/progs/tc_dummy.c new file mode 100644 index 000000000000..69a3d0dc8787 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tc_dummy.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_legacy.h" + +SEC("tc") +int entry(struct __sk_buff *skb) +{ + return 1; +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c index 68466a6ad18c..fb79e6cab932 100644 --- a/tools/testing/selftests/bpf/progs/test_attach_probe.c +++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c @@ -5,8 +5,10 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> +#include <errno.h> #include "bpf_misc.h" +u32 dynamic_sz = 1; int kprobe2_res = 0; int kretprobe2_res = 0; int uprobe_byname_res = 0; @@ -14,11 +16,15 @@ int uretprobe_byname_res = 0; int uprobe_byname2_res = 0; int uretprobe_byname2_res = 0; int uprobe_byname3_sleepable_res = 0; +int uprobe_byname3_str_sleepable_res = 0; int uprobe_byname3_res = 
0; int uretprobe_byname3_sleepable_res = 0; +int uretprobe_byname3_str_sleepable_res = 0; int uretprobe_byname3_res = 0; void *user_ptr = 0; +int bpf_copy_from_user_str(void *dst, u32, const void *, u64) __weak __ksym; + SEC("ksyscall/nanosleep") int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __kernel_timespec *rem) { @@ -87,11 +93,61 @@ static __always_inline bool verify_sleepable_user_copy(void) return bpf_strncmp(data, sizeof(data), "test_data") == 0; } +static __always_inline bool verify_sleepable_user_copy_str(void) +{ + int ret; + char data_long[20]; + char data_long_pad[20]; + char data_long_err[20]; + char data_short[4]; + char data_short_pad[4]; + + ret = bpf_copy_from_user_str(data_short, sizeof(data_short), user_ptr, 0); + + if (bpf_strncmp(data_short, 4, "tes\0") != 0 || ret != 4) + return false; + + ret = bpf_copy_from_user_str(data_short_pad, sizeof(data_short_pad), user_ptr, BPF_F_PAD_ZEROS); + + if (bpf_strncmp(data_short, 4, "tes\0") != 0 || ret != 4) + return false; + + /* Make sure this passes the verifier */ + ret = bpf_copy_from_user_str(data_long, dynamic_sz & sizeof(data_long), user_ptr, 0); + + if (ret != 0) + return false; + + ret = bpf_copy_from_user_str(data_long, sizeof(data_long), user_ptr, 0); + + if (bpf_strncmp(data_long, 10, "test_data\0") != 0 || ret != 10) + return false; + + ret = bpf_copy_from_user_str(data_long_pad, sizeof(data_long_pad), user_ptr, BPF_F_PAD_ZEROS); + + if (bpf_strncmp(data_long_pad, 10, "test_data\0") != 0 || ret != 10 || data_long_pad[19] != '\0') + return false; + + ret = bpf_copy_from_user_str(data_long_err, sizeof(data_long_err), (void *)data_long, BPF_F_PAD_ZEROS); + + if (ret > 0 || data_long_err[19] != '\0') + return false; + + ret = bpf_copy_from_user_str(data_long, sizeof(data_long), user_ptr, 2); + + if (ret != -EINVAL) + return false; + + return true; +} + SEC("uprobe.s//proc/self/exe:trigger_func3") int handle_uprobe_byname3_sleepable(struct pt_regs *ctx) { if (verify_sleepable_user_copy()) uprobe_byname3_sleepable_res = 9; + if (verify_sleepable_user_copy_str()) + uprobe_byname3_str_sleepable_res = 10; return 0; } @@ -102,7 +158,7 @@ int handle_uprobe_byname3_sleepable(struct pt_regs *ctx) SEC("uprobe//proc/self/exe:trigger_func3") int handle_uprobe_byname3(struct pt_regs *ctx) { - uprobe_byname3_res = 10; + uprobe_byname3_res = 11; return 0; } @@ -110,14 +166,16 @@ SEC("uretprobe.s//proc/self/exe:trigger_func3") int handle_uretprobe_byname3_sleepable(struct pt_regs *ctx) { if (verify_sleepable_user_copy()) - uretprobe_byname3_sleepable_res = 11; + uretprobe_byname3_sleepable_res = 12; + if (verify_sleepable_user_copy_str()) + uretprobe_byname3_str_sleepable_res = 13; return 0; } SEC("uretprobe//proc/self/exe:trigger_func3") int handle_uretprobe_byname3(struct pt_regs *ctx) { - uretprobe_byname3_res = 12; + uretprobe_byname3_res = 14; return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_build_id.c b/tools/testing/selftests/bpf/progs/test_build_id.c new file mode 100644 index 000000000000..32ce59f9aa27 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_build_id.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct bpf_stack_build_id stack_sleepable[128];
+int res_sleepable;
+
+struct bpf_stack_build_id stack_nofault[128];
+int res_nofault;
+
+SEC("uprobe.multi/./uprobe_multi:uprobe")
+int uprobe_nofault(struct pt_regs *ctx)
+{
+	res_nofault = bpf_get_stack(ctx, stack_nofault, sizeof(stack_nofault),
+				    BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
+
+	return 0;
+}
+
+SEC("uprobe.multi.s/./uprobe_multi:uprobe")
+int uprobe_sleepable(struct pt_regs *ctx)
+{
+	res_sleepable = bpf_get_stack(ctx, stack_sleepable, sizeof(stack_sleepable),
+				      BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
index da54c09e9a15..464515b824b9 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
@@ -503,7 +503,7 @@ static ret_t get_next_hop(struct bpf_dynptr *dynptr, __u64 *offset, encap_header
  *
  * fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321)
  *
- * clang will substitue a costant for sizeof, which allows the verifier
+ * clang will substitute a constant for sizeof, which allows the verifier
  * to track its value. Based on this, it can figure out the constant
  * return value, and calling code works while still being "generic" to
  * IPv4 and IPv6.
diff --git a/tools/testing/selftests/bpf/progs/test_core_read_macros.c b/tools/testing/selftests/bpf/progs/test_core_read_macros.c
index fd54caa17319..873d85a4739b 100644
--- a/tools/testing/selftests/bpf/progs/test_core_read_macros.c
+++ b/tools/testing/selftests/bpf/progs/test_core_read_macros.c
@@ -36,7 +36,7 @@ int handler(void *ctx)
 		return 0;
 
 	/* next pointers for kernel address space have to be initialized from
-	 * BPF side, user-space mmaped addresses are stil user-space addresses
+	 * BPF side, user-space mmaped addresses are still user-space addresses
 	 */
 	k_probe_in.next = &k_probe_in;
 	__builtin_preserve_access_index(({k_core_in.next = &k_core_in;}));
diff --git a/tools/testing/selftests/bpf/progs/test_get_xattr.c b/tools/testing/selftests/bpf/progs/test_get_xattr.c
index 7eb2a4e5a3e5..66e737720f7c 100644
--- a/tools/testing/selftests/bpf/progs/test_get_xattr.c
+++ b/tools/testing/selftests/bpf/progs/test_get_xattr.c
@@ -2,6 +2,7 @@
 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
*/ #include "vmlinux.h" +#include <errno.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include "bpf_kfuncs.h" @@ -9,10 +10,12 @@ char _license[] SEC("license") = "GPL"; __u32 monitored_pid; -__u32 found_xattr; +__u32 found_xattr_from_file; +__u32 found_xattr_from_dentry; static const char expected_value[] = "hello"; -char value[32]; +char value1[32]; +char value2[32]; SEC("lsm.s/file_open") int BPF_PROG(test_file_open, struct file *f) @@ -25,13 +28,37 @@ int BPF_PROG(test_file_open, struct file *f) if (pid != monitored_pid) return 0; - bpf_dynptr_from_mem(value, sizeof(value), 0, &value_ptr); + bpf_dynptr_from_mem(value1, sizeof(value1), 0, &value_ptr); ret = bpf_get_file_xattr(f, "user.kfuncs", &value_ptr); if (ret != sizeof(expected_value)) return 0; - if (bpf_strncmp(value, ret, expected_value)) + if (bpf_strncmp(value1, ret, expected_value)) return 0; - found_xattr = 1; + found_xattr_from_file = 1; return 0; } + +SEC("lsm.s/inode_getxattr") +int BPF_PROG(test_inode_getxattr, struct dentry *dentry, char *name) +{ + struct bpf_dynptr value_ptr; + __u32 pid; + int ret; + + pid = bpf_get_current_pid_tgid() >> 32; + if (pid != monitored_pid) + return 0; + + bpf_dynptr_from_mem(value2, sizeof(value2), 0, &value_ptr); + + ret = bpf_get_dentry_xattr(dentry, "user.kfuncs", &value_ptr); + if (ret != sizeof(expected_value)) + return 0; + if (bpf_strncmp(value2, ret, expected_value)) + return 0; + found_xattr_from_dentry = 1; + + /* return non-zero to fail getxattr from user space */ + return -EINVAL; +} diff --git a/tools/testing/selftests/bpf/progs/test_global_func15.c b/tools/testing/selftests/bpf/progs/test_global_func15.c index b4e089d6981d..201cc000b3f4 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func15.c +++ b/tools/testing/selftests/bpf/progs/test_global_func15.c @@ -44,7 +44,7 @@ __naked int global_func15_tricky_pruning(void) * case we have a valid 1 stored in R0 register, but in * a branch case we assign some random value to R0. 
So if
  * there is something wrong with precision tracking for R0 at
- * program exit, we might erronenously prune branch case,
+ * program exit, we might erroneously prune branch case,
  * because R0 in fallthrough case is imprecise (and thus any
  * value is valid from POV of verifier is_state_equal() logic)
  */
diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
index 1fbb73d3e5d5..a3f220ba7025 100644
--- a/tools/testing/selftests/bpf/progs/test_global_map_resize.c
+++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
@@ -3,6 +3,7 @@
 
 #include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
 
 char _license[] SEC("license") = "GPL";
 
@@ -15,7 +16,7 @@ const volatile size_t data_array_len;
 int sum = 0;
 int array[1];
 
-/* custom data secton */
+/* custom data section */
 int my_array[1] SEC(".data.custom");
 
 /* custom data section which should NOT be resizable,
@@ -60,3 +61,18 @@ int data_array_sum(void *ctx)
 
 	return 0;
 }
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1)
+{
+	return 0;
+}
+
+struct bpf_testmod_ops {
+	int (*test_1)(void);
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops st_ops_resize = {
+	.test_1 = (void *)test_1
+};
diff --git a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
index f5ac5f3e8919..568816307f71 100644
--- a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
+++ b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c
@@ -31,6 +31,7 @@ int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode)
 	if (fmode & FMODE_WRITE)
 		return -EACCES;
 
+	barrier();
 	return 0;
 }
diff --git a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
index fc8e8a34a3db..7035fb4d4165 100644
--- a/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
+++ b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
@@ -4,6 +4,7 @@
 #include <linux/ptrace.h>
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
 
 const struct {
 	unsigned a[4];
@@ -64,7 +65,7 @@ int full_loop(struct pt_regs *ctx)
 {
 	/* prevent the compiler from optimizing everything out */
 	unsigned * volatile p = (void *)&rdonly_values.a;
-	int i = sizeof(rdonly_values.a) / sizeof(rdonly_values.a[0]);
+	int i = ARRAY_SIZE(rdonly_values.a);
 	unsigned iters = 0, sum = 0;
 
 	/* validate verifier can allow full loop as well */
diff --git a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
index 2f0eb1334d65..8ef6b39335b6 100644
--- a/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
+++ b/tools/testing/selftests/bpf/progs/test_sig_in_xattr.c
@@ -6,6 +6,7 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 #include "bpf_kfuncs.h"
+#include "err.h"
 
 char _license[] SEC("license") = "GPL";
 
@@ -79,5 +80,8 @@ int BPF_PROG(test_file_open, struct file *f)
 	ret = bpf_verify_pkcs7_signature(&digest_ptr, &sig_ptr, trusted_keyring);
 
 	bpf_key_put(trusted_keyring);
+
+	set_if_not_errno_or_zero(ret, -EFAULT);
+
 	return ret;
 }
diff --git a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
deleted file mode 100644
index 37aacc66cd68..000000000000
--- a/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
+++ /dev/null
@@ -1,45 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018 Facebook
-
-#include <linux/bpf.h>
-#include <linux/pkt_cls.h> - -#include <string.h> - -#include <bpf/bpf_helpers.h> - -#define NUM_CGROUP_LEVELS 4 - -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __type(key, __u32); - __type(value, __u64); - __uint(max_entries, NUM_CGROUP_LEVELS); -} cgroup_ids SEC(".maps"); - -static __always_inline void log_nth_level(struct __sk_buff *skb, __u32 level) -{ - __u64 id; - - /* [1] &level passed to external function that may change it, it's - * incompatible with loop unroll. - */ - id = bpf_skb_ancestor_cgroup_id(skb, level); - bpf_map_update_elem(&cgroup_ids, &level, &id, 0); -} - -SEC("cgroup_id_logger") -int log_cgroup_id(struct __sk_buff *skb) -{ - /* Loop unroll can't be used here due to [1]. Unrolling manually. - * Number of calls should be in sync with NUM_CGROUP_LEVELS. - */ - log_nth_level(skb, 0); - log_nth_level(skb, 1); - log_nth_level(skb, 2); - log_nth_level(skb, 3); - - return TC_ACT_OK; -} - -char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c index 3f5abcf3ff13..32127f1cd687 100644 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c @@ -26,6 +26,18 @@ */ #define ASSIGNED_ADDR_VETH1 0xac1001c8 +struct bpf_fou_encap___local { + __be16 sport; + __be16 dport; +} __attribute__((preserve_access_index)); + +enum bpf_fou_encap_type___local { + FOU_BPF_ENCAP_FOU___local, + FOU_BPF_ENCAP_GUE___local, +}; + +struct bpf_fou_encap; + int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx, struct bpf_fou_encap *encap, int type) __ksym; int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx, @@ -745,7 +757,7 @@ SEC("tc") int ipip_gue_set_tunnel(struct __sk_buff *skb) { struct bpf_tunnel_key key = {}; - struct bpf_fou_encap encap = {}; + struct bpf_fou_encap___local encap = {}; void *data = (void *)(long)skb->data; struct iphdr *iph = data; void *data_end = (void *)(long)skb->data_end; @@ -769,7 +781,9 @@ int ipip_gue_set_tunnel(struct __sk_buff *skb) encap.sport = 0; encap.dport = bpf_htons(5555); - ret = bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_GUE); + ret = bpf_skb_set_fou_encap(skb, (struct bpf_fou_encap *)&encap, + bpf_core_enum_value(enum bpf_fou_encap_type___local, + FOU_BPF_ENCAP_GUE___local)); if (ret < 0) { log_err(ret); return TC_ACT_SHOT; @@ -782,7 +796,7 @@ SEC("tc") int ipip_fou_set_tunnel(struct __sk_buff *skb) { struct bpf_tunnel_key key = {}; - struct bpf_fou_encap encap = {}; + struct bpf_fou_encap___local encap = {}; void *data = (void *)(long)skb->data; struct iphdr *iph = data; void *data_end = (void *)(long)skb->data_end; @@ -806,7 +820,8 @@ int ipip_fou_set_tunnel(struct __sk_buff *skb) encap.sport = 0; encap.dport = bpf_htons(5555); - ret = bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_FOU); + ret = bpf_skb_set_fou_encap(skb, (struct bpf_fou_encap *)&encap, + FOU_BPF_ENCAP_FOU___local); if (ret < 0) { log_err(ret); return TC_ACT_SHOT; @@ -820,7 +835,7 @@ int ipip_encap_get_tunnel(struct __sk_buff *skb) { int ret; struct bpf_tunnel_key key = {}; - struct bpf_fou_encap encap = {}; + struct bpf_fou_encap___local encap = {}; ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); if (ret < 0) { @@ -828,7 +843,7 @@ int ipip_encap_get_tunnel(struct __sk_buff *skb) return TC_ACT_SHOT; } - ret = bpf_skb_get_fou_encap(skb, &encap); + ret = bpf_skb_get_fou_encap(skb, (struct bpf_fou_encap *)&encap); if (ret < 0) { log_err(ret); return TC_ACT_SHOT; diff --git 
a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c index f42e9f3831a1..12034a73ee2d 100644 --- a/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c +++ b/tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c @@ -11,6 +11,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include "bpf_kfuncs.h" +#include "err.h" #define MAX_DATA_SIZE (1024 * 1024) #define MAX_SIG_SIZE 1024 @@ -55,12 +56,12 @@ int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size) ret = bpf_probe_read_kernel(&value, sizeof(value), &attr->value); if (ret) - return ret; + goto out; ret = bpf_copy_from_user(data_val, sizeof(struct data), (void *)(unsigned long)value); if (ret) - return ret; + goto out; if (data_val->data_len > sizeof(data_val->data)) return -EINVAL; @@ -84,5 +85,8 @@ int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size) bpf_key_put(trusted_keyring); +out: + set_if_not_errno_or_zero(ret, -EFAULT); + return ret; } diff --git a/tools/testing/selftests/bpf/progs/token_lsm.c b/tools/testing/selftests/bpf/progs/token_lsm.c index e4d59b6ba743..a6002d073b1b 100644 --- a/tools/testing/selftests/bpf/progs/token_lsm.c +++ b/tools/testing/selftests/bpf/progs/token_lsm.c @@ -8,8 +8,8 @@ char _license[] SEC("license") = "GPL"; int my_pid; -bool reject_capable; -bool reject_cmd; +int reject_capable; +int reject_cmd; SEC("lsm/bpf_token_capable") int BPF_PROG(token_capable, struct bpf_token *token, int cap) diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c index 2619ed193c65..044a6d78923e 100644 --- a/tools/testing/selftests/bpf/progs/trigger_bench.c +++ b/tools/testing/selftests/bpf/progs/trigger_bench.c @@ -32,6 +32,13 @@ int bench_trigger_uprobe(void *ctx) return 0; } +SEC("?uprobe.multi") +int bench_trigger_uprobe_multi(void *ctx) +{ + inc_counter(); + return 0; +} + const volatile int batch_iters = 0; SEC("?raw_tp") diff --git a/tools/testing/selftests/bpf/progs/unsupported_ops.c b/tools/testing/selftests/bpf/progs/unsupported_ops.c new file mode 100644 index 000000000000..9180365a3568 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/unsupported_ops.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +SEC("struct_ops/unsupported_ops") +__failure +__msg("attach to unsupported member unsupported_ops of struct bpf_testmod_ops") +int BPF_PROG(unsupported_ops) +{ + return 0; +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod = { + .unsupported_ops = (void *)unsupported_ops, +}; diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c new file mode 100644 index 000000000000..7e0fdcbbd242 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <stdbool.h> +#include "bpf_kfuncs.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +__u64 uprobe_result[4]; + +SEC("uprobe.multi") +int uprobe_0(struct pt_regs *ctx) +{ + uprobe_result[0]++; + return 0; +} + +SEC("uprobe.multi") +int uprobe_1(struct pt_regs *ctx) +{ + uprobe_result[1]++; + return 0; +} + +SEC("uprobe.multi") +int uprobe_2(struct pt_regs *ctx) +{ + uprobe_result[2]++; + return 0; +} + +SEC("uprobe.multi") +int uprobe_3(struct pt_regs *ctx) +{ + uprobe_result[3]++; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c b/tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c new file mode 100644 index 000000000000..67fcbad36661 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi_pid_filter.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +__u32 pids[3]; +__u32 test[3][2]; + +static void update_pid(int idx) +{ + __u32 pid = bpf_get_current_pid_tgid() >> 32; + + if (pid == pids[idx]) + test[idx][0]++; + else + test[idx][1]++; +} + +SEC("uprobe.multi") +int uprobe_multi_0(struct pt_regs *ctx) +{ + update_pid(0); + return 0; +} + +SEC("uprobe.multi") +int uprobe_multi_1(struct pt_regs *ctx) +{ + update_pid(1); + return 0; +} + +SEC("uprobe.multi") +int uprobe_multi_2(struct pt_regs *ctx) +{ + update_pid(2); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c index 716113c2bce2..f4da4d508ddb 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c +++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c @@ -87,7 +87,7 @@ int bits_memalloc(void) int *bit; __builtin_memset(&data, 0xf0, sizeof(data)); /* 4 * 16 */ - bpf_for_each(bits, bit, &data[0], sizeof(data) / sizeof(u64)) + bpf_for_each(bits, bit, &data[0], ARRAY_SIZE(data)) nr++; return nr; } diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c new file mode 100644 index 000000000000..9da97d2efcd9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c @@ -0,0 +1,900 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" +#include <stdbool.h> +#include "bpf_kfuncs.h" + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) __msg("stack depth 8") +__xlated("4: r5 = 5") +__xlated("5: w0 = ") +__xlated("6: r0 = &(void __percpu *)(r0)") 
+__xlated("7: r0 = *(u32 *)(r0 +0)") +__xlated("8: exit") +__success +__naked void simple(void) +{ + asm volatile ( + "r1 = 1;" + "r2 = 2;" + "r3 = 3;" + "r4 = 4;" + "r5 = 5;" + "*(u64 *)(r10 - 16) = r1;" + "*(u64 *)(r10 - 24) = r2;" + "*(u64 *)(r10 - 32) = r3;" + "*(u64 *)(r10 - 40) = r4;" + "*(u64 *)(r10 - 48) = r5;" + "call %[bpf_get_smp_processor_id];" + "r5 = *(u64 *)(r10 - 48);" + "r4 = *(u64 *)(r10 - 40);" + "r3 = *(u64 *)(r10 - 32);" + "r2 = *(u64 *)(r10 - 24);" + "r1 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +/* The logic for detecting and verifying bpf_fastcall pattern is the same for + * any arch, however x86 differs from arm64 or riscv64 in a way + * bpf_get_smp_processor_id is rewritten: + * - on x86 it is done by verifier + * - on arm64 and riscv64 it is done by jit + * + * Which leads to different xlated patterns for different archs: + * - on x86 the call is expanded as 3 instructions + * - on arm64 and riscv64 the call remains as is + * (but spills/fills are still removed) + * + * It is really desirable to check instruction indexes in the xlated + * patterns, so add this canary test to check that function rewrite by + * jit is correctly processed by bpf_fastcall logic, keep the rest of the + * tests as x86. + */ +SEC("raw_tp") +__arch_arm64 +__arch_riscv64 +__xlated("0: r1 = 1") +__xlated("1: call bpf_get_smp_processor_id") +__xlated("2: exit") +__success +__naked void canary_arm64_riscv64(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("3: exit") +__success +__naked void canary_zero_spills(void) +{ + asm volatile ( + "call %[bpf_get_smp_processor_id];" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) __msg("stack depth 16") +__xlated("1: *(u64 *)(r10 -16) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r2 = *(u64 *)(r10 -16)") +__success +__naked void wrong_reg_in_pattern1(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r2 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -16) = r6") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r6 = *(u64 *)(r10 -16)") +__success +__naked void wrong_reg_in_pattern2(void) +{ + asm volatile ( + "r6 = 1;" + "*(u64 *)(r10 - 16) = r6;" + "call %[bpf_get_smp_processor_id];" + "r6 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -16) = r0") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r0 = *(u64 *)(r10 -16)") +__success +__naked void wrong_reg_in_pattern3(void) +{ + asm volatile ( + "r0 = 1;" + "*(u64 *)(r10 - 16) = r0;" + "call %[bpf_get_smp_processor_id];" + "r0 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("2: *(u64 *)(r2 -16) = r1") +__xlated("...") +__xlated("4: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("6: r1 = *(u64 *)(r10 -16)") +__success 
+__naked void wrong_base_in_pattern(void) +{ + asm volatile ( + "r1 = 1;" + "r2 = r10;" + "*(u64 *)(r2 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -16) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r2 = 1") +__success +__naked void wrong_insn_in_pattern(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r2 = 1;" + "r1 = *(u64 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("2: *(u64 *)(r10 -16) = r1") +__xlated("...") +__xlated("4: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("6: r1 = *(u64 *)(r10 -8)") +__success +__naked void wrong_off_in_pattern1(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u32 *)(r10 -4) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u32 *)(r10 -4)") +__success +__naked void wrong_off_in_pattern2(void) +{ + asm volatile ( + "r1 = 1;" + "*(u32 *)(r10 - 4) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u32 *)(r10 - 4);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u32 *)(r10 -16) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u32 *)(r10 -16)") +__success +__naked void wrong_size_in_pattern(void) +{ + asm volatile ( + "r1 = 1;" + "*(u32 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u32 *)(r10 - 16);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("2: *(u32 *)(r10 -8) = r1") +__xlated("...") +__xlated("4: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("6: r1 = *(u32 *)(r10 -8)") +__success +__naked void partial_pattern(void) +{ + asm volatile ( + "r1 = 1;" + "r2 = 2;" + "*(u32 *)(r10 - 8) = r1;" + "*(u64 *)(r10 - 16) = r2;" + "call %[bpf_get_smp_processor_id];" + "r2 = *(u64 *)(r10 - 16);" + "r1 = *(u32 *)(r10 - 8);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("0: r1 = 1") +__xlated("1: r2 = 2") +/* not patched, spills for -8, -16 not removed */ +__xlated("2: *(u64 *)(r10 -8) = r1") +__xlated("3: *(u64 *)(r10 -16) = r2") +__xlated("...") +__xlated("5: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("7: r2 = *(u64 *)(r10 -16)") +__xlated("8: r1 = *(u64 *)(r10 -8)") +/* patched, spills for -24, -32 removed */ +__xlated("...") +__xlated("10: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("12: exit") +__success +__naked void min_stack_offset(void) +{ + asm volatile ( + "r1 = 1;" + "r2 = 2;" + /* this call won't be patched */ + "*(u64 *)(r10 - 8) = r1;" + "*(u64 *)(r10 - 16) = r2;" + "call %[bpf_get_smp_processor_id];" + "r2 = *(u64 *)(r10 - 16);" + "r1 = *(u64 *)(r10 - 8);" + /* this call would be patched */ + "*(u64 *)(r10 - 24) = r1;" + "*(u64 *)(r10 - 32) = r2;" + "call %[bpf_get_smp_processor_id];" + "r2 = *(u64 *)(r10 - 32);" + "r1 = *(u64 *)(r10 - 24);" + "exit;" + : + : 
__imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -8) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u64 *)(r10 -8)") +__success +__naked void bad_fixed_read(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "r1 = r10;" + "r1 += -8;" + "r1 = *(u64 *)(r1 - 0);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -8) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u64 *)(r10 -8)") +__success +__naked void bad_fixed_write(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "r1 = r10;" + "r1 += -8;" + "*(u64 *)(r1 - 0) = r1;" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("6: *(u64 *)(r10 -16) = r1") +__xlated("...") +__xlated("8: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("10: r1 = *(u64 *)(r10 -16)") +__success +__naked void bad_varying_read(void) +{ + asm volatile ( + "r6 = *(u64 *)(r1 + 0);" /* random scalar value */ + "r6 &= 0x7;" /* r6 range [0..7] */ + "r6 += 0x2;" /* r6 range [2..9] */ + "r7 = 0;" + "r7 -= r6;" /* r7 range [-9..-2] */ + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "r1 = r10;" + "r1 += r7;" + "r1 = *(u8 *)(r1 - 0);" /* touches slot [-16..-9] where spills are stored */ + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("6: *(u64 *)(r10 -16) = r1") +__xlated("...") +__xlated("8: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("10: r1 = *(u64 *)(r10 -16)") +__success +__naked void bad_varying_write(void) +{ + asm volatile ( + "r6 = *(u64 *)(r1 + 0);" /* random scalar value */ + "r6 &= 0x7;" /* r6 range [0..7] */ + "r6 += 0x2;" /* r6 range [2..9] */ + "r7 = 0;" + "r7 -= r6;" /* r7 range [-9..-2] */ + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "r1 = r10;" + "r1 += r7;" + "*(u8 *)(r1 - 0) = r7;" /* touches slot [-16..-9] where spills are stored */ + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -8) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u64 *)(r10 -8)") +__success +__naked void bad_write_in_subprog(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "r1 = r10;" + "r1 += -8;" + "call bad_write_in_subprog_aux;" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +__used +__naked static void bad_write_in_subprog_aux(void) +{ + asm volatile ( + "r0 = 1;" + "*(u64 *)(r1 - 0) = r0;" /* invalidates bpf_fastcall contract for caller: */ + "exit;" /* caller stack at -8 used outside of the pattern */ + ::: __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__xlated("1: *(u64 *)(r10 -8) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u64 *)(r10 -8)") +__success +__naked void bad_helper_write(void) +{ + asm volatile ( + "r1 = 1;" + /* bpf_fastcall pattern with stack offset -8 */ + 
"*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "r1 = r10;" + "r1 += -8;" + "r2 = 1;" + "r3 = 42;" + /* read dst is fp[-8], thus bpf_fastcall rewrite not applied */ + "call %[bpf_probe_read_kernel];" + "exit;" + : + : __imm(bpf_get_smp_processor_id), + __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +/* main, not patched */ +__xlated("1: *(u64 *)(r10 -8) = r1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("5: r1 = *(u64 *)(r10 -8)") +__xlated("...") +__xlated("9: call pc+1") +__xlated("...") +__xlated("10: exit") +/* subprogram, patched */ +__xlated("11: r1 = 1") +__xlated("...") +__xlated("13: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("15: exit") +__success +__naked void invalidate_one_subprog(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "r1 = r10;" + "r1 += -8;" + "r1 = *(u64 *)(r1 - 0);" + "call invalidate_one_subprog_aux;" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +__used +__naked static void invalidate_one_subprog_aux(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +/* main */ +__xlated("0: r1 = 1") +__xlated("...") +__xlated("2: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("4: call pc+1") +__xlated("5: exit") +/* subprogram */ +__xlated("6: r1 = 1") +__xlated("...") +__xlated("8: r0 = &(void __percpu *)(r0)") +__xlated("...") +__xlated("10: *(u64 *)(r10 -16) = r1") +__xlated("11: exit") +__success +__naked void subprogs_use_independent_offsets(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "call subprogs_use_independent_offsets_aux;" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +__used +__naked static void subprogs_use_independent_offsets_aux(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 24) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 24);" + "*(u64 *)(r10 - 16) = r1;" + "exit;" + : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) __msg("stack depth 8") +__xlated("2: r0 = &(void __percpu *)(r0)") +__success +__naked void helper_call_does_not_prevent_bpf_fastcall(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_prandom_u32];" + "r1 = *(u64 *)(r10 - 8);" + "exit;" + : + : __imm(bpf_get_smp_processor_id), + __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) __msg("stack depth 16") +/* may_goto counter at -16 */ +__xlated("0: *(u64 *)(r10 -16) =") +__xlated("1: r1 = 1") +__xlated("...") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("...") +/* may_goto expansion starts */ +__xlated("5: r11 = *(u64 *)(r10 -16)") +__xlated("6: if r11 == 0x0 goto pc+3") +__xlated("7: r11 -= 1") +__xlated("8: *(u64 *)(r10 -16) = r11") +/* may_goto expansion ends */ +__xlated("9: *(u64 *)(r10 -8) = r1") +__xlated("10: exit") +__success +__naked void may_goto_interaction(void) +{ + asm volatile ( + "r1 = 1;" + "*(u64 *)(r10 - 16) = r1;" + "call 
%[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + ".8byte %[may_goto];" + /* just touch some stack at -8 */ + "*(u64 *)(r10 - 8) = r1;" + "exit;" + : + : __imm(bpf_get_smp_processor_id), + __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, +1 /* offset */, 0)) + : __clobber_all); +} + +__used +__naked static void dummy_loop_callback(void) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) __msg("stack depth 32+0") +__xlated("2: r1 = 1") +__xlated("3: w0 =") +__xlated("4: r0 = &(void __percpu *)(r0)") +__xlated("5: r0 = *(u32 *)(r0 +0)") +/* bpf_loop params setup */ +__xlated("6: r2 =") +__xlated("7: r3 = 0") +__xlated("8: r4 = 0") +__xlated("...") +/* ... part of the inlined bpf_loop */ +__xlated("12: *(u64 *)(r10 -32) = r6") +__xlated("13: *(u64 *)(r10 -24) = r7") +__xlated("14: *(u64 *)(r10 -16) = r8") +__xlated("...") +__xlated("21: call pc+8") /* dummy_loop_callback */ +/* ... last insns of the bpf_loop_interaction1 */ +__xlated("...") +__xlated("28: r0 = 0") +__xlated("29: exit") +/* dummy_loop_callback */ +__xlated("30: r0 = 0") +__xlated("31: exit") +__success +__naked int bpf_loop_interaction1(void) +{ + asm volatile ( + "r1 = 1;" + /* bpf_fastcall stack region at -16, but could be removed */ + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "r2 = %[dummy_loop_callback];" + "r3 = 0;" + "r4 = 0;" + "call %[bpf_loop];" + "r0 = 0;" + "exit;" + : + : __imm_ptr(dummy_loop_callback), + __imm(bpf_get_smp_processor_id), + __imm(bpf_loop) + : __clobber_common + ); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) __msg("stack depth 40+0") +/* call bpf_get_smp_processor_id */ +__xlated("2: r1 = 42") +__xlated("3: w0 =") +__xlated("4: r0 = &(void __percpu *)(r0)") +__xlated("5: r0 = *(u32 *)(r0 +0)") +/* call bpf_get_prandom_u32 */ +__xlated("6: *(u64 *)(r10 -16) = r1") +__xlated("7: call") +__xlated("8: r1 = *(u64 *)(r10 -16)") +__xlated("...") +/* ... part of the inlined bpf_loop */ +__xlated("15: *(u64 *)(r10 -40) = r6") +__xlated("16: *(u64 *)(r10 -32) = r7") +__xlated("17: *(u64 *)(r10 -24) = r8") +__success +__naked int bpf_loop_interaction2(void) +{ + asm volatile ( + "r1 = 42;" + /* bpf_fastcall stack region at -16, cannot be removed */ + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 16);" + "*(u64 *)(r10 - 16) = r1;" + "call %[bpf_get_prandom_u32];" + "r1 = *(u64 *)(r10 - 16);" + "r2 = %[dummy_loop_callback];" + "r3 = 0;" + "r4 = 0;" + "call %[bpf_loop];" + "r0 = 0;" + "exit;" + : + : __imm_ptr(dummy_loop_callback), + __imm(bpf_get_smp_processor_id), + __imm(bpf_get_prandom_u32), + __imm(bpf_loop) + : __clobber_common + ); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) +__msg("stack depth 512+0") +/* just to print xlated version when debugging */ +__xlated("r0 = &(void __percpu *)(r0)") +__success +/* cumulative_stack_depth() stack usage is MAX_BPF_STACK, + * called subprogram uses an additional slot for bpf_fastcall spill/fill, + * since bpf_fastcall spill/fill could be removed the program still fits + * in MAX_BPF_STACK and should be accepted. 
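+ *
+ * Worked numbers for the accounting (illustration; MAX_BPF_STACK is 512):
+ * the main frame below uses all 512 bytes and the subprogram's spill/fill
+ * slot would add 8 more, 512 + 8 = 520 exceeds the limit, but with the
+ * removable slot discounted the depth is 512 + 0 and the load succeeds,
+ * which is what the "stack depth 512+0" message checks. Compare
+ * bpf_fastcall_max_stack_fail below, where the slot cannot be removed
+ * and the resulting depth of 520 is rejected.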
+ */ +__naked int cumulative_stack_depth(void) +{ + asm volatile( + "r1 = 42;" + "*(u64 *)(r10 - %[max_bpf_stack]) = r1;" + "call cumulative_stack_depth_subprog;" + "exit;" + : + : __imm_const(max_bpf_stack, MAX_BPF_STACK) + : __clobber_all + ); +} + +__used +__naked static void cumulative_stack_depth_subprog(void) +{ + asm volatile ( + "*(u64 *)(r10 - 8) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - 8);" + "exit;" + :: __imm(bpf_get_smp_processor_id) : __clobber_all); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) +__msg("stack depth 512") +__xlated("0: r1 = 42") +__xlated("1: *(u64 *)(r10 -512) = r1") +__xlated("2: w0 = ") +__xlated("3: r0 = &(void __percpu *)(r0)") +__xlated("4: r0 = *(u32 *)(r0 +0)") +__xlated("5: exit") +__success +__naked int bpf_fastcall_max_stack_ok(void) +{ + asm volatile( + "r1 = 42;" + "*(u64 *)(r10 - %[max_bpf_stack]) = r1;" + "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);" + "exit;" + : + : __imm_const(max_bpf_stack, MAX_BPF_STACK), + __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8), + __imm(bpf_get_smp_processor_id) + : __clobber_all + ); +} + +SEC("raw_tp") +__arch_x86_64 +__log_level(4) +__msg("stack depth 520") +__failure +__naked int bpf_fastcall_max_stack_fail(void) +{ + asm volatile( + "r1 = 42;" + "*(u64 *)(r10 - %[max_bpf_stack]) = r1;" + "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;" + "call %[bpf_get_smp_processor_id];" + "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);" + /* call to prandom blocks bpf_fastcall rewrite */ + "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;" + "call %[bpf_get_prandom_u32];" + "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);" + "exit;" + : + : __imm_const(max_bpf_stack, MAX_BPF_STACK), + __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8), + __imm(bpf_get_smp_processor_id), + __imm(bpf_get_prandom_u32) + : __clobber_all + ); +} + +SEC("cgroup/getsockname_unix") +__xlated("0: r2 = 1") +/* bpf_cast_to_kern_ctx is replaced by a single assignment */ +__xlated("1: r0 = r1") +__xlated("2: r0 = r2") +__xlated("3: exit") +__success +__naked void kfunc_bpf_cast_to_kern_ctx(void) +{ + asm volatile ( + "r2 = 1;" + "*(u64 *)(r10 - 32) = r2;" + "call %[bpf_cast_to_kern_ctx];" + "r2 = *(u64 *)(r10 - 32);" + "r0 = r2;" + "exit;" + : + : __imm(bpf_cast_to_kern_ctx) + : __clobber_all); +} + +SEC("raw_tp") +__xlated("3: r3 = 1") +/* bpf_rdonly_cast is replaced by a single assignment */ +__xlated("4: r0 = r1") +__xlated("5: r0 = r3") +void kfunc_bpf_rdonly_cast(void) +{ + asm volatile ( + "r2 = %[btf_id];" + "r3 = 1;" + "*(u64 *)(r10 - 32) = r3;" + "call %[bpf_rdonly_cast];" + "r3 = *(u64 *)(r10 - 32);" + "r0 = r3;" + : + : __imm(bpf_rdonly_cast), + [btf_id]"r"(bpf_core_type_id_kernel(union bpf_attr)) + : __clobber_common); +} + +/* BTF FUNC records are not generated for kfuncs referenced + * from inline assembly. These records are necessary for + * libbpf to link the program. The function below is a hack + * to ensure that BTF FUNC records are generated. 
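+ * (Illustration: bpf_rdonly_cast is invoked above only as
+ * "call %[bpf_rdonly_cast]" inside asm, so no BTF FUNC record is emitted
+ * for it and libbpf would fail to link the program; the plain C calls
+ * below force the compiler to emit the records.)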
+ */ +void kfunc_root(void) +{ + bpf_cast_to_kern_ctx(0); + bpf_rdonly_cast(0, 0); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_const.c b/tools/testing/selftests/bpf/progs/verifier_const.c new file mode 100644 index 000000000000..2e533d7eec2f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_const.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Isovalent */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +const volatile long foo = 42; +long bar; +long bart = 96; + +SEC("tc/ingress") +__description("rodata/strtol: write rejected") +__failure __msg("write into map forbidden") +int tcx1(struct __sk_buff *skb) +{ + char buff[] = { '8', '4', '\0' }; + bpf_strtol(buff, sizeof(buff), 0, (long *)&foo); + return TCX_PASS; +} + +SEC("tc/ingress") +__description("bss/strtol: write accepted") +__success +int tcx2(struct __sk_buff *skb) +{ + char buff[] = { '8', '4', '\0' }; + bpf_strtol(buff, sizeof(buff), 0, &bar); + return TCX_PASS; +} + +SEC("tc/ingress") +__description("data/strtol: write accepted") +__success +int tcx3(struct __sk_buff *skb) +{ + char buff[] = { '8', '4', '\0' }; + bpf_strtol(buff, sizeof(buff), 0, &bart); + return TCX_PASS; +} + +SEC("tc/ingress") +__description("rodata/mtu: write rejected") +__failure __msg("write into map forbidden") +int tcx4(struct __sk_buff *skb) +{ + bpf_check_mtu(skb, skb->ifindex, (__u32 *)&foo, 0, 0); + return TCX_PASS; +} + +SEC("tc/ingress") +__description("bss/mtu: write accepted") +__success +int tcx5(struct __sk_buff *skb) +{ + bpf_check_mtu(skb, skb->ifindex, (__u32 *)&bar, 0, 0); + return TCX_PASS; +} + +SEC("tc/ingress") +__description("data/mtu: write accepted") +__success +int tcx6(struct __sk_buff *skb) +{ + bpf_check_mtu(skb, skb->ifindex, (__u32 *)&bart, 0, 0); + return TCX_PASS; +} + +char LICENSE[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c index a9fc30ed4d73..20904cd2baa2 100644 --- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c +++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c @@ -7,6 +7,7 @@ #include "bpf_misc.h" #include "xdp_metadata.h" #include "bpf_kfuncs.h" +#include "err.h" /* The compiler may be able to detect the access to uninitialized memory in the routines performing out of bound memory accesses and @@ -331,7 +332,11 @@ SEC("?lsm/bpf") __success __log_level(2) int BPF_PROG(arg_tag_ctx_lsm) { - return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx); + int ret; + + ret = tracing_subprog_void(ctx) + tracing_subprog_u64(ctx); + set_if_not_errno_or_zero(ret, -1); + return ret; } SEC("?struct_ops/test_1") diff --git a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c index 9fc3fae5cd83..5f2efb895edb 100644 --- a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c +++ b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c @@ -6,9 +6,8 @@ #include "bpf_misc.h" SEC("socket") -__description("ARG_PTR_TO_LONG uninitialized") +__description("arg pointer to long uninitialized") __success -__failure_unpriv __msg_unpriv("invalid indirect read from stack R4 off -16+0 size 8") __naked void arg_ptr_to_long_uninitialized(void) { asm volatile (" \ @@ -35,10 +34,8 @@ __naked void arg_ptr_to_long_uninitialized(void) } SEC("socket") -__description("ARG_PTR_TO_LONG half-uninitialized") -/* in privileged 
mode reads from uninitialized stack locations are permitted */ -__success __failure_unpriv -__msg_unpriv("invalid indirect read from stack R4 off -16+4 size 8") +__description("arg pointer to long half-uninitialized") +__success __retval(0) __naked void ptr_to_long_half_uninitialized(void) { @@ -67,7 +64,7 @@ __naked void ptr_to_long_half_uninitialized(void) } SEC("cgroup/sysctl") -__description("ARG_PTR_TO_LONG misaligned") +__description("arg pointer to long misaligned") __failure __msg("misaligned stack access off 0+-20+0 size 8") __naked void arg_ptr_to_long_misaligned(void) { @@ -98,7 +95,7 @@ __naked void arg_ptr_to_long_misaligned(void) } SEC("cgroup/sysctl") -__description("ARG_PTR_TO_LONG size < sizeof(long)") +__description("arg pointer to long size < sizeof(long)") __failure __msg("invalid indirect access to stack R4 off=-4 size=8") __naked void to_long_size_sizeof_long(void) { @@ -127,7 +124,7 @@ __naked void to_long_size_sizeof_long(void) } SEC("cgroup/sysctl") -__description("ARG_PTR_TO_LONG initialized") +__description("arg pointer to long initialized") __success __naked void arg_ptr_to_long_initialized(void) { diff --git a/tools/testing/selftests/bpf/progs/verifier_jit_convergence.c b/tools/testing/selftests/bpf/progs/verifier_jit_convergence.c new file mode 100644 index 000000000000..9f3f2b7db450 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_jit_convergence.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +struct value_t { + long long a[32]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct value_t); +} map_hash SEC(".maps"); + +SEC("socket") +__description("bpf_jit_convergence je <-> jmp") +__success __retval(0) +__arch_x86_64 +__jited(" pushq %rbp") +__naked void btf_jit_convergence_je_jmp(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "if r0 == 0 goto l20_%=;" + "if r0 == 1 goto l21_%=;" + "if r0 == 2 goto l22_%=;" + "if r0 == 3 goto l23_%=;" + "if r0 == 4 goto l24_%=;" + "call %[bpf_get_prandom_u32];" + "call %[bpf_get_prandom_u32];" +"l20_%=:" +"l21_%=:" +"l22_%=:" +"l23_%=:" +"l24_%=:" + "r1 = 0;" + "*(u64 *)(r10 - 8) = r1;" + "r2 = r10;" + "r2 += -8;" + "r1 = %[map_hash] ll;" + "call %[bpf_map_lookup_elem];" + "if r0 == 0 goto l1_%=;" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r5 = r6;" + "if r0 != 0x0 goto l12_%=;" + "call %[bpf_get_prandom_u32];" + "r1 = r0;" + "r2 = r6;" + "if r1 == 0x0 goto l0_%=;" +"l9_%=:" + "r2 = *(u64 *)(r6 + 0x0);" + "r2 += 0x1;" + "*(u64 *)(r6 + 0x0) = r2;" + "goto l1_%=;" +"l12_%=:" + "r1 = r7;" + "r1 += 0x98;" + "r2 = r5;" + "r2 += 0x90;" + "r2 = *(u32 *)(r2 + 0x0);" + "r3 = r7;" + "r3 &= 0x1;" + "r2 *= 0xa8;" + "if r3 == 0x0 goto l2_%=;" + "r1 += r2;" + "r1 -= r7;" + "r1 += 0x8;" + "if r1 <= 0xb20 goto l3_%=;" + "r1 = 0x0;" + "goto l4_%=;" +"l3_%=:" + "r1 += r7;" +"l4_%=:" + "if r1 == 0x0 goto l8_%=;" + "goto l9_%=;" +"l2_%=:" + "r1 += r2;" + "r1 -= r7;" + "r1 += 0x10;" + "if r1 <= 0xb20 goto l6_%=;" + "r1 = 0x0;" + "goto l7_%=;" +"l6_%=:" + "r1 += r7;" +"l7_%=:" + "if r1 == 0x0 goto l8_%=;" + "goto l9_%=;" +"l0_%=:" + "r1 = 0x3;" + "*(u64 *)(r10 - 0x10) = r1;" + "r2 = r1;" + "goto l1_%=;" +"l8_%=:" + "r1 = r5;" + "r1 += 0x4;" + "r1 = *(u32 *)(r1 + 0x0);" + "*(u64 *)(r10 - 0x8) = r1;" +"l1_%=:" + "r0 = 0;" + "exit;" + : + : __imm(bpf_get_prandom_u32), + 
__imm(bpf_map_lookup_elem), + __imm_addr(map_hash) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c b/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c index cb32b0cfc84b..a509cad97e69 100644 --- a/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c +++ b/tools/testing/selftests/bpf/progs/verifier_kfunc_prog_types.c @@ -47,6 +47,22 @@ int BPF_PROG(task_kfunc_syscall) return 0; } +SEC("tracepoint") +__success +int BPF_PROG(task_kfunc_tracepoint) +{ + task_kfunc_load_test(); + return 0; +} + +SEC("perf_event") +__success +int BPF_PROG(task_kfunc_perf_event) +{ + task_kfunc_load_test(); + return 0; +} + /***************** * cgroup kfuncs * *****************/ @@ -85,6 +101,22 @@ int BPF_PROG(cgrp_kfunc_syscall) return 0; } +SEC("tracepoint") +__success +int BPF_PROG(cgrp_kfunc_tracepoint) +{ + cgrp_kfunc_load_test(); + return 0; +} + +SEC("perf_event") +__success +int BPF_PROG(cgrp_kfunc_perf_event) +{ + cgrp_kfunc_load_test(); + return 0; +} + /****************** * cpumask kfuncs * ******************/ @@ -120,3 +152,19 @@ int BPF_PROG(cpumask_kfunc_syscall) cpumask_kfunc_load_test(); return 0; } + +SEC("tracepoint") +__success +int BPF_PROG(cpumask_kfunc_tracepoint) +{ + cpumask_kfunc_load_test(); + return 0; +} + +SEC("perf_event") +__success +int BPF_PROG(cpumask_kfunc_perf_event) +{ + cpumask_kfunc_load_test(); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c index d4427d8e1217..52edee41caf6 100644 --- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c +++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c @@ -144,6 +144,118 @@ __naked void ldsx_s32_range(void) : __clobber_all); } +SEC("xdp") +__description("LDSX, xdp s32 xdp_md->data") +__failure __msg("invalid bpf_context access") +__naked void ldsx_ctx_1(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[xdp_md_data]);" + "r0 = 0;" + "exit;" + : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)) + : __clobber_all); +} + +SEC("xdp") +__description("LDSX, xdp s32 xdp_md->data_end") +__failure __msg("invalid bpf_context access") +__naked void ldsx_ctx_2(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[xdp_md_data_end]);" + "r0 = 0;" + "exit;" + : + : __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("LDSX, xdp s32 xdp_md->data_meta") +__failure __msg("invalid bpf_context access") +__naked void ldsx_ctx_3(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[xdp_md_data_meta]);" + "r0 = 0;" + "exit;" + : + : __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("tcx/ingress") +__description("LDSX, tcx s32 __sk_buff->data") +__failure __msg("invalid bpf_context access") +__naked void ldsx_ctx_4(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[sk_buff_data]);" + "r0 = 0;" + "exit;" + : + : __imm_const(sk_buff_data, offsetof(struct __sk_buff, data)) + : __clobber_all); +} + +SEC("tcx/ingress") +__description("LDSX, tcx s32 __sk_buff->data_end") +__failure __msg("invalid bpf_context access") +__naked void ldsx_ctx_5(void) +{ + asm volatile ( + "r2 = *(s32 *)(r1 + %[sk_buff_data_end]);" + "r0 = 0;" + "exit;" + : + : __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tcx/ingress") +__description("LDSX, tcx s32 __sk_buff->data_meta") +__failure __msg("invalid bpf_context access") +__naked void 
ldsx_ctx_6(void)
+{
+	asm volatile (
+	"r2 = *(s32 *)(r1 + %[sk_buff_data_meta]);"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_const(sk_buff_data_meta, offsetof(struct __sk_buff, data_meta))
+	: __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("LDSX, flow_dissector s32 __sk_buff->data")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_7(void)
+{
+	asm volatile (
+	"r2 = *(s32 *)(r1 + %[sk_buff_data]);"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_const(sk_buff_data, offsetof(struct __sk_buff, data))
+	: __clobber_all);
+}
+
+SEC("flow_dissector")
+__description("LDSX, flow_dissector s32 __sk_buff->data_end")
+__failure __msg("invalid bpf_context access")
+__naked void ldsx_ctx_8(void)
+{
+	asm volatile (
+	"r2 = *(s32 *)(r1 + %[sk_buff_data_end]);"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm_const(sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+	: __clobber_all);
+}
+
 #else
 
 SEC("socket")
diff --git a/tools/testing/selftests/bpf/progs/verifier_lsm.c b/tools/testing/selftests/bpf/progs/verifier_lsm.c
new file mode 100644
index 000000000000..32e5e779cb96
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_lsm.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("lsm/file_alloc_security")
+__description("lsm bpf prog with -4095~0 retval. test 1")
+__success
+__naked int errno_zero_retval_test1(void *ctx)
+{
+	asm volatile (
+	"r0 = 0;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("lsm/file_alloc_security")
+__description("lsm bpf prog with -4095~0 retval. test 2")
+__success
+__naked int errno_zero_retval_test2(void *ctx)
+{
+	asm volatile (
+	"r0 = -4095;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("lsm/file_mprotect")
+__description("lsm bpf prog with -4095~0 retval. test 4")
+__failure __msg("R0 has smin=-4096 smax=-4096 should have been in [-4095, 0]")
+__naked int errno_zero_retval_test4(void *ctx)
+{
+	asm volatile (
+	"r0 = -4096;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("lsm/file_mprotect")
+__description("lsm bpf prog with -4095~0 retval. test 5")
+__failure __msg("R0 has smin=4096 smax=4096 should have been in [-4095, 0]")
+__naked int errno_zero_retval_test5(void *ctx)
+{
+	asm volatile (
+	"r0 = 4096;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("lsm/file_mprotect")
+__description("lsm bpf prog with -4095~0 retval. test 6")
+__failure __msg("R0 has smin=1 smax=1 should have been in [-4095, 0]")
+__naked int errno_zero_retval_test6(void *ctx)
+{
+	asm volatile (
+	"r0 = 1;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("lsm/audit_rule_known")
+__description("lsm bpf prog with bool retval. test 1")
+__success
+__naked int bool_retval_test1(void *ctx)
+{
+	asm volatile (
+	"r0 = 1;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("lsm/audit_rule_known")
+__description("lsm bpf prog with bool retval. test 2")
+__success
+__naked int bool_retval_test2(void *ctx)
+{
+	asm volatile (
+	"r0 = 0;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("lsm/audit_rule_known")
+__description("lsm bpf prog with bool retval. test 3")
+__failure __msg("R0 has smin=-1 smax=-1 should have been in [0, 1]")
+__naked int bool_retval_test3(void *ctx)
+{
+	asm volatile (
+	"r0 = -1;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("lsm/audit_rule_known")
+__description("lsm bpf prog with bool retval. 
test 4") +__failure __msg("R0 has smin=2 smax=2 should have been in [0, 1]") +__naked int bool_retval_test4(void *ctx) +{ + asm volatile ( + "r0 = 2;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/file_free_security") +__success +__description("lsm bpf prog with void retval. test 1") +__naked int void_retval_test1(void *ctx) +{ + asm volatile ( + "r0 = -4096;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/file_free_security") +__success +__description("lsm bpf prog with void retval. test 2") +__naked int void_retval_test2(void *ctx) +{ + asm volatile ( + "r0 = 4096;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/getprocattr") +__description("lsm disabled hook: getprocattr") +__failure __msg("points to disabled hook") +__naked int disabled_hook_test1(void *ctx) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/setprocattr") +__description("lsm disabled hook: setprocattr") +__failure __msg("points to disabled hook") +__naked int disabled_hook_test2(void *ctx) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +SEC("lsm/ismaclabel") +__description("lsm disabled hook: ismaclabel") +__failure __msg("points to disabled hook") +__naked int disabled_hook_test3(void *ctx) +{ + asm volatile ( + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c index 13b29a7faa71..2ecf77b623e0 100644 --- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c +++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c @@ -5,18 +5,27 @@ #include "bpf_misc.h" /* Check that precision marks propagate through scalar IDs. - * Registers r{0,1,2} have the same scalar ID at the moment when r0 is - * marked to be precise, this mark is immediately propagated to r{1,2}. + * Registers r{0,1,2} have the same scalar ID. + * Range information is propagated for scalars sharing same ID. + * Check that precision mark for r0 causes precision marks for r{1,2} + * when range information is propagated for 'if <reg> <op> <const>' insn. 
*/ SEC("socket") __success __log_level(2) -__msg("frame0: regs=r0,r1,r2 stack= before 4: (bf) r3 = r10") +/* first 'if' branch */ +__msg("6: (0f) r3 += r0") +__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0") +__msg("frame0: parent state regs=r0,r1,r2 stack=:") __msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0") -__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") -__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") -__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") +/* second 'if' branch */ +__msg("from 4 to 5: ") +__msg("6: (0f) r3 += r0") +__msg("frame0: regs=r0 stack= before 5: (bf) r3 = r10") +__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0") +/* parent state already has r{0,1,2} as precise */ +__msg("frame0: parent state regs= stack=:") __flag(BPF_F_TEST_STATE_FREQ) -__naked void precision_same_state(void) +__naked void linked_regs_bpf_k(void) { asm volatile ( /* r0 = random number up to 0xff */ @@ -25,7 +34,8 @@ __naked void precision_same_state(void) /* tie r0.id == r1.id == r2.id */ "r1 = r0;" "r2 = r0;" - /* force r0 to be precise, this immediately marks r1 and r2 as + "if r1 > 7 goto +0;" + /* force r0 to be precise, this eventually marks r1 and r2 as * precise as well because of shared IDs */ "r3 = r10;" @@ -37,22 +47,17 @@ __naked void precision_same_state(void) : __clobber_all); } -/* Same as precision_same_state, but mark propagates through state / - * parent state boundary. +/* Registers r{0,1,2} share same ID when 'if r1 > ...' insn is processed, + * check that verifier marks r{1,2} as precise while backtracking + * 'if r1 > ...' with r0 already marked. */ SEC("socket") __success __log_level(2) -__msg("frame0: last_idx 6 first_idx 5 subseq_idx -1") -__msg("frame0: regs=r0,r1,r2 stack= before 5: (bf) r3 = r10") -__msg("frame0: parent state regs=r0,r1,r2 stack=:") -__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0") -__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0") -__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") -__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") -__msg("frame0: parent state regs=r0 stack=:") -__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") __flag(BPF_F_TEST_STATE_FREQ) -__naked void precision_cross_state(void) +__msg("frame0: regs=r0 stack= before 5: (2d) if r1 > r3 goto pc+0") +__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:") +__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7") +__naked void linked_regs_bpf_x_src(void) { asm volatile ( /* r0 = random number up to 0xff */ @@ -61,13 +66,13 @@ __naked void precision_cross_state(void) /* tie r0.id == r1.id == r2.id */ "r1 = r0;" "r2 = r0;" - /* force checkpoint */ - "goto +0;" - /* force r0 to be precise, this immediately marks r1 and r2 as + "r3 = 7;" + "if r1 > r3 goto +0;" + /* force r0 to be precise, this eventually marks r1 and r2 as * precise as well because of shared IDs */ - "r3 = r10;" - "r3 += r0;" + "r4 = r10;" + "r4 += r0;" "r0 = 0;" "exit;" : @@ -75,19 +80,17 @@ __naked void precision_cross_state(void) : __clobber_all); } -/* Same as precision_same_state, but break one of the - * links, note that r1 is absent from regs=... in __msg below. +/* Registers r{0,1,2} share same ID when 'if r1 > r3' insn is processed, + * check that verifier marks r{0,1,2} as precise while backtracking + * 'if r1 > r3' with r3 already marked. 
*/ SEC("socket") __success __log_level(2) -__msg("frame0: regs=r0,r2 stack= before 5: (bf) r3 = r10") -__msg("frame0: regs=r0,r2 stack= before 4: (b7) r1 = 0") -__msg("frame0: regs=r0,r2 stack= before 3: (bf) r2 = r0") -__msg("frame0: regs=r0 stack= before 2: (bf) r1 = r0") -__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") -__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") __flag(BPF_F_TEST_STATE_FREQ) -__naked void precision_same_state_broken_link(void) +__msg("frame0: regs=r3 stack= before 5: (2d) if r1 > r3 goto pc+0") +__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:") +__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7") +__naked void linked_regs_bpf_x_dst(void) { asm volatile ( /* r0 = random number up to 0xff */ @@ -96,15 +99,13 @@ __naked void precision_same_state_broken_link(void) /* tie r0.id == r1.id == r2.id */ "r1 = r0;" "r2 = r0;" - /* break link for r1, this is the only line that differs - * compared to the previous test - */ - "r1 = 0;" - /* force r0 to be precise, this immediately marks r1 and r2 as + "r3 = 7;" + "if r1 > r3 goto +0;" + /* force r0 to be precise, this eventually marks r1 and r2 as * precise as well because of shared IDs */ - "r3 = r10;" - "r3 += r0;" + "r4 = r10;" + "r4 += r3;" "r0 = 0;" "exit;" : @@ -112,22 +113,18 @@ __naked void precision_same_state_broken_link(void) : __clobber_all); } -/* Same as precision_same_state_broken_link, but with state / - * parent state boundary. +/* Same as linked_regs_bpf_k, but break one of the + * links, note that r1 is absent from regs=... in __msg below. */ SEC("socket") __success __log_level(2) -__msg("frame0: regs=r0,r2 stack= before 6: (bf) r3 = r10") -__msg("frame0: regs=r0,r2 stack= before 5: (b7) r1 = 0") -__msg("frame0: parent state regs=r0,r2 stack=:") -__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0") -__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0") -__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") -__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__msg("7: (0f) r3 += r0") +__msg("frame0: regs=r0 stack= before 6: (bf) r3 = r10") __msg("frame0: parent state regs=r0 stack=:") -__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") +__msg("frame0: regs=r0 stack= before 5: (25) if r0 > 0x7 goto pc+0") +__msg("frame0: parent state regs=r0,r2 stack=:") __flag(BPF_F_TEST_STATE_FREQ) -__naked void precision_cross_state_broken_link(void) +__naked void linked_regs_broken_link(void) { asm volatile ( /* r0 = random number up to 0xff */ @@ -136,18 +133,13 @@ __naked void precision_cross_state_broken_link(void) /* tie r0.id == r1.id == r2.id */ "r1 = r0;" "r2 = r0;" - /* force checkpoint, although link between r1 and r{0,2} is - * broken by the next statement current precision tracking - * algorithm can't react to it and propagates mark for r1 to - * the parent state. 
- */ - "goto +0;" /* break link for r1, this is the only line that differs - * compared to precision_cross_state() + * compared to the previous test */ "r1 = 0;" - /* force r0 to be precise, this immediately marks r1 and r2 as - * precise as well because of shared IDs + "if r0 > 7 goto +0;" + /* force r0 to be precise, + * this eventually marks r2 as precise because of shared IDs */ "r3 = r10;" "r3 += r0;" @@ -164,10 +156,16 @@ __naked void precision_cross_state_broken_link(void) */ SEC("socket") __success __log_level(2) -__msg("11: (0f) r2 += r1") +__msg("12: (0f) r2 += r1") /* Current state */ -__msg("frame2: last_idx 11 first_idx 10 subseq_idx -1") -__msg("frame2: regs=r1 stack= before 10: (bf) r2 = r10") +__msg("frame2: last_idx 12 first_idx 11 subseq_idx -1 ") +__msg("frame2: regs=r1 stack= before 11: (bf) r2 = r10") +__msg("frame2: parent state regs=r1 stack=") +__msg("frame1: parent state regs= stack=") +__msg("frame0: parent state regs= stack=") +/* Parent state */ +__msg("frame2: last_idx 10 first_idx 10 subseq_idx 11 ") +__msg("frame2: regs=r1 stack= before 10: (25) if r1 > 0x7 goto pc+0") __msg("frame2: parent state regs=r1 stack=") /* frame1.r{6,7} are marked because mark_precise_scalar_ids() * looks for all registers with frame2.r1.id in the current state @@ -192,7 +190,7 @@ __msg("frame1: regs=r1 stack= before 4: (85) call pc+1") __msg("frame0: parent state regs=r1,r6 stack=") /* Parent state */ __msg("frame0: last_idx 3 first_idx 1 subseq_idx 4") -__msg("frame0: regs=r0,r1,r6 stack= before 3: (bf) r6 = r0") +__msg("frame0: regs=r1,r6 stack= before 3: (bf) r6 = r0") __msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") __msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") __flag(BPF_F_TEST_STATE_FREQ) @@ -230,7 +228,8 @@ static __naked __noinline __used void precision_many_frames__bar(void) { asm volatile ( - /* force r1 to be precise, this immediately marks: + "if r1 > 7 goto +0;" + /* force r1 to be precise, this eventually marks: * - bar frame r1 * - foo frame r{1,6,7} * - main frame r{1,6} @@ -247,14 +246,16 @@ void precision_many_frames__bar(void) */ SEC("socket") __success __log_level(2) +__msg("11: (0f) r2 += r1") /* foo frame */ -__msg("frame1: regs=r1 stack=-8,-16 before 9: (bf) r2 = r10") +__msg("frame1: regs=r1 stack= before 10: (bf) r2 = r10") +__msg("frame1: regs=r1 stack= before 9: (25) if r1 > 0x7 goto pc+0") __msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1") __msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1") __msg("frame1: regs=r1 stack= before 4: (85) call pc+2") /* main frame */ -__msg("frame0: regs=r0,r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1") -__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1") +__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0") __msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") __flag(BPF_F_TEST_STATE_FREQ) __naked void precision_stack(void) @@ -283,7 +284,8 @@ void precision_stack__foo(void) */ "*(u64*)(r10 - 8) = r1;" "*(u64*)(r10 - 16) = r1;" - /* force r1 to be precise, this immediately marks: + "if r1 > 7 goto +0;" + /* force r1 to be precise, this eventually marks: * - foo frame r1,fp{-8,-16} * - main frame r1,fp{-8} */ @@ -299,15 +301,17 @@ void precision_stack__foo(void) SEC("socket") __success __log_level(2) /* r{6,7} */ -__msg("11: (0f) r3 += r7") -__msg("frame0: regs=r6,r7 stack= before 10: (bf) r3 = r10") +__msg("12: (0f) r3 += r7") +__msg("frame0: regs=r7 stack= before 11: (bf) r3 
= r10") +__msg("frame0: regs=r7 stack= before 9: (25) if r7 > 0x7 goto pc+0") /* ... skip some insns ... */ __msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0") __msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0") /* r{8,9} */ -__msg("12: (0f) r3 += r9") -__msg("frame0: regs=r8,r9 stack= before 11: (0f) r3 += r7") +__msg("13: (0f) r3 += r9") +__msg("frame0: regs=r9 stack= before 12: (0f) r3 += r7") /* ... skip some insns ... */ +__msg("frame0: regs=r9 stack= before 10: (25) if r9 > 0x7 goto pc+0") __msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0") __msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0") __flag(BPF_F_TEST_STATE_FREQ) @@ -328,8 +332,9 @@ __naked void precision_two_ids(void) "r9 = r0;" /* clear r0 id */ "r0 = 0;" - /* force checkpoint */ - "goto +0;" + /* propagate equal scalars precision */ + "if r7 > 7 goto +0;" + "if r9 > 7 goto +0;" "r3 = r10;" /* force r7 to be precise, this also marks r6 */ "r3 += r7;" @@ -341,6 +346,105 @@ __naked void precision_two_ids(void) : __clobber_all); } +SEC("socket") +__success __log_level(2) +__flag(BPF_F_TEST_STATE_FREQ) +/* check thar r0 and r6 have different IDs after 'if', + * collect_linked_regs() can't tie more than 6 registers for a single insn. + */ +__msg("8: (25) if r0 > 0x7 goto pc+0 ; R0=scalar(id=1") +__msg("9: (bf) r6 = r6 ; R6_w=scalar(id=2") +/* check that r{0-5} are marked precise after 'if' */ +__msg("frame0: regs=r0 stack= before 8: (25) if r0 > 0x7 goto pc+0") +__msg("frame0: parent state regs=r0,r1,r2,r3,r4,r5 stack=:") +__naked void linked_regs_too_many_regs(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r{0-6} IDs */ + "r1 = r0;" + "r2 = r0;" + "r3 = r0;" + "r4 = r0;" + "r5 = r0;" + "r6 = r0;" + /* propagate range for r{0-6} */ + "if r0 > 7 goto +0;" + /* make r6 appear in the log */ + "r6 = r6;" + /* force r0 to be precise, + * this would cause r{0-4} to be precise because of shared IDs + */ + "r7 = r10;" + "r7 += r0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__failure __log_level(2) +__flag(BPF_F_TEST_STATE_FREQ) +__msg("regs=r7 stack= before 5: (3d) if r8 >= r0") +__msg("parent state regs=r0,r7,r8") +__msg("regs=r0,r7,r8 stack= before 4: (25) if r0 > 0x1") +__msg("div by zero") +__naked void linked_regs_broken_link_2(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r8 = r0;" + "call %[bpf_get_prandom_u32];" + "if r0 > 1 goto +0;" + /* r7.id == r8.id, + * thus r7 precision implies r8 precision, + * which implies r0 precision because of the conditional below. + */ + "if r8 >= r0 goto 1f;" + /* break id relation between r7 and r8 */ + "r8 += r8;" + /* make r7 precise */ + "if r7 == 0 goto 1f;" + "r0 /= 0;" +"1:" + "r0 = 42;" + "exit;" + : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* Check that mark_chain_precision() for one of the conditional jump + * operands does not trigger equal scalars precision propagation. 
+ */ +SEC("socket") +__success __log_level(2) +__msg("3: (25) if r1 > 0x100 goto pc+0") +__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0") +__naked void cjmp_no_linked_regs_trigger(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id */ + "r1 = r0;" + /* the jump below would be predicted, thus r1 would be marked precise, + * this should not imply precision mark for r0 + */ + "if r1 > 256 goto +0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + /* Verify that check_ids() is used by regsafe() for scalars. * * r9 = ... some pointer with range X ... diff --git a/tools/testing/selftests/bpf/progs/verifier_sdiv.c b/tools/testing/selftests/bpf/progs/verifier_sdiv.c index 2a2271cf0294..148d2299e5b4 100644 --- a/tools/testing/selftests/bpf/progs/verifier_sdiv.c +++ b/tools/testing/selftests/bpf/progs/verifier_sdiv.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> +#include <limits.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" @@ -770,6 +771,444 @@ __naked void smod64_zero_divisor(void) " ::: __clobber_all); } +SEC("socket") +__description("SDIV64, overflow r/r, LLONG_MIN/-1") +__success __retval(1) +__arch_x86_64 +__xlated("0: r2 = 0x8000000000000000") +__xlated("2: r3 = -1") +__xlated("3: r4 = r2") +__xlated("4: r11 = r3") +__xlated("5: r11 += 1") +__xlated("6: if r11 > 0x1 goto pc+4") +__xlated("7: if r11 == 0x0 goto pc+1") +__xlated("8: r2 = 0") +__xlated("9: r2 = -r2") +__xlated("10: goto pc+1") +__xlated("11: r2 s/= r3") +__xlated("12: r0 = 0") +__xlated("13: if r2 != r4 goto pc+1") +__xlated("14: r0 = 1") +__xlated("15: exit") +__naked void sdiv64_overflow_rr(void) +{ + asm volatile (" \ + r2 = %[llong_min] ll; \ + r3 = -1; \ + r4 = r2; \ + r2 s/= r3; \ + r0 = 0; \ + if r2 != r4 goto +1; \ + r0 = 1; \ + exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SDIV64, r/r, small_val/-1") +__success __retval(-5) +__arch_x86_64 +__xlated("0: r2 = 5") +__xlated("1: r3 = -1") +__xlated("2: r11 = r3") +__xlated("3: r11 += 1") +__xlated("4: if r11 > 0x1 goto pc+4") +__xlated("5: if r11 == 0x0 goto pc+1") +__xlated("6: r2 = 0") +__xlated("7: r2 = -r2") +__xlated("8: goto pc+1") +__xlated("9: r2 s/= r3") +__xlated("10: r0 = r2") +__xlated("11: exit") +__naked void sdiv64_rr_divisor_neg_1(void) +{ + asm volatile (" \ + r2 = 5; \ + r3 = -1; \ + r2 s/= r3; \ + r0 = r2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SDIV64, overflow r/i, LLONG_MIN/-1") +__success __retval(1) +__arch_x86_64 +__xlated("0: r2 = 0x8000000000000000") +__xlated("2: r4 = r2") +__xlated("3: r2 = -r2") +__xlated("4: r0 = 0") +__xlated("5: if r2 != r4 goto pc+1") +__xlated("6: r0 = 1") +__xlated("7: exit") +__naked void sdiv64_overflow_ri(void) +{ + asm volatile (" \ + r2 = %[llong_min] ll; \ + r4 = r2; \ + r2 s/= -1; \ + r0 = 0; \ + if r2 != r4 goto +1; \ + r0 = 1; \ + exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SDIV64, r/i, small_val/-1") +__success __retval(-5) +__arch_x86_64 +__xlated("0: r2 = 5") +__xlated("1: r4 = r2") +__xlated("2: r2 = -r2") +__xlated("3: r0 = r2") +__xlated("4: exit") +__naked void sdiv64_ri_divisor_neg_1(void) +{ + asm volatile (" \ + r2 = 5; \ + r4 = r2; \ + r2 s/= -1; \ + r0 = r2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, overflow r/r, INT_MIN/-1") +__success __retval(1) 
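+/* Same guard pattern as in the 64-bit tests above: the verifier-patched code + * computes w11 = divisor + 1, so w11 in [0; 1] singles out the two special + * divisors: 0 (quotient is forced to 0) and -1 (the division is replaced by + * a negation, which wraps INT_MIN back to INT_MIN instead of trapping on + * x86); every other divisor falls through to the native s/= instruction. + */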
+__arch_x86_64 +__xlated("0: w2 = -2147483648") +__xlated("1: w3 = -1") +__xlated("2: w4 = w2") +__xlated("3: r11 = r3") +__xlated("4: w11 += 1") +__xlated("5: if w11 > 0x1 goto pc+4") +__xlated("6: if w11 == 0x0 goto pc+1") +__xlated("7: w2 = 0") +__xlated("8: w2 = -w2") +__xlated("9: goto pc+1") +__xlated("10: w2 s/= w3") +__xlated("11: r0 = 0") +__xlated("12: if w2 != w4 goto pc+1") +__xlated("13: r0 = 1") +__xlated("14: exit") +__naked void sdiv32_overflow_rr(void) +{ + asm volatile (" \ + w2 = %[int_min]; \ + w3 = -1; \ + w4 = w2; \ + w2 s/= w3; \ + r0 = 0; \ + if w2 != w4 goto +1; \ + r0 = 1; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, r/r, small_val/-1") +__success __retval(5) +__arch_x86_64 +__xlated("0: w2 = -5") +__xlated("1: w3 = -1") +__xlated("2: w4 = w2") +__xlated("3: r11 = r3") +__xlated("4: w11 += 1") +__xlated("5: if w11 > 0x1 goto pc+4") +__xlated("6: if w11 == 0x0 goto pc+1") +__xlated("7: w2 = 0") +__xlated("8: w2 = -w2") +__xlated("9: goto pc+1") +__xlated("10: w2 s/= w3") +__xlated("11: w0 = w2") +__xlated("12: exit") +__naked void sdiv32_rr_divisor_neg_1(void) +{ + asm volatile (" \ + w2 = -5; \ + w3 = -1; \ + w4 = w2; \ + w2 s/= w3; \ + w0 = w2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, overflow r/i, INT_MIN/-1") +__success __retval(1) +__arch_x86_64 +__xlated("0: w2 = -2147483648") +__xlated("1: w4 = w2") +__xlated("2: w2 = -w2") +__xlated("3: r0 = 0") +__xlated("4: if w2 != w4 goto pc+1") +__xlated("5: r0 = 1") +__xlated("6: exit") +__naked void sdiv32_overflow_ri(void) +{ + asm volatile (" \ + w2 = %[int_min]; \ + w4 = w2; \ + w2 s/= -1; \ + r0 = 0; \ + if w2 != w4 goto +1; \ + r0 = 1; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SDIV32, r/i, small_val/-1") +__success __retval(-5) +__arch_x86_64 +__xlated("0: w2 = 5") +__xlated("1: w4 = w2") +__xlated("2: w2 = -w2") +__xlated("3: w0 = w2") +__xlated("4: exit") +__naked void sdiv32_ri_divisor_neg_1(void) +{ + asm volatile (" \ + w2 = 5; \ + w4 = w2; \ + w2 s/= -1; \ + w0 = w2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SMOD64, overflow r/r, LLONG_MIN/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: r2 = 0x8000000000000000") +__xlated("2: r3 = -1") +__xlated("3: r4 = r2") +__xlated("4: r11 = r3") +__xlated("5: r11 += 1") +__xlated("6: if r11 > 0x1 goto pc+3") +__xlated("7: if r11 == 0x1 goto pc+3") +__xlated("8: w2 = 0") +__xlated("9: goto pc+1") +__xlated("10: r2 s%= r3") +__xlated("11: r0 = r2") +__xlated("12: exit") +__naked void smod64_overflow_rr(void) +{ + asm volatile (" \ + r2 = %[llong_min] ll; \ + r3 = -1; \ + r4 = r2; \ + r2 s%%= r3; \ + r0 = r2; \ + exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SMOD64, r/r, small_val/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: r2 = 5") +__xlated("1: r3 = -1") +__xlated("2: r4 = r2") +__xlated("3: r11 = r3") +__xlated("4: r11 += 1") +__xlated("5: if r11 > 0x1 goto pc+3") +__xlated("6: if r11 == 0x1 goto pc+3") +__xlated("7: w2 = 0") +__xlated("8: goto pc+1") +__xlated("9: r2 s%= r3") +__xlated("10: r0 = r2") +__xlated("11: exit") +__naked void smod64_rr_divisor_neg_1(void) +{ + asm volatile (" \ + r2 = 5; \ + r3 = -1; \ + r4 = r2; \ + r2 s%%= r3; \ + r0 = r2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SMOD64, overflow r/i, LLONG_MIN/-1") +__success 
__retval(0) +__arch_x86_64 +__xlated("0: r2 = 0x8000000000000000") +__xlated("2: r4 = r2") +__xlated("3: w2 = 0") +__xlated("4: r0 = r2") +__xlated("5: exit") +__naked void smod64_overflow_ri(void) +{ + asm volatile (" \ + r2 = %[llong_min] ll; \ + r4 = r2; \ + r2 s%%= -1; \ + r0 = r2; \ + exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SMOD64, r/i, small_val/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: r2 = 5") +__xlated("1: r4 = r2") +__xlated("2: w2 = 0") +__xlated("3: r0 = r2") +__xlated("4: exit") +__naked void smod64_ri_divisor_neg_1(void) +{ + asm volatile (" \ + r2 = 5; \ + r4 = r2; \ + r2 s%%= -1; \ + r0 = r2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SMOD32, overflow r/r, INT_MIN/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: w2 = -2147483648") +__xlated("1: w3 = -1") +__xlated("2: w4 = w2") +__xlated("3: r11 = r3") +__xlated("4: w11 += 1") +__xlated("5: if w11 > 0x1 goto pc+3") +__xlated("6: if w11 == 0x1 goto pc+4") +__xlated("7: w2 = 0") +__xlated("8: goto pc+1") +__xlated("9: w2 s%= w3") +__xlated("10: goto pc+1") +__xlated("11: w2 = w2") +__xlated("12: r0 = r2") +__xlated("13: exit") +__naked void smod32_overflow_rr(void) +{ + asm volatile (" \ + w2 = %[int_min]; \ + w3 = -1; \ + w4 = w2; \ + w2 s%%= w3; \ + r0 = r2; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SMOD32, r/r, small_val/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: w2 = -5") +__xlated("1: w3 = -1") +__xlated("2: w4 = w2") +__xlated("3: r11 = r3") +__xlated("4: w11 += 1") +__xlated("5: if w11 > 0x1 goto pc+3") +__xlated("6: if w11 == 0x1 goto pc+4") +__xlated("7: w2 = 0") +__xlated("8: goto pc+1") +__xlated("9: w2 s%= w3") +__xlated("10: goto pc+1") +__xlated("11: w2 = w2") +__xlated("12: r0 = r2") +__xlated("13: exit") +__naked void smod32_rr_divisor_neg_1(void) +{ + asm volatile (" \ + w2 = -5; \ + w3 = -1; \ + w4 = w2; \ + w2 s%%= w3; \ + r0 = r2; \ + exit; \ +" : + : + : __clobber_all); +} + +SEC("socket") +__description("SMOD32, overflow r/i, INT_MIN/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: w2 = -2147483648") +__xlated("1: w4 = w2") +__xlated("2: w2 = 0") +__xlated("3: r0 = r2") +__xlated("4: exit") +__naked void smod32_overflow_ri(void) +{ + asm volatile (" \ + w2 = %[int_min]; \ + w4 = w2; \ + w2 s%%= -1; \ + r0 = r2; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("SMOD32, r/i, small_val/-1") +__success __retval(0) +__arch_x86_64 +__xlated("0: w2 = 5") +__xlated("1: w4 = w2") +__xlated("2: w2 = 0") +__xlated("3: w0 = w2") +__xlated("4: exit") +__naked void smod32_ri_divisor_neg_1(void) +{ + asm volatile (" \ + w2 = 5; \ + w4 = w2; \ + w2 s%%= -1; \ + w0 = w2; \ + exit; \ +" : + : + : __clobber_all); +} + #else SEC("socket") diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c index 85e48069c9e6..671d9f415dbf 100644 --- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c +++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c @@ -402,7 +402,7 @@ __naked void spill_32bit_of_64bit_fail(void) *(u32*)(r10 - 8) = r1; \ /* 32-bit fill r2 from stack. 
*/ \ r2 = *(u32*)(r10 - 8); \ - /* Compare r2 with another register to trigger find_equal_scalars.\ + /* Compare r2 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. If the ID was mistakenly preserved on spill, this would\ * cause the verifier to think that r1 is also equal to zero in one of\ @@ -441,7 +441,7 @@ __naked void spill_16bit_of_32bit_fail(void) *(u16*)(r10 - 8) = r1; \ /* 16-bit fill r2 from stack. */ \ r2 = *(u16*)(r10 - 8); \ - /* Compare r2 with another register to trigger find_equal_scalars.\ + /* Compare r2 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. If the ID was mistakenly preserved on spill, this would\ * cause the verifier to think that r1 is also equal to zero in one of\ @@ -833,7 +833,7 @@ __naked void spill_64bit_of_64bit_ok(void) *(u64*)(r10 - 8) = r0; \ /* 64-bit fill r1 from stack - should preserve the ID. */\ r1 = *(u64*)(r10 - 8); \ - /* Compare r1 with another register to trigger find_equal_scalars.\ + /* Compare r1 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. \ */ \ @@ -866,7 +866,7 @@ __naked void spill_32bit_of_32bit_ok(void) *(u32*)(r10 - 8) = r0; \ /* 32-bit fill r1 from stack - should preserve the ID. */\ r1 = *(u32*)(r10 - 8); \ - /* Compare r1 with another register to trigger find_equal_scalars.\ + /* Compare r1 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. \ */ \ @@ -899,7 +899,7 @@ __naked void spill_16bit_of_16bit_ok(void) *(u16*)(r10 - 8) = r0; \ /* 16-bit fill r1 from stack - should preserve the ID. */\ r1 = *(u16*)(r10 - 8); \ - /* Compare r1 with another register to trigger find_equal_scalars.\ + /* Compare r1 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. \ */ \ @@ -932,7 +932,7 @@ __naked void spill_8bit_of_8bit_ok(void) *(u8*)(r10 - 8) = r0; \ /* 8-bit fill r1 from stack - should preserve the ID. */\ r1 = *(u8*)(r10 - 8); \ - /* Compare r1 with another register to trigger find_equal_scalars.\ + /* Compare r1 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. \ */ \ @@ -1029,7 +1029,7 @@ __naked void fill_32bit_after_spill_64bit_preserve_id(void) "r1 = *(u32*)(r10 - 4);" #endif " \ - /* Compare r1 with another register to trigger find_equal_scalars. */\ + /* Compare r1 with another register to trigger sync_linked_regs. */\ r2 = 0; \ if r1 != r2 goto l0_%=; \ /* The result of this comparison is predefined. */\ @@ -1070,7 +1070,7 @@ __naked void fill_32bit_after_spill_64bit_clear_id(void) "r2 = *(u32*)(r10 - 4);" #endif " \ - /* Compare r2 with another register to trigger find_equal_scalars.\ + /* Compare r2 with another register to trigger sync_linked_regs.\ * Having one random bit is important here, otherwise the verifier cuts\ * the corners. If the ID was mistakenly preserved on fill, this would\ * cause the verifier to think that r1 is also equal to zero in one of\ @@ -1213,10 +1213,10 @@ __success __log_level(2) * - once for path entry - label 2; * - once for path entry - label 1 - label 2. 
*/ -__msg("r1 = *(u64 *)(r10 -8)") -__msg("exit") -__msg("r1 = *(u64 *)(r10 -8)") -__msg("exit") +__msg("8: (79) r1 = *(u64 *)(r10 -8)") +__msg("9: (95) exit") +__msg("from 2 to 7") +__msg("8: safe") __msg("processed 11 insns") __flag(BPF_F_TEST_STATE_FREQ) __naked void old_stack_misc_vs_cur_ctx_ptr(void) diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c index 6a6fad625f7e..9d415f7ce599 100644 --- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c +++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c @@ -278,7 +278,7 @@ __msg("mark_precise: frame0: last_idx 14 first_idx 9") __msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7") __msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4") __msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4") -__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0") +__msg("mark_precise: frame0: regs=r0,r6 stack= before 10: (bf) r6 = r0") __msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop") /* State entering callback body popped from states stack */ __msg("from 9 to 17: frame1:") diff --git a/tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c b/tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c new file mode 100644 index 000000000000..8d60c634a114 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_tailcall_jit.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +int main(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __array(values, void (void)); +} jmp_table SEC(".maps") = { + .values = { + [0] = (void *) &main, + }, +}; + +__noinline __auxiliary +static __naked int sub(void) +{ + asm volatile ( + "r2 = %[jmp_table] ll;" + "r3 = 0;" + "call 12;" + "exit;" + : + : __imm_addr(jmp_table) + : __clobber_all); +} + +__success +__arch_x86_64 +/* program entry for main(), regular function prologue */ +__jited(" endbr64") +__jited(" nopl (%rax,%rax)") +__jited(" xorq %rax, %rax") +__jited(" pushq %rbp") +__jited(" movq %rsp, %rbp") +/* tail call prologue for program: + * - establish memory location for tail call counter at &rbp[-8]; + * - spill tail_call_cnt_ptr at &rbp[-16]; + * - expect tail call counter to be passed in rax; + * - for entry program rax is a raw counter, value < 33; + * - for tail called program rax is tail_call_cnt_ptr (value > 33). + */ +__jited(" endbr64") +__jited(" cmpq $0x21, %rax") +__jited(" ja L0") +__jited(" pushq %rax") +__jited(" movq %rsp, %rax") +__jited(" jmp L1") +__jited("L0: pushq %rax") /* rbp[-8] = rax */ +__jited("L1: pushq %rax") /* rbp[-16] = rax */ +/* on subprogram call restore rax to be tail_call_cnt_ptr from rbp[-16] + * (cause original rax might be clobbered by this point) + */ +__jited(" movq -0x10(%rbp), %rax") +__jited(" callq 0x{{.*}}") /* call to sub() */ +__jited(" xorl %eax, %eax") +__jited(" leave") +__jited(" {{(retq|jmp 0x)}}") /* return or jump to rethunk */ +__jited("...") +/* subprogram entry for sub(), regular function prologue */ +__jited(" endbr64") +__jited(" nopl (%rax,%rax)") +__jited(" nopl (%rax)") +__jited(" pushq %rbp") +__jited(" movq %rsp, %rbp") +/* tail call prologue for subprogram address of tail call counter + * stored at rbp[-16]. 
+ */ +__jited(" endbr64") +__jited(" pushq %rax") /* rbp[-8] = rax */ +__jited(" pushq %rax") /* rbp[-16] = rax */ +__jited(" movabsq ${{.*}}, %rsi") /* r2 = &jmp_table */ +__jited(" xorl %edx, %edx") /* r3 = 0 */ +/* bpf_tail_call implementation: + * - load tail_call_cnt_ptr from rbp[-16]; + * - if *tail_call_cnt_ptr < 33, increment it and jump to target; + * - otherwise do nothing. + */ +__jited(" movq -0x10(%rbp), %rax") +__jited(" cmpq $0x21, (%rax)") +__jited(" jae L0") +__jited(" nopl (%rax,%rax)") +__jited(" addq $0x1, (%rax)") /* *tail_call_cnt_ptr += 1 */ +__jited(" popq %rax") +__jited(" popq %rax") +__jited(" jmp {{.*}}") /* jump to tail call tgt */ +__jited("L0: leave") +__jited(" {{(retq|jmp 0x)}}") /* return or jump to rethunk */ SEC("tc") +__naked int main(void) +{ + asm volatile ( + "call %[sub];" + "r0 = 0;" + "exit;" + : + : __imm(sub) + : __clobber_all); +} + +char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c new file mode 100644 index 000000000000..a7c0a553aa50 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_vfs_accept.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Google LLC. */ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +#include "bpf_misc.h" +#include "bpf_experimental.h" + +static char buf[64]; + +SEC("lsm.s/file_open") +__success +int BPF_PROG(get_task_exe_file_and_put_kfunc_from_current_sleepable) +{ + struct file *acquired; + + acquired = bpf_get_task_exe_file(bpf_get_current_task_btf()); + if (!acquired) + return 0; + + bpf_put_file(acquired); + return 0; +} + +SEC("lsm/file_open") +__success +int BPF_PROG(get_task_exe_file_and_put_kfunc_from_current_non_sleepable, struct file *file) +{ + struct file *acquired; + + acquired = bpf_get_task_exe_file(bpf_get_current_task_btf()); + if (!acquired) + return 0; + + bpf_put_file(acquired); + return 0; +} + +SEC("lsm.s/task_alloc") +__success +int BPF_PROG(get_task_exe_file_and_put_kfunc_from_argument, + struct task_struct *task) +{ + struct file *acquired; + + acquired = bpf_get_task_exe_file(task); + if (!acquired) + return 0; + + bpf_put_file(acquired); + return 0; +} + +SEC("lsm.s/inode_getattr") +__success +int BPF_PROG(path_d_path_from_path_argument, struct path *path) +{ + int ret; + + ret = bpf_path_d_path(path, buf, sizeof(buf)); + __sink(ret); + return 0; +} + +SEC("lsm.s/file_open") +__success +int BPF_PROG(path_d_path_from_file_argument, struct file *file) +{ + int ret; + struct path *path; + + /* The f_path member is a path which is embedded directly within a + * file. Therefore, pointers to such embedded members are still + * recognized by the BPF verifier as PTR_TRUSTED, as they are + * essentially PTR_TRUSTED with a non-zero fixed offset. + */ + path = &file->f_path; + ret = bpf_path_d_path(path, buf, sizeof(buf)); + __sink(ret); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c new file mode 100644 index 000000000000..d6d3f4fcb24c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Google LLC.
*/ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <linux/limits.h> + +#include "bpf_misc.h" +#include "bpf_experimental.h" + +static char buf[PATH_MAX]; + +SEC("lsm.s/file_open") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +int BPF_PROG(get_task_exe_file_kfunc_null) +{ + struct file *acquired; + + /* Can't pass a NULL pointer to bpf_get_task_exe_file(). */ + acquired = bpf_get_task_exe_file(NULL); + if (!acquired) + return 0; + + bpf_put_file(acquired); + return 0; +} + +SEC("lsm.s/inode_getxattr") +__failure __msg("arg#0 pointer type STRUCT task_struct must point to scalar, or struct with scalar") +int BPF_PROG(get_task_exe_file_kfunc_fp) +{ + u64 x; + struct file *acquired; + struct task_struct *task; + + task = (struct task_struct *)&x; + /* Can't pass random frame pointer to bpf_get_task_exe_file(). */ + acquired = bpf_get_task_exe_file(task); + if (!acquired) + return 0; + + bpf_put_file(acquired); + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("R1 must be referenced or trusted") +int BPF_PROG(get_task_exe_file_kfunc_untrusted) +{ + struct file *acquired; + struct task_struct *parent; + + /* Walking a trusted struct task_struct returned from + * bpf_get_current_task_btf() yields an untrusted pointer. + */ + parent = bpf_get_current_task_btf()->parent; + /* Can't pass untrusted pointer to bpf_get_task_exe_file(). */ + acquired = bpf_get_task_exe_file(parent); + if (!acquired) + return 0; + + bpf_put_file(acquired); + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("Unreleased reference") +int BPF_PROG(get_task_exe_file_kfunc_unreleased) +{ + struct file *acquired; + + acquired = bpf_get_task_exe_file(bpf_get_current_task_btf()); + if (!acquired) + return 0; + + /* Acquired but never released. */ + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("release kernel function bpf_put_file expects") +int BPF_PROG(put_file_kfunc_unacquired, struct file *file) +{ + /* Can't release an unacquired pointer. */ + bpf_put_file(file); + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +int BPF_PROG(path_d_path_kfunc_null) +{ + /* Can't pass NULL value to bpf_path_d_path() kfunc. */ + bpf_path_d_path(NULL, buf, sizeof(buf)); + return 0; +} + +SEC("lsm.s/task_alloc") +__failure __msg("R1 must be referenced or trusted") +int BPF_PROG(path_d_path_kfunc_untrusted_from_argument, struct task_struct *task) +{ + struct path *root; + + /* Walking a trusted argument typically yields an untrusted + * pointer. This is one example of that. + */ + root = &task->fs->root; + bpf_path_d_path(root, buf, sizeof(buf)); + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("R1 must be referenced or trusted") +int BPF_PROG(path_d_path_kfunc_untrusted_from_current) +{ + struct path *pwd; + struct task_struct *current; + + current = bpf_get_current_task_btf(); + /* Walking a trusted pointer returned from bpf_get_current_task_btf() + * yields an untrusted pointer. 
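+ * In general a pointer field loaded from a trusted pointer is not itself + * trusted unless the kernel explicitly annotates it as such, so the walk + * through current->fs below already drops the trusted property.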
+ */ + pwd = &current->fs->pwd; + bpf_path_d_path(pwd, buf, sizeof(buf)); + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("kernel function bpf_path_d_path args#0 expected pointer to STRUCT path but R1 has a pointer to STRUCT file") +int BPF_PROG(path_d_path_kfunc_type_mismatch, struct file *file) +{ + bpf_path_d_path((struct path *)&file->f_task_work, buf, sizeof(buf)); + return 0; +} + +SEC("lsm.s/file_open") +__failure __msg("invalid access to map value, value_size=4096 off=0 size=8192") +int BPF_PROG(path_d_path_kfunc_invalid_buf_sz, struct file *file) +{ + /* bpf_path_d_path() enforces a constraint on the buffer size supplied + * by the BPF LSM program via the __sz annotation. buf here is set to + * PATH_MAX, so let's ensure that the BPF verifier rejects BPF_PROG_LOAD + * attempts if the supplied size and the actual size of the buffer + * mismatch. + */ + bpf_path_d_path(&file->f_path, buf, PATH_MAX * 2); + return 0; +} + +SEC("fentry/vfs_open") +__failure __msg("calling kernel function bpf_path_d_path is not allowed") +int BPF_PROG(path_d_path_kfunc_non_lsm, struct path *path, struct file *f) +{ + /* Calling bpf_path_d_path() from a non-LSM BPF program isn't permitted. + */ + bpf_path_d_path(path, buf, sizeof(buf)); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xdp_redirect_map.c b/tools/testing/selftests/bpf/progs/xdp_redirect_map.c index d037262c8937..682dda8dabbc 100644 --- a/tools/testing/selftests/bpf/progs/xdp_redirect_map.c +++ b/tools/testing/selftests/bpf/progs/xdp_redirect_map.c @@ -10,19 +10,19 @@ struct { __uint(value_size, sizeof(int)); } tx_port SEC(".maps"); -SEC("redirect_map_0") +SEC("xdp") int xdp_redirect_map_0(struct xdp_md *xdp) { return bpf_redirect_map(&tx_port, 0, 0); } -SEC("redirect_map_1") +SEC("xdp") int xdp_redirect_map_1(struct xdp_md *xdp) { return bpf_redirect_map(&tx_port, 1, 0); } -SEC("redirect_map_2") +SEC("xdp") int xdp_redirect_map_2(struct xdp_md *xdp) { return bpf_redirect_map(&tx_port, 2, 0);