author		Alexei Starovoitov <[email protected]>	2023-11-01 22:54:28 -0700
committer	Alexei Starovoitov <[email protected]>	2023-11-01 22:54:28 -0700
commit		94e88b8a3e50d3e60c3ba6a5c316729587595210 (patch)
tree		168c8cc8161f049ccfa8fb6f39ff3f6a0d8454bf
parent		698b8c5e3b5505ac00102caf9e4843b71192b586 (diff)
parent		3c41971550f58f2e006c58aa71e8c23ad312110f (diff)
Merge branch 'bpf-fix-precision-tracking-for-bpf_alu-bpf_to_be-bpf_end'
Shung-Hsi Yu says:
====================
bpf: Fix precision tracking for BPF_ALU | BPF_TO_BE | BPF_END
Changes since v1:
- add test for negation and bswap (Alexei, Eduard)
- add test for BPF_TO_LE as well to cover all types of BPF_END opcode
- remove the vals map and trigger backtracking with a jump instead, based on
  Eduard's code
- v1 at https://lore.kernel.org/bpf/[email protected]
This patchset fixes, and adds a selftest for, the issue reported by Mohamed
Mahmoud and Toke Høiland-Jørgensen where the kernel can run into a
verifier bug during backtracking of a BPF_ALU | BPF_TO_BE | BPF_END
instruction[0]. As seen in the verifier log below, r0 was incorrectly
marked as precise even though its value was not being used.
Patch 1 fixes the issue based on Andrii's analysis, and patch 2 adds a
selftest for this case using inline assembly. Please see the individual
patches for details.
...
mark_precise: frame2: regs=r2 stack= before 1891: (77) r2 >>= 56
mark_precise: frame2: regs=r2 stack= before 1890: (dc) r2 = be64 r2
mark_precise: frame2: regs=r0,r2 stack= before 1889: (73) *(u8 *)(r1 +47) = r3
...
mark_precise: frame2: regs=r0 stack= before 212: (85) call pc+1617
BUG regs 1
processed 5112 insns (limit 1000000) max_states_per_insn 4 total_states 92 peak_states 90 mark_read 20
0: https://lore.kernel.org/r/[email protected]
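For reference, the opcode being backtracked here keeps its operand width in
the immediate and leaves the source-register field reserved, so there is no
sreg to propagate precision to. Below is a minimal userspace sketch of that
layout, assuming only the uapi struct bpf_insn; the bpf_be64_insn() helper is
hypothetical and for illustration only, not part of this series:

#include <linux/bpf.h>

/* Illustrative only: hand-build a "rX = be64 rX" instruction.
 * The swap width lives in the immediate and src_reg stays zero
 * (reserved), so backtracking has no source register to mark.
 */
static struct bpf_insn bpf_be64_insn(__u8 dst)
{
	return (struct bpf_insn) {
		.code    = BPF_ALU | BPF_END | BPF_TO_BE,	/* 0xdc, as in the log above */
		.dst_reg = dst,
		.src_reg = 0,		/* reserved and unused */
		.off     = 0,
		.imm     = 64,		/* operand width in bits */
	};
}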
Shung-Hsi Yu (2):
bpf: Fix precision tracking for BPF_ALU | BPF_TO_BE | BPF_END
selftests/bpf: precision tracking test for BPF_NEG and BPF_END
kernel/bpf/verifier.c | 7 +-
.../selftests/bpf/prog_tests/verifier.c | 2 +
.../selftests/bpf/progs/verifier_precision.c | 93 +++++++++++++++++++
3 files changed, 101 insertions(+), 1 deletion(-)
create mode 100644 tools/testing/selftests/bpf/progs/verifier_precision.c
base-commit: c17cda15cc86e65e9725641daddcd7a63cc9ad01
====================
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
-rw-r--r--	kernel/bpf/verifier.c	7
-rw-r--r--	tools/testing/selftests/bpf/prog_tests/verifier.c	2
-rw-r--r--	tools/testing/selftests/bpf/progs/verifier_precision.c	93
3 files changed, 101 insertions, 1 deletion
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index def99b1a2b17..bd1c42eb540f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3742,7 +3742,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
 	if (class == BPF_ALU || class == BPF_ALU64) {
 		if (!bt_is_reg_set(bt, dreg))
 			return 0;
-		if (opcode == BPF_MOV) {
+		if (opcode == BPF_END || opcode == BPF_NEG) {
+			/* sreg is reserved and unused
+			 * dreg still need precision before this insn
+			 */
+			return 0;
+		} else if (opcode == BPF_MOV) {
 			if (BPF_SRC(insn->code) == BPF_X) {
 				/* dreg = sreg or dreg = (s8, s16, s32)sreg
 				 * dreg needs precision after this insn
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index e3e68c97b40c..e5c61aa6604a 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -46,6 +46,7 @@
 #include "verifier_movsx.skel.h"
 #include "verifier_netfilter_ctx.skel.h"
 #include "verifier_netfilter_retcode.skel.h"
+#include "verifier_precision.skel.h"
 #include "verifier_prevent_map_lookup.skel.h"
 #include "verifier_raw_stack.skel.h"
 #include "verifier_raw_tp_writable.skel.h"
@@ -153,6 +154,7 @@ void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
 void test_verifier_movsx(void) { RUN(verifier_movsx); }
 void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
 void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
+void test_verifier_precision(void) { RUN(verifier_precision); }
 void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); }
 void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); }
 void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); }
diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
new file mode 100644
index 000000000000..193c0f8272d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_precision.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 SUSE LLC */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
+__naked int bpf_neg(void)
+{
+	asm volatile (
+		"r2 = 8;"
+		"r2 = -r2;"
+		"if r2 != -8 goto 1f;"
+		"r1 = r10;"
+		"r1 += r2;"
+	"1:"
+		"r0 = 0;"
+		"exit;"
+		::: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_to_le(void)
+{
+	asm volatile (
+		"r2 = 0;"
+		"r2 = le16 r2;"
+		"if r2 != 0 goto 1f;"
+		"r1 = r10;"
+		"r1 += r2;"
+	"1:"
+		"r0 = 0;"
+		"exit;"
+		::: __clobber_all);
+}
+
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_to_be(void)
+{
+	asm volatile (
+		"r2 = 0;"
+		"r2 = be16 r2;"
+		"if r2 != 0 goto 1f;"
+		"r1 = r10;"
+		"r1 += r2;"
+	"1:"
+		"r0 = 0;"
+		"exit;"
+		::: __clobber_all);
+}
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+	__clang_major__ >= 18
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_bswap(void)
+{
+	asm volatile (
+		"r2 = 0;"
+		"r2 = bswap16 r2;"
+		"if r2 != 0 goto 1f;"
+		"r1 = r10;"
+		"r1 += r2;"
+	"1:"
+		"r0 = 0;"
+		"exit;"
+		::: __clobber_all);
+}
+
+#endif /* v4 instruction */
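
As a usage note, further coverage can follow the same __naked/__msg pattern.
A hypothetical extra case (not part of this series, assuming the same
bpf_misc.h macros and includes as the file above) for the 32-bit BPF_TO_BE
variant might look like this; the expected log lines are illustrative:

/* Hypothetical extra case: backtrack through a 32-bit BPF_TO_BE conversion
 * and check that precision tracking stops at the constant load.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be32 r2")
__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
__naked int bpf_end_to_be32(void)
{
	asm volatile (
		"r2 = 0;"
		"r2 = be32 r2;"
		"if r2 != 0 goto 1f;"
		"r1 = r10;"
		"r1 += r2;"
	"1:"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}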