| author | Hou Tao <houtao1@huawei.com> | 2024-01-05 18:48:19 +0800 |
|---|---|---|
| committer | Alexei Starovoitov <ast@kernel.org> | 2024-01-23 14:40:21 -0800 |
| commit | 17bda53e43bc41d881ca6a02b3c6f5376c55b3d3 (patch) | |
| tree | 5bcce9245203d486367b3b41f121cb9ee14b8ab7 /tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c | |
| parent | b4b7a4099b8ccea224577003fcf9d321bf0817b7 (diff) | |
selftests/bpf: Test the inlining of bpf_kptr_xchg()
The test uses bpf_prog_get_info_by_fd() to obtain the xlated
instructions of the program first. Since these instructions have
already been rewritten by the verifier, the test then checks whether
the rewritten instructions are as expected. And to ensure LLVM generates
code exactly as expected, use inline assembly and a naked function.
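
The BPF program side is only referenced here through its skeleton (kptr_xchg_inline.skel.h, generated from progs/kptr_xchg_inline.c, which falls outside the diffstat-limited view below). A minimal sketch of such a naked-function program is shown next; the struct name, the SEC("tc") section, the private() macro and the bpf_obj_drop() cleanup path are illustrative assumptions based on common selftest patterns, not a verbatim copy of the upstream file:

```c
// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only, not the upstream progs/kptr_xchg_inline.c. */
#include <linux/types.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h"
#include "bpf_misc.h"

char _license[] SEC("license") = "GPL";

struct bin_data {
	char blob[32];
};

/* Keep the kptr in a non-mmapable global section; loading its address is
 * the "r1 = map[id:xxx][0]+0" instruction the test expects to come first.
 */
#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
private(kptr) struct bin_data __kptr *ptr;

SEC("tc")
__naked void kptr_xchg_inline(void)
{
	/* Hand-written assembly pins the exact instruction sequence so the
	 * user-space test can compare xlated instructions at fixed indices.
	 */
	asm volatile (
	"r1 = %[ptr] ll;"
	"r2 = 0;"
	"call %[bpf_kptr_xchg];"
	"if r0 == 0 goto l0_%=;"
	/* A non-NULL old value is a referenced pointer and must be freed. */
	"r1 = r0;"
	"call %[bpf_obj_drop_impl];"
"l0_%=:"
	"r0 = 0;"
	"exit;"
	:
	: __imm_addr(ptr),
	  __imm(bpf_kptr_xchg),
	  __imm(bpf_obj_drop_impl)
	: __clobber_all);
}

/* Kfuncs referenced only from inline assembly may not get BTF FUNC
 * records, which libbpf needs to resolve the call, so reference
 * bpf_obj_drop() from C code as well.
 */
void __btf_root(void)
{
	bpf_obj_drop(NULL);
}
```

Writing the body as inline assembly keeps LLVM from reordering or optimizing the sequence, which is what allows the user-space test below to compare xlated instructions at fixed offsets.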
Suggested-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20240105104819.3916743-4-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c')
-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c | 51 |
1 files changed, 51 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c
new file mode 100644
index 000000000000..5a4bee1cf970
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <test_progs.h>
+
+#include "linux/filter.h"
+#include "kptr_xchg_inline.skel.h"
+
+void test_kptr_xchg_inline(void)
+{
+	struct kptr_xchg_inline *skel;
+	struct bpf_insn *insn = NULL;
+	struct bpf_insn exp;
+	unsigned int cnt;
+	int err;
+
+#if !defined(__x86_64__)
+	test__skip();
+	return;
+#endif
+
+	skel = kptr_xchg_inline__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open_load"))
+		return;
+
+	err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt);
+	if (!ASSERT_OK(err, "prog insn"))
+		goto out;
+
+	/* The original instructions are:
+	 * r1 = map[id:xxx][0]+0
+	 * r2 = 0
+	 * call bpf_kptr_xchg#yyy
+	 *
+	 * call bpf_kptr_xchg#yyy will be inlined as:
+	 * r0 = r2
+	 * r0 = atomic64_xchg((u64 *)(r1 +0), r0)
+	 */
+	if (!ASSERT_GT(cnt, 5, "insn cnt"))
+		goto out;
+
+	exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
+	if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov"))
+		goto out;
+
+	exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
+	if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg"))
+		goto out;
+out:
+	free(insn);
+	kptr_xchg_inline__destroy(skel);
+}
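
For context, the verifier-side rewrite that produces the two instructions checked above lives in do_misc_fixups() in kernel/bpf/verifier.c and was added earlier in this series. The fragment below is a rough sketch of that fixup, reusing that function's local variables; the guarding conditions are abbreviated (the real code also checks JIT availability and instruction blinding), so treat it as an approximation rather than a verbatim copy:

```c
	/* Abbreviated sketch of the bpf_kptr_xchg() inlining inside the
	 * do_misc_fixups() instruction loop. When the JIT supports pointer
	 * exchange, the helper call is replaced by two instructions, which
	 * is exactly the pair the selftest compares against.
	 */
	if (insn->imm == BPF_FUNC_kptr_xchg && bpf_jit_supports_ptr_xchg()) {
		/* r0 = r2: move the new pointer into the return register */
		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
		/* r0 = atomic64_xchg((u64 *)(r1 + 0), r0): swap with the
		 * kptr slot and return the old value in r0
		 */
		insn_buf[1] = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
		cnt = 2;

		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
		if (!new_prog)
			return -ENOMEM;

		delta    += cnt - 1;
		env->prog = prog = new_prog;
		insn      = new_prog->insnsi + i + delta;
		continue;
	}
```

Because the program's xlated code starts with the two-slot ld_imm64 that loads the map value address followed by the r2 = 0 move, the inlined pair lands at insn[3] and insn[4], which is what the memcmp() checks in the test rely on. The test is restricted to x86-64 because that is the architecture where the JIT reports pointer-exchange support in this series.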