| author | Alexei Starovoitov <[email protected]> | 2024-08-22 08:35:21 -0700 |
|---|---|---|
| committer | Alexei Starovoitov <[email protected]> | 2024-08-22 08:35:22 -0700 |
| commit | d352eca2662734cdd5ef90df1f8bc28b9505e36f (patch) | |
| tree | 12248bcf91ee0c83c6f25e055a42a85acfac3323 /include/linux | |
| parent | 6d641ca50d7ec7d5e4e889c3f8ea22afebc2a403 (diff) | |
| parent | 8c2e043daadad021fc501ac64cce131f48c3ca46 (diff) | |
Merge branch 'support-bpf_fastcall-patterns-for-calls-to-kfuncs'
Eduard Zingerman says:
====================
support bpf_fastcall patterns for calls to kfuncs
As an extension of [1], allow bpf_fastcall patterns for kfuncs:
- pattern rules are the same as for helpers;
- spill/fill removal is allowed only for kfuncs listed in
  is_fastcall_kfunc_call() (under the assumption that such kfuncs will
  always be members of special_kfunc_list).
Allow the bpf_fastcall rewrite for bpf_cast_to_kern_ctx() and
bpf_rdonly_cast() in order to write selftests for this feature.
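To make the setting concrete, below is a minimal sketch of how a BPF program typically calls the bpf_cast_to_kern_ctx() kfunc; the program type, section name, field access, and the extern declaration are illustrative assumptions, not code from this series:

```c
/* Sketch only: a TC program casting its __sk_buff context to the kernel
 * struct sk_buff via the bpf_cast_to_kern_ctx() kfunc. Under the
 * bpf_fastcall contract, a one-argument kfunc scratches only r0 and r1,
 * so compiler-emitted spills/fills of r2-r5 around the call can be removed.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern void *bpf_cast_to_kern_ctx(void *obj) __ksym;

SEC("tc")
int cast_ctx_example(struct __sk_buff *ctx)
{
	struct sk_buff *kskb = bpf_cast_to_kern_ctx(ctx);

	/* read a field through the kernel-side pointer */
	return kskb->mark ? 1 : 0;
}

char _license[] SEC("license") = "GPL";
```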
After this patch set, the verifier would rewrite the program below:
r2 = 1
*(u64 *)(r10 - 32) = r2
call %[bpf_cast_to_kern_ctx]
r2 = *(u64 *)(r10 - 32)
r0 = r2;"
As follows:
r2 = 1 /* spill/fill at r10[-32] is removed */
r0 = r1 /* replacement for bpf_cast_to_kern_ctx() */
r0 = r2
exit
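The quoted program is inline BPF assembly (hence the %[bpf_cast_to_kern_ctx] operand). A sketch of how such a test body might be written in C, in the style of the BPF verifier selftests, is shown below; the function name, section, and helper macros (__naked, __imm, __clobber_all) are assumptions mirroring the selftest conventions, not the exact test added by this series:

```c
/* Sketch: producing the spill/fill pattern around a kfunc call from C,
 * selftest style. Assumes bpf_cast_to_kern_ctx() is declared elsewhere as
 * extern void *bpf_cast_to_kern_ctx(void *) __ksym;
 */
SEC("raw_tp")
__naked void cast_to_kern_ctx_spill_fill(void)
{
	asm volatile (
	"r2 = 1;"
	"*(u64 *)(r10 - 32) = r2;"	/* spill r2, live across the call */
	"call %[bpf_cast_to_kern_ctx];"
	"r2 = *(u64 *)(r10 - 32);"	/* fill r2 back after the call */
	"r0 = r2;"
	"exit;"
	:
	: __imm(bpf_cast_to_kern_ctx)
	: __clobber_all);
}
```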
Also, the attribute used by the LLVM implementation of the feature has
been changed from no_caller_saved_registers to bpf_fastcall (see [2]).
This patch set replaces references to nocsr with references to
bpf_fastcall to keep the LLVM and kernel parts in sync.
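For reference, a minimal sketch of how the renamed attribute is spelled at the C level; the wrapper macro name and the declaration below are illustrative assumptions based on [2], not code from this series:

```c
/* Illustrative only: bpf_fastcall is the BPF-side replacement for the
 * no_caller_saved_registers attribute. Marking a declaration with it lets
 * the compiler assume the callee preserves all registers except r0 and the
 * registers used to pass its arguments.
 */
#define __bpf_fastcall __attribute__((bpf_fastcall))

extern void *bpf_cast_to_kern_ctx(void *obj) __bpf_fastcall;
```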
[1] no_caller_saved_registers attribute for helper calls
https://lore.kernel.org/bpf/[email protected]/
[2] [BPF] introduce __attribute__((bpf_fastcall))
https://github.com/llvm/llvm-project/pull/105417
Changes v2->v3:
- added a patch fixing arch_mask handling in test_loader,
otherwise newly added tests for the feature were skipped
(a fix for a regression introduced by a recent commit);
- fixed a warning about an unused 'params' variable;
- applied stylistic fixes suggested by Yonghong;
- added acks from Yonghong;
Changes v1->v2:
- added two patches replacing all mentions of nocsr with bpf_fastcall
(suggested by Andrii);
- removed KF_NOCSR flag (suggested by Yonghong).
v1: https://lore.kernel.org/bpf/[email protected]/
v2: https://lore.kernel.org/bpf/[email protected]/
====================
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/bpf.h | 6 |
| -rw-r--r-- | include/linux/bpf_verifier.h | 18 |
2 files changed, 12 insertions, 12 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f0192c173ed8..00dc4dd28cbd 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -808,12 +808,12 @@ struct bpf_func_proto {
 	bool gpl_only;
 	bool pkt_access;
 	bool might_sleep;
-	/* set to true if helper follows contract for gcc/llvm
-	 * attribute no_caller_saved_registers:
+	/* set to true if helper follows contract for llvm
+	 * attribute bpf_fastcall:
 	 * - void functions do not scratch r0
 	 * - functions taking N arguments scratch only registers r1-rN
 	 */
-	bool allow_nocsr;
+	bool allow_fastcall;
 	enum bpf_return_type ret_type;
 	union {
 		struct {
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 5cea15c81b8a..634a302a39e3 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -577,13 +577,13 @@ struct bpf_insn_aux_data {
 	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
 	u8 alu_state; /* used in combination with alu_limit */
 	/* true if STX or LDX instruction is a part of a spill/fill
-	 * pattern for a no_caller_saved_registers call.
+	 * pattern for a bpf_fastcall call.
 	 */
-	u8 nocsr_pattern:1;
+	u8 fastcall_pattern:1;
 	/* for CALL instructions, a number of spill/fill pairs in the
-	 * no_caller_saved_registers pattern.
+	 * bpf_fastcall pattern.
 	 */
-	u8 nocsr_spills_num:3;
+	u8 fastcall_spills_num:3;

 	/* below fields are initialized once */
 	unsigned int orig_idx; /* original instruction index */
@@ -653,10 +653,10 @@ struct bpf_subprog_info {
 	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
 	u16 stack_depth; /* max. stack depth used by this function */
 	u16 stack_extra;
-	/* offsets in range [stack_depth .. nocsr_stack_off)
-	 * are used for no_caller_saved_registers spills and fills.
+	/* offsets in range [stack_depth .. fastcall_stack_off)
+	 * are used for bpf_fastcall spills and fills.
 	 */
-	s16 nocsr_stack_off;
+	s16 fastcall_stack_off;
 	bool has_tail_call: 1;
 	bool tail_call_reachable: 1;
 	bool has_ld_abs: 1;
@@ -664,8 +664,8 @@ struct bpf_subprog_info {
 	bool is_async_cb: 1;
 	bool is_exception_cb: 1;
 	bool args_cached: 1;
-	/* true if nocsr stack region is used by functions that can't be inlined */
-	bool keep_nocsr_stack: 1;
+	/* true if bpf_fastcall stack region is used by functions that can't be inlined */
+	bool keep_fastcall_stack: 1;
 	u8 arg_cnt;
 	struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
 };
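As a usage note on the renamed bpf_func_proto field above, here is a hedged sketch of how a helper opts into the bpf_fastcall contract, following the pattern established in [1]; the exact proto shown is illustrative and not part of this diff:

```c
/* Sketch: bpf_get_smp_processor_id() takes no arguments and only writes r0,
 * so it satisfies the contract documented next to allow_fastcall above.
 */
const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.allow_fastcall	= true,
};
```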