| author | Linus Torvalds <[email protected]> | 2021-11-01 13:24:43 -0700 | 
|---|---|---|
| committer | Linus Torvalds <[email protected]> | 2021-11-01 13:24:43 -0700 | 
| commit | 43aa0a195f06101bcb5d8d711bba0dd24b33a1a0 (patch) | |
| tree | 0236661db875f519cc80e11fde210fdfc9b2be76 /arch/x86/net/bpf_jit_comp32.c | |
| parent | 595b28fb0c8949463d8ec1e485f36d17c870ddb2 (diff) | |
| parent | 87c87ecd00c54ecd677798cb49ef27329e0fab41 (diff) | |
Merge tag 'objtool-core-2021-10-31' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull objtool updates from Thomas Gleixner:
 - Improve retpoline code patching by separating it from alternatives,
   which reduces the memory footprint and allows better optimizations in
   the actual runtime patching (a byte-level sketch of a retpoline thunk
   follows this list)
 - Add proper retpoline support for x86/BPF
 - Address noinstr warnings in x86/kvm, lockdep and paravirtualization
   code
 - Add support to handle pv_ops indirect calls in the noinstr analysis
 - Classify symbols upfront and cache the result to avoid redundant
   str*cmp() invocations.
 - Add a CFI hash to reduce memory consumption, which also reduces
   runtime on an allyesconfig by ~50%
 - Adjust the XEN code to make objtool handling more robust and, as a
   side effect, prevent text fragmentation due to the placement of the
   hypercall page.
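
For readers unfamiliar with the mechanism behind the two retpoline bullets: an indirect branch such as `jmp *%edx` is rerouted through a small per-register thunk that parks speculation in a harmless loop and only reaches the real target architecturally, via a `ret`. The sketch below is purely illustrative and is not kernel code (the name toy_retpoline_edx_thunk and the standalone program around it are made up); it just spells out the standard call / speculation-trap / ret sequence for %edx as raw x86 bytes.

```c
#include <stdio.h>

/* Classic retpoline sequence for %edx, encoded as raw bytes (illustration only). */
static const unsigned char toy_retpoline_edx_thunk[] = {
	0xe8, 0x07, 0x00, 0x00, 0x00,	/* call 1f         ; push address of the "mov" below        */
	/* 0: speculation trap, only ever reached speculatively */
	0xf3, 0x90,			/* pause                                                     */
	0x0f, 0xae, 0xe8,		/* lfence                                                    */
	0xeb, 0xf9,			/* jmp 0b          ; spin until speculation is squashed      */
	/* 1: */
	0x89, 0x14, 0x24,		/* mov %edx,(%esp) ; overwrite the pushed return address    */
	0xc3,				/* ret             ; architecturally jumps to the %edx value */
};

int main(void)
{
	/* Nothing executes the thunk here; just show its size (16 bytes). */
	printf("toy thunk: %zu bytes\n", sizeof(toy_retpoline_edx_thunk));
	return 0;
}
```

With one such thunk per register laid out as an array (see "x86/retpoline: Create a retpoline thunk array" below), both the alternatives patcher and the BPF JIT can emit a plain near jump to the thunk matching whichever register holds the branch target.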
* tag 'objtool-core-2021-10-31' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (41 commits)
  bpf,x86: Respect X86_FEATURE_RETPOLINE*
  bpf,x86: Simplify computing label offsets
  x86,bugs: Unconditionally allow spectre_v2=retpoline,amd
  x86/alternative: Add debug prints to apply_retpolines()
  x86/alternative: Try inline spectre_v2=retpoline,amd
  x86/alternative: Handle Jcc __x86_indirect_thunk_\reg
  x86/alternative: Implement .retpoline_sites support
  x86/retpoline: Create a retpoline thunk array
  x86/retpoline: Move the retpoline thunk declarations to nospec-branch.h
  x86/asm: Fixup odd GEN-for-each-reg.h usage
  x86/asm: Fix register order
  x86/retpoline: Remove unused replacement symbols
  objtool,x86: Replace alternatives with .retpoline_sites
  objtool: Shrink struct instruction
  objtool: Explicitly avoid self modifying code in .altinstr_replacement
  objtool: Classify symbols
  objtool: Support pv_ops indirect calls for noinstr
  x86/xen: Rework the xen_{cpu,irq,mmu}_ops arrays
  x86/xen: Mark xen_force_evtchn_callback() noinstr
  x86/xen: Make irq_disable() noinstr
  ...
Diffstat (limited to 'arch/x86/net/bpf_jit_comp32.c')
| -rw-r--r-- | arch/x86/net/bpf_jit_comp32.c | 22 | 
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 3bfda5f502cb..da9b7cfa4632 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -15,6 +15,7 @@
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
 #include <asm/nospec-branch.h>
+#include <asm/asm-prototypes.h>
 #include <linux/bpf.h>
 
 /*
@@ -1267,6 +1268,21 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth)
 	*pprog = prog;
 }
 
+static int emit_jmp_edx(u8 **pprog, u8 *ip)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+#ifdef CONFIG_RETPOLINE
+	EMIT1_off32(0xE9, (u8 *)__x86_indirect_thunk_edx - (ip + 5));
+#else
+	EMIT2(0xFF, 0xE2);
+#endif
+	*pprog = prog;
+
+	return cnt;
+}
+
 /*
  * Generate the following code:
  * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
@@ -1280,7 +1296,7 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth)
  *   goto *(prog->bpf_func + prologue_size);
  * out:
  */
-static void emit_bpf_tail_call(u8 **pprog)
+static void emit_bpf_tail_call(u8 **pprog, u8 *ip)
 {
 	u8 *prog = *pprog;
 	int cnt = 0;
@@ -1362,7 +1378,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 * eax == ctx (1st arg)
 	 * edx == prog->bpf_func + prologue_size
	 */
-	RETPOLINE_EDX_BPF_JIT();
+	cnt += emit_jmp_edx(&prog, ip + cnt);
 
 	if (jmp_label1 == -1)
 		jmp_label1 = cnt;
@@ -2122,7 +2138,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			break;
 		}
 		case BPF_JMP | BPF_TAIL_CALL:
-			emit_bpf_tail_call(&prog);
+			emit_bpf_tail_call(&prog, image + addrs[i - 1]);
 			break;
 
 		/* cond jump */
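
The interesting line in the new emit_jmp_edx() helper is the CONFIG_RETPOLINE branch: 0xE9 is a 5-byte near jump whose 32-bit displacement is relative to the end of the instruction, hence the "- (ip + 5)". A minimal sketch of that arithmetic follows, with made-up addresses (nothing here is kernel code):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ip    = 0x08048100;	/* hypothetical address of the emitted jmp            */
	uint32_t thunk = 0x08049000;	/* hypothetical __x86_indirect_thunk_edx entry point  */

	/* rel32 operand stored right after the 0xE9 opcode byte */
	int32_t rel32 = (int32_t)(thunk - (ip + 5));

	/* the CPU resolves the target as: end of the 5-byte jmp + rel32 */
	uint32_t target = ip + 5 + (uint32_t)rel32;

	printf("rel32 = 0x%x, resolved target = 0x%x\n",
	       (unsigned int)rel32, (unsigned int)target);
	return target == thunk ? 0 : 1;
}
```

Because the displacement depends on the absolute address of the emitted instruction, emit_bpf_tail_call() now takes an ip argument and do_jit() passes image + addrs[i - 1] down to it, as the last two hunks show.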