diff options
Diffstat (limited to 'arch/x86/net/bpf_jit_comp.c')
| -rw-r--r-- | arch/x86/net/bpf_jit_comp.c | 66 | 
1 file changed, 48 insertions, 18 deletions
| diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 0fe6aacef3db..9ea57389c554 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1341,9 +1341,10 @@ st:			if (is_imm8(insn->off))  			if (insn->imm == (BPF_AND | BPF_FETCH) ||  			    insn->imm == (BPF_OR | BPF_FETCH) ||  			    insn->imm == (BPF_XOR | BPF_FETCH)) { -				u8 *branch_target;  				bool is64 = BPF_SIZE(insn->code) == BPF_DW;  				u32 real_src_reg = src_reg; +				u32 real_dst_reg = dst_reg; +				u8 *branch_target;  				/*  				 * Can't be implemented with a single x86 insn. @@ -1354,11 +1355,13 @@ st:			if (is_imm8(insn->off))  				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);  				if (src_reg == BPF_REG_0)  					real_src_reg = BPF_REG_AX; +				if (dst_reg == BPF_REG_0) +					real_dst_reg = BPF_REG_AX;  				branch_target = prog;  				/* Load old value */  				emit_ldx(&prog, BPF_SIZE(insn->code), -					 BPF_REG_0, dst_reg, insn->off); +					 BPF_REG_0, real_dst_reg, insn->off);  				/*  				 * Perform the (commutative) operation locally,  				 * put the result in the AUX_REG. 
@@ -1369,7 +1372,8 @@ st:			if (is_imm8(insn->off))  				      add_2reg(0xC0, AUX_REG, real_src_reg));  				/* Attempt to swap in new value */  				err = emit_atomic(&prog, BPF_CMPXCHG, -						  dst_reg, AUX_REG, insn->off, +						  real_dst_reg, AUX_REG, +						  insn->off,  						  BPF_SIZE(insn->code));  				if (WARN_ON(err))  					return err; @@ -1383,11 +1387,10 @@ st:			if (is_imm8(insn->off))  				/* Restore R0 after clobbering RAX */  				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);  				break; -  			}  			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg, -						  insn->off, BPF_SIZE(insn->code)); +					  insn->off, BPF_SIZE(insn->code));  			if (err)  				return err;  			break; @@ -1744,7 +1747,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,  }  static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, -			   struct bpf_prog *p, int stack_size, bool mod_ret) +			   struct bpf_prog *p, int stack_size, bool save_ret)  {  	u8 *prog = *pprog;  	u8 *jmp_insn; @@ -1777,11 +1780,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,  	if (emit_call(&prog, p->bpf_func, prog))  		return -EINVAL; -	/* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return +	/* +	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return  	 * of the previous call which is then passed on the stack to  	 * the next BPF program. +	 * +	 * BPF_TRAMP_FENTRY trampoline may need to return the return +	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.  	 
*/ -	if (mod_ret) +	if (save_ret)  		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);  	/* replace 2 nops with JE insn, since jmp target is known */ @@ -1828,13 +1835,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)  }  static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, -		      struct bpf_tramp_progs *tp, int stack_size) +		      struct bpf_tramp_progs *tp, int stack_size, +		      bool save_ret)  {  	int i;  	u8 *prog = *pprog;  	for (i = 0; i < tp->nr_progs; i++) { -		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false)) +		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, +				    save_ret))  			return -EINVAL;  	}  	*pprog = prog; @@ -1877,6 +1886,23 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,  	return 0;  } +static bool is_valid_bpf_tramp_flags(unsigned int flags) +{ +	if ((flags & BPF_TRAMP_F_RESTORE_REGS) && +	    (flags & BPF_TRAMP_F_SKIP_FRAME)) +		return false; + +	/* +	 * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops, +	 * and it must be used alone. +	 */ +	if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) && +	    (flags & ~BPF_TRAMP_F_RET_FENTRY_RET)) +		return false; + +	return true; +} +  /* Example:   * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);   * its 'struct btf_func_model' will be nr_args=2 @@ -1949,17 +1975,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i  	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];  	u8 **branches = NULL;  	u8 *prog; +	bool save_ret;  	/* x86-64 supports up to 6 arguments. 
7+ can be added in the future */  	if (nr_args > 6)  		return -ENOTSUPP; -	if ((flags & BPF_TRAMP_F_RESTORE_REGS) && -	    (flags & BPF_TRAMP_F_SKIP_FRAME)) +	if (!is_valid_bpf_tramp_flags(flags))  		return -EINVAL; -	if (flags & BPF_TRAMP_F_CALL_ORIG) -		stack_size += 8; /* room for return value of orig_call */ +	/* room for return value of orig_call or fentry prog */ +	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); +	if (save_ret) +		stack_size += 8;  	if (flags & BPF_TRAMP_F_IP_ARG)  		stack_size += 8; /* room for IP address argument */ @@ -2005,7 +2033,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i  	}  	if (fentry->nr_progs) -		if (invoke_bpf(m, &prog, fentry, stack_size)) +		if (invoke_bpf(m, &prog, fentry, stack_size, +			       flags & BPF_TRAMP_F_RET_FENTRY_RET))  			return -EINVAL;  	if (fmod_ret->nr_progs) { @@ -2052,7 +2081,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i  	}  	if (fexit->nr_progs) -		if (invoke_bpf(m, &prog, fexit, stack_size)) { +		if (invoke_bpf(m, &prog, fexit, stack_size, false)) {  			ret = -EINVAL;  			goto cleanup;  		} @@ -2072,9 +2101,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i  			ret = -EINVAL;  			goto cleanup;  		} -		/* restore original return value back into RAX */ -		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);  	} +	/* restore return value of orig_call or fentry prog back into RAX */ +	if (save_ret) +		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);  	EMIT1(0x5B); /* pop rbx */  	EMIT1(0xC9); /* leave */ |