diff options
Diffstat (limited to 'kernel/bpf/verifier.c')
| -rw-r--r-- | kernel/bpf/verifier.c | 77 | 
1 files changed, 62 insertions, 15 deletions
| diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 799b2451ef2d..c48ca2a34b5e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -653,6 +653,10 @@ static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)  {  	struct bpf_verifier_state *parent = state->parent; +	if (regno == BPF_REG_FP) +		/* We don't need to worry about FP liveness because it's read-only */ +		return; +  	while (parent) {  		/* if read wasn't screened by an earlier write ... */  		if (state->regs[regno].live & REG_LIVE_WRITTEN) @@ -1112,7 +1116,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn  		/* ctx accesses must be at a fixed offset, so that we can  		 * determine what type of data were returned.  		 */ -		if (!tnum_is_const(reg->var_off)) { +		if (reg->off) { +			verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n", +				regno, reg->off, off - reg->off); +			return -EACCES; +		} +		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {  			char tn_buf[48];  			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); @@ -1120,7 +1129,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn  				tn_buf, off, size);  			return -EACCES;  		} -		off += reg->var_off.value;  		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);  		if (!err && t == BPF_READ && value_regno >= 0) {  			/* ctx access returns either a scalar, or a @@ -2345,6 +2353,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)  				 * copy register state to dest reg  				 */  				regs[insn->dst_reg] = regs[insn->src_reg]; +				regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;  			} else {  				/* R1 = (u32) R2 */  				if (is_pointer_value(env, insn->src_reg)) { @@ -2421,12 +2430,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)  }  static void find_good_pkt_pointers(struct bpf_verifier_state *state, -				  
 struct bpf_reg_state *dst_reg) +				   struct bpf_reg_state *dst_reg, +				   bool range_right_open)  {  	struct bpf_reg_state *regs = state->regs, *reg; +	u16 new_range;  	int i; -	if (dst_reg->off < 0) +	if (dst_reg->off < 0 || +	    (dst_reg->off == 0 && range_right_open))  		/* This doesn't give us any range */  		return; @@ -2437,9 +2449,13 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,  		 */  		return; -	/* LLVM can generate four kind of checks: +	new_range = dst_reg->off; +	if (range_right_open) +		new_range--; + +	/* Examples for register markings:  	 * -	 * Type 1/2: +	 * pkt_data in dst register:  	 *  	 *   r2 = r3;  	 *   r2 += 8; @@ -2456,7 +2472,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,  	 *     r2=pkt(id=n,off=8,r=0)  	 *     r3=pkt(id=n,off=0,r=0)  	 * -	 * Type 3/4: +	 * pkt_data in src register:  	 *  	 *   r2 = r3;  	 *   r2 += 8; @@ -2474,7 +2490,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,  	 *     r3=pkt(id=n,off=0,r=0)  	 *  	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) -	 * so that range of bytes [r3, r3 + 8) is safe to access. +	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) +	 * and [r3, r3 + 8-1) respectively is safe to access depending on +	 * the check.  	 */  	/* If our ids match, then we must have the same max_value.  
And we @@ -2485,14 +2503,14 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,  	for (i = 0; i < MAX_BPF_REG; i++)  		if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)  			/* keep the maximum range already checked */ -			regs[i].range = max_t(u16, regs[i].range, dst_reg->off); +			regs[i].range = max(regs[i].range, new_range);  	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {  		if (state->stack_slot_type[i] != STACK_SPILL)  			continue;  		reg = &state->spilled_regs[i / BPF_REG_SIZE];  		if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) -			reg->range = max_t(u16, reg->range, dst_reg->off); +			reg->range = max(reg->range, new_range);  	}  } @@ -2856,19 +2874,43 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,  	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&  		   dst_reg->type == PTR_TO_PACKET &&  		   regs[insn->src_reg].type == PTR_TO_PACKET_END) { -		find_good_pkt_pointers(this_branch, dst_reg); +		/* pkt_data' > pkt_end */ +		find_good_pkt_pointers(this_branch, dst_reg, false); +	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && +		   dst_reg->type == PTR_TO_PACKET_END && +		   regs[insn->src_reg].type == PTR_TO_PACKET) { +		/* pkt_end > pkt_data' */ +		find_good_pkt_pointers(other_branch, &regs[insn->src_reg], true); +	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && +		   dst_reg->type == PTR_TO_PACKET && +		   regs[insn->src_reg].type == PTR_TO_PACKET_END) { +		/* pkt_data' < pkt_end */ +		find_good_pkt_pointers(other_branch, dst_reg, true);  	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && +		   dst_reg->type == PTR_TO_PACKET_END && +		   regs[insn->src_reg].type == PTR_TO_PACKET) { +		/* pkt_end < pkt_data' */ +		find_good_pkt_pointers(this_branch, &regs[insn->src_reg], false); +	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&  		   dst_reg->type == PTR_TO_PACKET &&  		   regs[insn->src_reg].type == PTR_TO_PACKET_END) { -		
find_good_pkt_pointers(other_branch, dst_reg); +		/* pkt_data' >= pkt_end */ +		find_good_pkt_pointers(this_branch, dst_reg, true);  	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&  		   dst_reg->type == PTR_TO_PACKET_END &&  		   regs[insn->src_reg].type == PTR_TO_PACKET) { -		find_good_pkt_pointers(other_branch, &regs[insn->src_reg]); +		/* pkt_end >= pkt_data' */ +		find_good_pkt_pointers(other_branch, &regs[insn->src_reg], false); +	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE && +		   dst_reg->type == PTR_TO_PACKET && +		   regs[insn->src_reg].type == PTR_TO_PACKET_END) { +		/* pkt_data' <= pkt_end */ +		find_good_pkt_pointers(other_branch, dst_reg, false);  	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&  		   dst_reg->type == PTR_TO_PACKET_END &&  		   regs[insn->src_reg].type == PTR_TO_PACKET) { -		find_good_pkt_pointers(this_branch, &regs[insn->src_reg]); +		/* pkt_end <= pkt_data' */ +		find_good_pkt_pointers(this_branch, &regs[insn->src_reg], true);  	} else if (is_pointer_value(env, insn->dst_reg)) {  		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);  		return -EACCES; @@ -4205,7 +4247,12 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)  		}  		if (insn->imm == BPF_FUNC_redirect_map) { -			u64 addr = (unsigned long)prog; +			/* Note, we cannot use prog directly as imm as subsequent +			 * rewrites would still change the prog pointer. The only +			 * stable address we can use is aux, which also works with +			 * prog clones during blinding. +			 */ +			u64 addr = (unsigned long)prog->aux;  			struct bpf_insn r4_ld[] = {  				BPF_LD_IMM64(BPF_REG_4, addr),  				*insn, |