Diffstat (limited to 'kernel/trace/bpf_trace.c')
-rw-r--r--  kernel/trace/bpf_trace.c | 79
1 file changed, 65 insertions, 14 deletions
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 19e793aa441a..ca1796747a77 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -83,7 +83,7 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 	if (in_nmi()) /* not supported yet */
 		return 1;
 
-	preempt_disable();
+	cant_sleep();
 
 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
 		/*
@@ -115,11 +115,9 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 
  out:
 	__this_cpu_dec(bpf_prog_active);
-	preempt_enable();
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(trace_call_bpf);
 
 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
@@ -732,7 +730,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
 	if (unlikely(!nmi_uaccess_okay()))
 		return -EPERM;
 
-	if (in_nmi()) {
+	if (irqs_disabled()) {
 		/* Do an early check on signal validity. Otherwise,
 		 * the error is lost in deferred irq_work.
 		 */
@@ -781,8 +779,8 @@ static const struct bpf_func_proto bpf_send_signal_thread_proto = {
 	.arg1_type	= ARG_ANYTHING,
 };
 
-static const struct bpf_func_proto *
-tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+const struct bpf_func_proto *
+bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
 	switch (func_id) {
 	case BPF_FUNC_map_lookup_elem:
@@ -843,6 +841,10 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_send_signal_proto;
 	case BPF_FUNC_send_signal_thread:
 		return &bpf_send_signal_thread_proto;
+	case BPF_FUNC_perf_event_read_value:
+		return &bpf_perf_event_read_value_proto;
+	case BPF_FUNC_get_ns_current_pid_tgid:
+		return &bpf_get_ns_current_pid_tgid_proto;
 	default:
 		return NULL;
 	}
@@ -858,14 +860,12 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_stackid_proto;
 	case BPF_FUNC_get_stack:
 		return &bpf_get_stack_proto;
-	case BPF_FUNC_perf_event_read_value:
-		return &bpf_perf_event_read_value_proto;
 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
 	case BPF_FUNC_override_return:
 		return &bpf_override_return_proto;
 #endif
 	default:
-		return tracing_func_proto(func_id, prog);
+		return bpf_tracing_func_proto(func_id, prog);
 	}
 }
 
@@ -975,7 +975,7 @@ tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_get_stack:
 		return &bpf_get_stack_proto_tp;
 	default:
-		return tracing_func_proto(func_id, prog);
+		return bpf_tracing_func_proto(func_id, prog);
 	}
 }
 
@@ -1028,6 +1028,45 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
          .arg3_type      = ARG_CONST_SIZE,
 };
 
+BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
+	   void *, buf, u32, size, u64, flags)
+{
+#ifndef CONFIG_X86
+	return -ENOENT;
+#else
+	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
+	struct perf_branch_stack *br_stack = ctx->data->br_stack;
+	u32 to_copy;
+
+	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
+		return -EINVAL;
+
+	if (unlikely(!br_stack))
+		return -EINVAL;
+
+	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
+		return br_stack->nr * br_entry_size;
+
+	if (!buf || (size % br_entry_size != 0))
+		return -EINVAL;
+
+	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
+	memcpy(buf, br_stack->entries, to_copy);
+
+	return to_copy;
+#endif
+}
+
+static const struct bpf_func_proto bpf_read_branch_records_proto = {
+	.func           = bpf_read_branch_records,
+	.gpl_only       = true,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
+	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
+	.arg4_type      = ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *
 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -1040,8 +1079,10 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_stack_proto_tp;
 	case BPF_FUNC_perf_prog_read_value:
 		return &bpf_perf_prog_read_value_proto;
+	case BPF_FUNC_read_branch_records:
+		return &bpf_read_branch_records_proto;
 	default:
-		return tracing_func_proto(func_id, prog);
+		return bpf_tracing_func_proto(func_id, prog);
 	}
 }
 
@@ -1104,6 +1145,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
 };
 
 extern const struct bpf_func_proto bpf_skb_output_proto;
+extern const struct bpf_func_proto bpf_xdp_output_proto;
 
 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
 	   struct bpf_map *, map, u64, flags)
@@ -1168,7 +1210,7 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_get_stack:
 		return &bpf_get_stack_proto_raw_tp;
 	default:
-		return tracing_func_proto(func_id, prog);
+		return bpf_tracing_func_proto(func_id, prog);
 	}
 }
 
@@ -1179,6 +1221,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 #ifdef CONFIG_NET
 	case BPF_FUNC_skb_output:
 		return &bpf_skb_output_proto;
+	case BPF_FUNC_xdp_output:
+		return &bpf_xdp_output_proto;
 #endif
 	default:
 		return raw_tp_prog_func_proto(func_id, prog);
@@ -1213,6 +1257,13 @@ static bool tracing_prog_is_valid_access(int off, int size,
 	return btf_ctx_access(off, size, type, prog, info);
 }
 
+int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
+				     const union bpf_attr *kattr,
+				     union bpf_attr __user *uattr)
+{
+	return -ENOTSUPP;
+}
+
 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
 	.get_func_proto  = raw_tp_prog_func_proto,
 	.is_valid_access = raw_tp_prog_is_valid_access,
@@ -1227,6 +1278,7 @@ const struct bpf_verifier_ops tracing_verifier_ops = {
 };
 
 const struct bpf_prog_ops tracing_prog_ops = {
+	.test_run = bpf_prog_test_run_tracing,
 };
 
 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
@@ -1475,10 +1527,9 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
 static __always_inline
 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
 {
+	cant_sleep();
 	rcu_read_lock();
-	preempt_disable();
 	(void) BPF_PROG_RUN(prog, args);
-	preempt_enable();
 	rcu_read_unlock();
 }
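For context, below is a minimal sketch of how a BPF_PROG_TYPE_PERF_EVENT program might call the bpf_read_branch_records() helper introduced by this diff, first querying the available size with BPF_F_GET_BRANCH_RECORDS_SIZE and then copying the records, as the kernel-side implementation above supports. The sketch is not part of the patch: the section name, the 16-entry on-stack buffer, and the absence of any output map are illustrative assumptions, and the usual perf_event_open()/ioctl attachment from user space is omitted.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only; assumes a libbpf/uapi recent enough to declare
 * bpf_read_branch_records() and BPF_F_GET_BRANCH_RECORDS_SIZE. */
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/perf_event.h>
#include <bpf/bpf_helpers.h>

/* 16 * sizeof(struct perf_branch_entry) = 384 bytes, which stays within the
 * 512-byte BPF stack limit. The count is an arbitrary illustrative choice. */
#define MAX_ENTRIES 16

SEC("perf_event")
int lbr_sample(struct bpf_perf_event_data *ctx)
{
	struct perf_branch_entry entries[MAX_ENTRIES] = {};
	int total, copied;

	/* With BPF_F_GET_BRANCH_RECORDS_SIZE the helper only reports how many
	 * bytes of branch records the current sample carries. */
	total = bpf_read_branch_records(ctx, NULL, 0,
					BPF_F_GET_BRANCH_RECORDS_SIZE);
	if (total < 0)
		return 0;	/* no br_stack (-EINVAL) or non-x86 (-ENOENT) */

	/* Copy up to sizeof(entries) bytes; the size passed in must be a
	 * multiple of sizeof(struct perf_branch_entry). */
	copied = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
	if (copied < 0)
		return 0;

	/* entries[0 .. copied / sizeof(entries[0])) now holds the most recent
	 * branch records; a real program would emit them via a map or the
	 * perf event output helper. */
	return 0;
}

/* The helper proto is gpl_only, so the program needs a GPL-compatible license. */
char LICENSE[] SEC("license") = "GPL";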