diff options
Diffstat (limited to 'kernel/bpf/core.c')
| -rw-r--r-- | kernel/bpf/core.c | 118 | 
1 file changed, 74 insertions, 44 deletions
| diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 9f1493705f40..4d09e610777f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -350,6 +350,20 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,  	return prog_adj;  } +void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) +{ +	int i; + +	for (i = 0; i < fp->aux->func_cnt; i++) +		bpf_prog_kallsyms_del(fp->aux->func[i]); +} + +void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) +{ +	bpf_prog_kallsyms_del_subprogs(fp); +	bpf_prog_kallsyms_del(fp); +} +  #ifdef CONFIG_BPF_JIT  /* All BPF JIT sysctl knobs here. */  int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON); @@ -1434,6 +1448,17 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)  	return 0;  } +static void bpf_prog_select_func(struct bpf_prog *fp) +{ +#ifndef CONFIG_BPF_JIT_ALWAYS_ON +	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); + +	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; +#else +	fp->bpf_func = __bpf_prog_ret0_warn; +#endif +} +  /**   *	bpf_prog_select_runtime - select exec runtime for BPF program   *	@fp: bpf_prog populated with internal BPF program @@ -1444,13 +1469,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)   */  struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)  { -#ifndef CONFIG_BPF_JIT_ALWAYS_ON -	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); +	/* In case of BPF to BPF calls, verifier did all the prep +	 * work with regards to JITing, etc. +	 */ +	if (fp->bpf_func) +		goto finalize; -	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; -#else -	fp->bpf_func = __bpf_prog_ret0_warn; -#endif +	bpf_prog_select_func(fp);  	/* eBPF JITs can rewrite the program in case constant  	 * blinding is active. 
However, in case of error during @@ -1471,6 +1496,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)  		if (*err)  			return fp;  	} + +finalize:  	bpf_prog_lock_ro(fp);  	/* The tail call compatibility check can only be done at @@ -1511,11 +1538,12 @@ static struct {  	.null_prog = NULL,  }; -struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) +struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)  {  	if (prog_cnt)  		return kzalloc(sizeof(struct bpf_prog_array) + -			       sizeof(struct bpf_prog *) * (prog_cnt + 1), +			       sizeof(struct bpf_prog_array_item) * +			       (prog_cnt + 1),  			       flags);  	return &empty_prog_array.hdr; @@ -1529,43 +1557,45 @@ void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)  	kfree_rcu(progs, rcu);  } -int bpf_prog_array_length(struct bpf_prog_array __rcu *progs) +int bpf_prog_array_length(struct bpf_prog_array __rcu *array)  { -	struct bpf_prog **prog; +	struct bpf_prog_array_item *item;  	u32 cnt = 0;  	rcu_read_lock(); -	prog = rcu_dereference(progs)->progs; -	for (; *prog; prog++) -		if (*prog != &dummy_bpf_prog.prog) +	item = rcu_dereference(array)->items; +	for (; item->prog; item++) +		if (item->prog != &dummy_bpf_prog.prog)  			cnt++;  	rcu_read_unlock();  	return cnt;  } -static bool bpf_prog_array_copy_core(struct bpf_prog **prog, + +static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,  				     u32 *prog_ids,  				     u32 request_cnt)  { +	struct bpf_prog_array_item *item;  	int i = 0; -	for (; *prog; prog++) { -		if (*prog == &dummy_bpf_prog.prog) +	item = rcu_dereference(array)->items; +	for (; item->prog; item++) { +		if (item->prog == &dummy_bpf_prog.prog)  			continue; -		prog_ids[i] = (*prog)->aux->id; +		prog_ids[i] = item->prog->aux->id;  		if (++i == request_cnt) { -			prog++; +			item++;  			break;  		}  	} -	return !!(*prog); +	return !!(item->prog);  } -int bpf_prog_array_copy_to_user(struct 
bpf_prog_array __rcu *progs, +int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,  				__u32 __user *prog_ids, u32 cnt)  { -	struct bpf_prog **prog;  	unsigned long err = 0;  	bool nospc;  	u32 *ids; @@ -1584,8 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,  	if (!ids)  		return -ENOMEM;  	rcu_read_lock(); -	prog = rcu_dereference(progs)->progs; -	nospc = bpf_prog_array_copy_core(prog, ids, cnt); +	nospc = bpf_prog_array_copy_core(array, ids, cnt);  	rcu_read_unlock();  	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));  	kfree(ids); @@ -1596,14 +1625,14 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,  	return 0;  } -void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, +void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,  				struct bpf_prog *old_prog)  { -	struct bpf_prog **prog = progs->progs; +	struct bpf_prog_array_item *item = array->items; -	for (; *prog; prog++) -		if (*prog == old_prog) { -			WRITE_ONCE(*prog, &dummy_bpf_prog.prog); +	for (; item->prog; item++) +		if (item->prog == old_prog) { +			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);  			break;  		}  } @@ -1614,7 +1643,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,  			struct bpf_prog_array **new_array)  {  	int new_prog_cnt, carry_prog_cnt = 0; -	struct bpf_prog **existing_prog; +	struct bpf_prog_array_item *existing;  	struct bpf_prog_array *array;  	bool found_exclude = false;  	int new_prog_idx = 0; @@ -1623,15 +1652,15 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,  	 * the new array.  	 
*/  	if (old_array) { -		existing_prog = old_array->progs; -		for (; *existing_prog; existing_prog++) { -			if (*existing_prog == exclude_prog) { +		existing = old_array->items; +		for (; existing->prog; existing++) { +			if (existing->prog == exclude_prog) {  				found_exclude = true;  				continue;  			} -			if (*existing_prog != &dummy_bpf_prog.prog) +			if (existing->prog != &dummy_bpf_prog.prog)  				carry_prog_cnt++; -			if (*existing_prog == include_prog) +			if (existing->prog == include_prog)  				return -EEXIST;  		}  	} @@ -1657,15 +1686,17 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,  	/* Fill in the new prog array */  	if (carry_prog_cnt) { -		existing_prog = old_array->progs; -		for (; *existing_prog; existing_prog++) -			if (*existing_prog != exclude_prog && -			    *existing_prog != &dummy_bpf_prog.prog) -				array->progs[new_prog_idx++] = *existing_prog; +		existing = old_array->items; +		for (; existing->prog; existing++) +			if (existing->prog != exclude_prog && +			    existing->prog != &dummy_bpf_prog.prog) { +				array->items[new_prog_idx++].prog = +					existing->prog; +			}  	}  	if (include_prog) -		array->progs[new_prog_idx++] = include_prog; -	array->progs[new_prog_idx] = NULL; +		array->items[new_prog_idx++].prog = include_prog; +	array->items[new_prog_idx].prog = NULL;  	*new_array = array;  	return 0;  } @@ -1674,7 +1705,6 @@ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,  			     u32 *prog_ids, u32 request_cnt,  			     u32 *prog_cnt)  { -	struct bpf_prog **prog;  	u32 cnt = 0;  	if (array) @@ -1687,8 +1717,7 @@ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,  		return 0;  	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */ -	prog = rcu_dereference_check(array, 1)->progs; -	return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC +	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? 
-ENOSPC  								     : 0;  } @@ -1766,6 +1795,7 @@ const struct bpf_func_proto bpf_get_current_comm_proto __weak;  const struct bpf_func_proto bpf_sock_map_update_proto __weak;  const struct bpf_func_proto bpf_sock_hash_update_proto __weak;  const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak; +const struct bpf_func_proto bpf_get_local_storage_proto __weak;  const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)  { |