Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c            29
-rw-r--r--   net/core/dev_ioctl.c       6
-rw-r--r--   net/core/devlink.c         6
-rw-r--r--   net/core/drop_monitor.c    2
-rw-r--r--   net/core/filter.c         77
-rw-r--r--   net/core/netpoll.c         2
-rw-r--r--   net/core/pktgen.c          4
-rw-r--r--   net/core/skbuff.c         16
-rw-r--r--   net/core/skmsg.c           1
-rw-r--r--   net/core/sock.c            4
10 files changed, 102 insertions, 45 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 7df6c9617321..4086d335978c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4690,10 +4690,10 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
-		/* fall through */
+		fallthrough;
 	case XDP_ABORTED:
 		trace_xdp_exception(skb->dev, xdp_prog, act);
-		/* fall through */
+		fallthrough;
 	case XDP_DROP:
 	do_drop:
 		kfree_skb(skb);
@@ -6612,12 +6612,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
 				weight);
 	napi->weight = weight;
-	list_add(&napi->dev_list, &dev->napi_list);
 	napi->dev = dev;
 #ifdef CONFIG_NETPOLL
 	napi->poll_owner = -1;
 #endif
 	set_bit(NAPI_STATE_SCHED, &napi->state);
+	set_bit(NAPI_STATE_NPSVC, &napi->state);
+	list_add_rcu(&napi->dev_list, &dev->napi_list);
 	napi_hash_add(napi);
 }
 EXPORT_SYMBOL(netif_napi_add);
@@ -8742,13 +8743,15 @@ struct bpf_xdp_link {
 	int flags;
 };

-static enum bpf_xdp_mode dev_xdp_mode(u32 flags)
+static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
 {
 	if (flags & XDP_FLAGS_HW_MODE)
 		return XDP_MODE_HW;
 	if (flags & XDP_FLAGS_DRV_MODE)
 		return XDP_MODE_DRV;
-	return XDP_MODE_SKB;
+	if (flags & XDP_FLAGS_SKB_MODE)
+		return XDP_MODE_SKB;
+	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
 }

 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
@@ -8896,7 +8899,7 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
 		return -EINVAL;
 	}

-	mode = dev_xdp_mode(flags);
+	mode = dev_xdp_mode(dev, flags);
 	/* can't replace attached link */
 	if (dev_xdp_link(dev, mode)) {
 		NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
@@ -8913,10 +8916,6 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
 		NL_SET_ERR_MSG(extack, "Active program does not match expected");
 		return -EEXIST;
 	}
-	if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
-		NL_SET_ERR_MSG(extack, "XDP program already attached");
-		return -EBUSY;
-	}

 	/* put effective new program into new_prog */
 	if (link)
@@ -8927,6 +8926,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
 		enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
 					       ? XDP_MODE_DRV : XDP_MODE_SKB;

+		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
+			NL_SET_ERR_MSG(extack, "XDP program already attached");
+			return -EBUSY;
+		}
 		if (!offload && dev_xdp_prog(dev, other_mode)) {
 			NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
 			return -EEXIST;
@@ -8984,7 +8987,7 @@ static int dev_xdp_detach_link(struct net_device *dev,

 	ASSERT_RTNL();

-	mode = dev_xdp_mode(link->flags);
+	mode = dev_xdp_mode(dev, link->flags);
 	if (dev_xdp_link(dev, mode) != link)
 		return -EINVAL;

@@ -9080,7 +9083,7 @@ static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
 		goto out_unlock;
 	}

-	mode = dev_xdp_mode(xdp_link->flags);
+	mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
 	bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
 	err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
 			      xdp_link->flags, new_prog);
@@ -9164,7 +9167,7 @@ out_put_dev:
 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		      int fd, int expected_fd, u32 flags)
 {
-	enum bpf_xdp_mode mode = dev_xdp_mode(flags);
+	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
 	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
 	int err;

diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index b2cf9b7bb7b8..205e92e604ef 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -322,7 +322,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
 		err = net_hwtstamp_validate(ifr);
 		if (err)
 			return err;
-		/* fall through */
+		fallthrough;

 	/*
 	 *	Unknown or private ioctl
@@ -478,7 +478,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
 	case SIOCSIFTXQLEN:
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
-		/* fall through */
+		fallthrough;
 	/*
 	 *	These ioctl calls:
 	 *	- require local superuser power.
@@ -503,7 +503,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
 	case SIOCSHWTSTAMP:
 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
-		/* fall through */
+		fallthrough;
 	case SIOCBONDSLAVEINFOQUERY:
 	case SIOCBONDINFOQUERY:
 		dev_load(net, ifr->ifr_name);
diff --git a/net/core/devlink.c b/net/core/devlink.c
index e674f0f46dc2..80ec1cd81c64 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -4063,7 +4063,7 @@ static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
 {
 	lockdep_assert_held(&devlink->lock);

-	if (WARN_ON(xa_load(&devlink->snapshot_ids, id)))
+	if (xa_load(&devlink->snapshot_ids, id))
 		return -EEXIST;

 	return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
@@ -6196,8 +6196,8 @@ devlink_trap_action_get_from_info(struct genl_info *info,

 	val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]);
 	switch (val) {
-	case DEVLINK_TRAP_ACTION_DROP: /* fall-through */
-	case DEVLINK_TRAP_ACTION_TRAP: /* fall-through */
+	case DEVLINK_TRAP_ACTION_DROP:
+	case DEVLINK_TRAP_ACTION_TRAP:
 	case DEVLINK_TRAP_ACTION_MIRROR:
 		*p_trap_action = val;
 		break;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index b09bebeadf0b..9704522b0872 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -1189,7 +1189,7 @@ static int net_dm_alert_mode_get_from_info(struct genl_info *info,
 	val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]);

 	switch (val) {
-	case NET_DM_ALERT_MODE_SUMMARY: /* fall-through */
+	case NET_DM_ALERT_MODE_SUMMARY:
 	case NET_DM_ALERT_MODE_PACKET:
 		*p_alert_mode = val;
 		break;
diff --git a/net/core/filter.c b/net/core/filter.c
index 7124f0fe6974..1f647ab986b6 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8317,15 +8317,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 /* Helper macro for adding read access to tcp_sock or sock fields. */
 #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
 	do {								      \
+		int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2;     \
 		BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >		      \
 			     sizeof_field(struct bpf_sock_ops, BPF_FIELD));   \
+		if (si->dst_reg == reg || si->src_reg == reg)		      \
+			reg--;						      \
+		if (si->dst_reg == reg || si->src_reg == reg)		      \
+			reg--;						      \
+		if (si->dst_reg == si->src_reg) {			      \
+			*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg,	      \
+					  offsetof(struct bpf_sock_ops_kern,  \
+					  temp));			      \
+			fullsock_reg = reg;				      \
+			jmp += 2;					      \
+		}							      \
 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
 						struct bpf_sock_ops_kern,     \
 						is_fullsock),		      \
-				      si->dst_reg, si->src_reg,		      \
+				      fullsock_reg, si->src_reg,	      \
 				      offsetof(struct bpf_sock_ops_kern,      \
 					       is_fullsock));		      \
-		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2);	      \
+		*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);	      \
+		if (si->dst_reg == si->src_reg)				      \
+			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,	      \
+				      offsetof(struct bpf_sock_ops_kern,      \
+				      temp));				      \
 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
 						struct bpf_sock_ops_kern, sk),\
 				      si->dst_reg, si->src_reg,		      \
@@ -8334,6 +8350,49 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 						       OBJ_FIELD),	      \
 				      si->dst_reg, si->dst_reg,		      \
 				      offsetof(OBJ, OBJ_FIELD));	      \
+		if (si->dst_reg == si->src_reg)	{			      \
+			*insn++ = BPF_JMP_A(1);				      \
+			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,	      \
+				      offsetof(struct bpf_sock_ops_kern,      \
+				      temp));				      \
+		}							      \
+	} while (0)
+
+#define SOCK_OPS_GET_SK()							      \
+	do {								      \
+		int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1;     \
+		if (si->dst_reg == reg || si->src_reg == reg)		      \
+			reg--;						      \
+		if (si->dst_reg == reg || si->src_reg == reg)		      \
+			reg--;						      \
+		if (si->dst_reg == si->src_reg) {			      \
+			*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg,	      \
+					  offsetof(struct bpf_sock_ops_kern,  \
+					  temp));			      \
+			fullsock_reg = reg;				      \
+			jmp += 2;					      \
+		}							      \
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
+						struct bpf_sock_ops_kern,     \
+						is_fullsock),		      \
+				      fullsock_reg, si->src_reg,	      \
+				      offsetof(struct bpf_sock_ops_kern,      \
+					       is_fullsock));		      \
+		*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);	      \
+		if (si->dst_reg == si->src_reg)				      \
+			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,	      \
+				      offsetof(struct bpf_sock_ops_kern,      \
+				      temp));				      \
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
+						struct bpf_sock_ops_kern, sk),\
+				      si->dst_reg, si->src_reg,		      \
+				      offsetof(struct bpf_sock_ops_kern, sk));\
+		if (si->dst_reg == si->src_reg)	{			      \
+			*insn++ = BPF_JMP_A(1);				      \
+			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,	      \
+				      offsetof(struct bpf_sock_ops_kern,      \
+				      temp));				      \
+		}							      \
 	} while (0)

 #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
@@ -8620,17 +8679,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 		SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
 		break;
 	case offsetof(struct bpf_sock_ops, sk):
-		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-						struct bpf_sock_ops_kern,
-						is_fullsock),
-				      si->dst_reg, si->src_reg,
-				      offsetof(struct bpf_sock_ops_kern,
-					       is_fullsock));
-		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
-		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-						struct bpf_sock_ops_kern, sk),
-				      si->dst_reg, si->src_reg,
-				      offsetof(struct bpf_sock_ops_kern, sk));
+		SOCK_OPS_GET_SK();
 		break;
 	}
 	return insn - insn_buf;
@@ -9174,7 +9223,7 @@ sk_reuseport_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
 		if (size < sizeof_field(struct sk_buff, protocol))
 			return false;
-		/* fall through */
+		fallthrough;
 	case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
 	case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
 	case bpf_ctx_range(struct sk_reuseport_md, len):
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 093e90e52bc2..2338753e936b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -162,7 +162,7 @@ static void poll_napi(struct net_device *dev)
 	struct napi_struct *napi;
 	int cpu = smp_processor_id();

-	list_for_each_entry(napi, &dev->napi_list, dev_list) {
+	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
 		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
 			poll_one_napi(napi);
 			smp_store_release(&napi->poll_owner, -1);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b53b6d38c4df..44fdbb9c6e53 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3430,7 +3430,7 @@ xmit_more:
 		net_info_ratelimited("%s xmit error: %d\n",
 				     pkt_dev->odevname, ret);
 		pkt_dev->errors++;
-		/* fall through */
+		fallthrough;
 	case NETDEV_TX_BUSY:
 		/* Retry it next time */
 		refcount_dec(&(pkt_dev->skb->users));
@@ -3699,7 +3699,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
 				   cpu_to_node(cpu),
 				   "kpktgend_%d", cpu);
 	if (IS_ERR(p)) {
-		pr_err("kernel_thread() failed for cpu %d\n", t->cpu);
+		pr_err("kthread_create_on_node() failed for cpu %d\n", t->cpu);
 		list_del(&t->th_list);
 		kfree(t);
 		return PTR_ERR(p);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7e2e502ef519..6faf73d6a0f7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -820,6 +820,7 @@ void skb_tx_error(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(skb_tx_error);

+#ifdef CONFIG_TRACEPOINTS
 /**
  *	consume_skb - free an skbuff
  *	@skb: buffer to free
@@ -837,6 +838,7 @@ void consume_skb(struct sk_buff *skb)
 	__kfree_skb(skb);
 }
 EXPORT_SYMBOL(consume_skb);
+#endif

 /**
  *	consume_stateless_skb - free an skbuff, assuming it is stateless
@@ -5418,8 +5420,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
 		goto err_free;
-
-	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+	/* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
+	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
 		goto err_free;

 	vhdr = (struct vlan_hdr *)skb->data;
@@ -5987,9 +5989,13 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
 	if (skb_has_frag_list(skb))
 		skb_clone_fraglist(skb);

-	if (k == 0) {
-		/* split line is in frag list */
-		pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
+	/* split line is in frag list */
+	if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
+		/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
+		if (skb_has_frag_list(skb))
+			kfree_skb_list(skb_shinfo(skb)->frag_list);
+		kfree(data);
+		return -ENOMEM;
 	}
 	skb_release_data(skb);

diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 6a32a1fd34f8..649583158983 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -772,7 +772,6 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
 		sk_psock_skb_redirect(skb);
 		break;
 	case __SK_DROP:
-		/* fall-through */
 	default:
 out_free:
 		kfree_skb(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index e4f40b175acb..6c5c6b18eff4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1008,7 +1008,7 @@ set_sndbuf:
 		break;
 	case SO_TIMESTAMPING_NEW:
 		sock_set_flag(sk, SOCK_TSTAMP_NEW);
-		/* fall through */
+		fallthrough;
 	case SO_TIMESTAMPING_OLD:
 		if (val & ~SOF_TIMESTAMPING_MASK) {
 			ret = -EINVAL;
@@ -3254,7 +3254,7 @@ void sk_common_release(struct sock *sk)
 		sk->sk_prot->destroy(sk);

 	/*
-	 * Observation: when sock_common_release is called, processes have
+	 * Observation: when sk_common_release is called, processes have
 	 * no access to socket. But net still has.
 	 * Step one, detach it from networking:
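Note on the dev_xdp_mode() change above: when the caller passes neither XDP_FLAGS_SKB_MODE nor XDP_FLAGS_DRV_MODE, the kernel now defaults to native XDP if the driver implements ndo_bpf, and only falls back to generic (SKB) mode otherwise; previously no flag always meant generic mode. A minimal userspace sketch of the difference, assuming a recent libbpf that provides bpf_xdp_attach() (the API contemporary with this tree was bpf_set_link_xdp_fd()); the helper names below are illustrative only:

	/* Sketch only: program loading and ifindex lookup are elided.
	 * Assumes libbpf >= 0.8 for bpf_xdp_attach(). */
	#include <linux/if_link.h>	/* XDP_FLAGS_* */
	#include <bpf/libbpf.h>

	/* No mode flag: with the dev_xdp_mode() change, the kernel picks
	 * native (driver) XDP when ndo_bpf is implemented, generic otherwise. */
	int attach_xdp_default(int ifindex, int prog_fd)
	{
		return bpf_xdp_attach(ifindex, prog_fd,
				      XDP_FLAGS_UPDATE_IF_NOEXIST, NULL);
	}

	/* Explicit generic mode: matches the old no-flag behaviour even on
	 * drivers that support native XDP. */
	int attach_xdp_generic(int ifindex, int prog_fd)
	{
		return bpf_xdp_attach(ifindex, prog_fd,
				      XDP_FLAGS_UPDATE_IF_NOEXIST |
				      XDP_FLAGS_SKB_MODE, NULL);
	}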