Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--   net/core/skbuff.c   54
1 files changed, 34 insertions, 20 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 45707059082f..4eaf7ed0d1f4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -550,7 +550,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
 			     bool *pfmemalloc)
 {
 	bool ret_pfmemalloc = false;
-	unsigned int obj_size;
+	size_t obj_size;
 	void *obj;
 
 	obj_size = SKB_HEAD_ALIGN(*size);
@@ -567,7 +567,13 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
 		obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
 		goto out;
 	}
-	*size = obj_size = kmalloc_size_roundup(obj_size);
+
+	obj_size = kmalloc_size_roundup(obj_size);
+	/* The following cast might truncate high-order bits of obj_size, this
+	 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
+	 */
+	*size = (unsigned int)obj_size;
+
 	/*
 	 * Try a regular allocation, when that fails and we're not entitled
 	 * to the reserves, fail.
@@ -4423,21 +4429,20 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 	struct sk_buff *segs = NULL;
 	struct sk_buff *tail = NULL;
 	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
-	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
 	unsigned int mss = skb_shinfo(head_skb)->gso_size;
 	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
-	struct sk_buff *frag_skb = head_skb;
 	unsigned int offset = doffset;
 	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
 	unsigned int partial_segs = 0;
 	unsigned int headroom;
 	unsigned int len = head_skb->len;
+	struct sk_buff *frag_skb;
+	skb_frag_t *frag;
 	__be16 proto;
 	bool csum, sg;
-	int nfrags = skb_shinfo(head_skb)->nr_frags;
 	int err = -ENOMEM;
 	int i = 0;
-	int pos;
+	int nfrags, pos;
 
 	if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
 	    mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
@@ -4514,6 +4519,13 @@ normal:
 	headroom = skb_headroom(head_skb);
 	pos = skb_headlen(head_skb);
 
+	if (skb_orphan_frags(head_skb, GFP_ATOMIC))
+		return ERR_PTR(-ENOMEM);
+
+	nfrags = skb_shinfo(head_skb)->nr_frags;
+	frag = skb_shinfo(head_skb)->frags;
+	frag_skb = head_skb;
+
 	do {
 		struct sk_buff *nskb;
 		skb_frag_t *nskb_frag;
@@ -4534,6 +4546,10 @@ normal:
 		    (skb_headlen(list_skb) == len || sg)) {
 			BUG_ON(skb_headlen(list_skb) > len);
 
+			nskb = skb_clone(list_skb, GFP_ATOMIC);
+			if (unlikely(!nskb))
+				goto err;
+
 			i = 0;
 			nfrags = skb_shinfo(list_skb)->nr_frags;
 			frag = skb_shinfo(list_skb)->frags;
@@ -4552,12 +4568,8 @@ normal:
 				frag++;
 			}
 
-			nskb = skb_clone(list_skb, GFP_ATOMIC);
 			list_skb = list_skb->next;
 
-			if (unlikely(!nskb))
-				goto err;
-
 			if (unlikely(pskb_trim(nskb, len))) {
 				kfree_skb(nskb);
 				goto err;
@@ -4633,12 +4645,16 @@ normal:
 		skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
 					   SKBFL_SHARED_FRAG;
 
-		if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
-		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
+		if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
 			goto err;
 
 		while (pos < offset + len) {
 			if (i >= nfrags) {
+				if (skb_orphan_frags(list_skb, GFP_ATOMIC) ||
+				    skb_zerocopy_clone(nskb, list_skb,
+						       GFP_ATOMIC))
+					goto err;
+
 				i = 0;
 				nfrags = skb_shinfo(list_skb)->nr_frags;
 				frag = skb_shinfo(list_skb)->frags;
@@ -4652,10 +4668,6 @@ normal:
 					i--;
 					frag--;
 				}
-				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
-				    skb_zerocopy_clone(nskb, frag_skb,
-						       GFP_ATOMIC))
-					goto err;
 
 				list_skb = list_skb->next;
 			}
@@ -5207,7 +5219,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
 	serr->ee.ee_info = tstype;
 	serr->opt_stats = opt_stats;
 	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
-	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
+	if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
 		if (sk_is_tcp(sk))
 			serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
@@ -5263,21 +5275,23 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
 {
 	struct sk_buff *skb;
 	bool tsonly, opt_stats = false;
+	u32 tsflags;
 
 	if (!sk)
 		return;
 
-	if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
+	tsflags = READ_ONCE(sk->sk_tsflags);
+	if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
 	    skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
 		return;
 
-	tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
+	tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
 	if (!skb_may_tx_timestamp(sk, tsonly))
 		return;
 
 	if (tsonly) {
 #ifdef CONFIG_INET
-		if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
+		if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
 		    sk_is_tcp(sk)) {
 			skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
 							     ack_skb);
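The kmalloc_reserve() hunks widen obj_size to size_t so the value returned by kmalloc_size_roundup() is kept in full for the allocation itself; only the write-back into the caller's unsigned int *size is narrowed, which the in-tree comment argues is harmless because a kmalloc() of 2^32 bytes or more fails anyway. A minimal userspace sketch of that narrowing cast follows; round_up_demo() is a stand-in helper invented for the demo, not the kernel's kmalloc_size_roundup():

/*
 * Illustrative userspace sketch: what happens when a size_t result is
 * narrowed to unsigned int.  round_up_demo() is a stand-in for a slab
 * round-up helper, not kernel code.
 */
#include <stdio.h>
#include <limits.h>

static size_t round_up_demo(size_t sz, size_t step)
{
	/* Round sz up to the next multiple of step (a power of two). */
	return (sz + step - 1) & ~(step - 1);
}

int main(void)
{
	size_t obj_size = (size_t)UINT_MAX - 100;      /* huge request */
	size_t rounded  = round_up_demo(obj_size, 4096);
	unsigned int narrowed = (unsigned int)rounded; /* high bits dropped */

	printf("rounded  = %zu\n", rounded);   /* exceeds UINT_MAX on 64-bit */
	printf("narrowed = %u\n", narrowed);   /* small, truncated value */

	/*
	 * Keeping the full size_t for the allocation call means the
	 * allocator sees the real, impossible size and fails cleanly,
	 * so the truncated reported size is never used for a live buffer.
	 */
	return 0;
}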
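The skb_segment() hunks move the skb_orphan_frags() and skb_zerocopy_clone() calls so they run before nfrags and frag are (re)loaded from the corresponding skb, presumably because those calls can rebuild the fragment array and leave previously cached values stale. The general caching hazard can be sketched in plain userspace C; struct buf and buf_grow() are invented for the demo and have no kernel counterpart:

/*
 * Illustrative userspace sketch of the caching hazard: buf_grow() may
 * realloc() the array, so a pointer/count cached before the call is
 * stale afterwards.  All names here are made up for the demo.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	int *items;
	int  nr;
};

/* May reallocate items[]; analogous to a call that can rebuild frags. */
static int buf_grow(struct buf *b, int extra)
{
	int *tmp = realloc(b->items, (b->nr + extra) * sizeof(*tmp));

	if (!tmp)
		return -1;
	for (int i = 0; i < extra; i++)
		tmp[b->nr + i] = 0;
	b->items = tmp;
	b->nr += extra;
	return 0;
}

int main(void)
{
	struct buf b = { .items = calloc(4, sizeof(int)), .nr = 4 };

	if (!b.items || buf_grow(&b, 64) < 0)
		return 1;

	/*
	 * Wrong order would be: cache b.items, then call buf_grow(); the
	 * cached pointer may now dangle.  Correct order: (re)load the
	 * cached view only after the call that can move the storage.
	 */
	int *items = b.items;
	int  nr    = b.nr;

	printf("nr = %d, first = %d\n", nr, items[0]);
	free(b.items);
	return 0;
}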
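The timestamping hunks replace repeated plain reads of sk->sk_tsflags with a single READ_ONCE() snapshot stored in a local tsflags, so every later test in __skb_tstamp_tx() sees one consistent value even if another CPU updates the socket's flags concurrently. A minimal userspace sketch of that snapshot pattern is below; the READ_ONCE() macro here is a simplified volatile-load stand-in, and demo_sock and the OPT_* bits are invented for the demo, not the kernel definitions:

/*
 * Illustrative userspace sketch (not kernel code): read a flags word that
 * another thread may update, once, and test the cached snapshot so all
 * decisions agree with each other.
 */
#include <stdio.h>
#include <stdint.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

#define OPT_TX_SWHW  0x1u   /* hypothetical flag bits for the demo */
#define OPT_TSONLY   0x2u
#define OPT_STATS    0x4u

struct demo_sock {
	uint32_t tsflags;       /* written concurrently elsewhere */
};

static void demo_tstamp(struct demo_sock *sk)
{
	/* Snapshot once; every test below uses the same value. */
	uint32_t tsflags = READ_ONCE(sk->tsflags);

	if (!(tsflags & OPT_TX_SWHW))
		printf("no sw/hw fallback requested\n");
	if (tsflags & OPT_TSONLY)
		printf("timestamp-only skb\n");
	if (tsflags & OPT_STATS)
		printf("attach opt stats\n");
}

int main(void)
{
	struct demo_sock sk = { .tsflags = OPT_TSONLY | OPT_STATS };

	demo_tstamp(&sk);
	return 0;
}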