Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--   net/core/skbuff.c   47
1 file changed, 31 insertions, 16 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index eb7d33b41e71..4c0879798eb8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -517,18 +517,16 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
 #ifdef HAVE_SKB_SMALL_HEAD_CACHE
 	if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
 	    !(flags & KMALLOC_NOT_NORMAL_BITS)) {
-
-		/* skb_small_head_cache has non power of two size,
-		 * likely forcing SLUB to use order-3 pages.
-		 * We deliberately attempt a NOMEMALLOC allocation only.
-		 */
 		obj = kmem_cache_alloc_node(skb_small_head_cache,
 				flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
 				node);
-		if (obj) {
-			*size = SKB_SMALL_HEAD_CACHE_SIZE;
+		*size = SKB_SMALL_HEAD_CACHE_SIZE;
+		if (obj || !(gfp_pfmemalloc_allowed(flags)))
 			goto out;
-		}
+		/* Try again but now we are using pfmemalloc reserves */
+		ret_pfmemalloc = true;
+		obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
+		goto out;
 	}
 #endif
 	*size = obj_size = kmalloc_size_roundup(obj_size);
@@ -2082,6 +2080,7 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
 }
 EXPORT_SYMBOL(skb_realloc_headroom);
 
+/* Note: We plan to rework this in linux-6.4 */
 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
 {
 	unsigned int saved_end_offset, saved_truesize;
@@ -2100,6 +2099,22 @@ int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
 	if (likely(skb_end_offset(skb) == saved_end_offset))
 		return 0;
 
+#ifdef HAVE_SKB_SMALL_HEAD_CACHE
+	/* We can not change skb->end if the original or new value
+	 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head().
+	 */
+	if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM ||
+	    skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) {
+		/* We think this path should not be taken.
+		 * Add a temporary trace to warn us just in case.
+		 */
+		pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n",
+			    saved_end_offset, skb_end_offset(skb));
+		WARN_ON_ONCE(1);
+		return 0;
+	}
+#endif
+
 	shinfo = skb_shinfo(skb);
 
 	/* We are about to change back skb->end,
@@ -5584,18 +5599,18 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 	if (skb_cloned(to))
 		return false;
 
-	/* In general, avoid mixing slab allocated and page_pool allocated
-	 * pages within the same SKB. However when @to is not pp_recycle and
-	 * @from is cloned, we can transition frag pages from page_pool to
-	 * reference counted.
-	 *
-	 * On the other hand, don't allow coalescing two pp_recycle SKBs if
-	 * @from is cloned, in case the SKB is using page_pool fragment
+	/* In general, avoid mixing page_pool and non-page_pool allocated
+	 * pages within the same SKB. Additionally avoid dealing with clones
+	 * with page_pool pages, in case the SKB is using page_pool fragment
 	 * references (PP_FLAG_PAGE_FRAG). Since we only take full page
 	 * references for cloned SKBs at the moment that would result in
 	 * inconsistent reference counts.
+	 * In theory we could take full references if @from is cloned and
+	 * !@to->pp_recycle but its tricky (due to potential race with
+	 * the clone disappearing) and rare, so not worth dealing with.
 	 */
-	if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from)))
+	if (to->pp_recycle != from->pp_recycle ||
+	    (from->pp_recycle && skb_cloned(from)))
 		return false;
 
 	if (len <= skb_tailroom(to)) {
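For readers of the first hunk, here is a condensed sketch of how the small-head-cache branch of kmalloc_reserve() behaves after this change. It is illustrative only: the helper name small_head_alloc() is made up, and skb_small_head_cache / SKB_SMALL_HEAD_CACHE_SIZE are file-local to net/core/skbuff.c, where the real code lives inline.

/* Illustrative only: small_head_alloc() is a hypothetical helper that
 * mirrors the small-head-cache branch of kmalloc_reserve() after this
 * patch; the upstream code is inline in net/core/skbuff.c.
 */
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

static void *small_head_alloc(unsigned int *size, gfp_t flags, int node,
			      bool *pfmemalloc)
{
	void *obj;

	/* First try: refuse memory reserves and suppress failure warnings. */
	obj = kmem_cache_alloc_node(skb_small_head_cache,
				    flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
				    node);
	*size = SKB_SMALL_HEAD_CACHE_SIZE;
	if (obj || !gfp_pfmemalloc_allowed(flags))
		return obj;

	/* Try again, this time allowed to consume pfmemalloc reserves;
	 * remember it so the caller can mark the skb as pfmemalloc.
	 */
	if (pfmemalloc)
		*pfmemalloc = true;
	return kmem_cache_alloc_node(skb_small_head_cache, flags, node);
}

The point of the two-step approach is that the first attempt never touches the memory reserves (__GFP_NOMEMALLOC), so only callers that gfp_pfmemalloc_allowed() reports as entitled retry with their unmodified flags, and that fallback is recorded for skb->pfmemalloc accounting.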