author     Thomas Zimmermann <tzimmermann@suse.de>   2022-03-17 11:03:28 +0100
committer  Thomas Zimmermann <tzimmermann@suse.de>   2022-03-17 11:03:28 +0100
commit     a8253684eb4b30abd3faf055bc475c23da748dc6 (patch)
tree       17ddd1543b1f08186b90cd1cf274d969a88ed1f5 /net/core/skbuff.c
parent     fc1b6ef7bfb3d1d4df868b1c3e0480cacda6cd81 (diff)
parent     09688c0166e76ce2fb85e86b9d99be8b0084cdf9 (diff)
Merge drm/drm-fixes into drm-misc-fixes
Backmerging drm/drm-fixes for commit 3755d35ee1d2 ("drm/panel: Select
DRM_DP_HELPER for DRM_PANEL_EDP").
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Diffstat (limited to 'net/core/skbuff.c')
 net/core/skbuff.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0118f0afaa4f..ea51e23e9247 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -681,7 +681,7 @@ exit:
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
-	 * additional references to to the fragmented part of the SKB.
+	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and it's
	 * dataref set to 0, which will trigger the recycling
	 */
@@ -2276,7 +2276,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
-			kfree_skb(list);
+			consume_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
@@ -3876,6 +3876,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
		list_skb = list_skb->next;

		err = 0;
+		delta_truesize += nskb->truesize;
		if (skb_shared(nskb)) {
			tmp = skb_clone(nskb, GFP_ATOMIC);
			if (tmp) {
@@ -3900,7 +3901,6 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
		tail = nskb;

		delta_len += nskb->len;
-		delta_truesize += nskb->truesize;

		skb_push(nskb, -skb_network_offset(nskb) + offset);

@@ -4730,7 +4730,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
		serr->ee.ee_data = skb_shinfo(skb)->tskey;
		if (sk_is_tcp(sk))
-			serr->ee.ee_data -= sk->sk_tskey;
+			serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
	}

	err = sock_queue_err_skb(sk, skb);
@@ -6105,7 +6105,7 @@ static int pskb_carve_frag_list(struct sk_buff *skb,
		/* Free pulled out fragments. */
		while ((list = shinfo->frag_list) != insp) {
			shinfo->frag_list = list->next;
-			kfree_skb(list);
+			consume_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {