author    | Thomas Zimmermann <tzimmermann@suse.de> | 2022-03-17 11:03:28 +0100
committer | Thomas Zimmermann <tzimmermann@suse.de> | 2022-03-17 11:03:28 +0100
commit    | a8253684eb4b30abd3faf055bc475c23da748dc6 (patch)
tree      | 17ddd1543b1f08186b90cd1cf274d969a88ed1f5 /net/ipv4/tcp.c
parent    | fc1b6ef7bfb3d1d4df868b1c3e0480cacda6cd81 (diff)
parent    | 09688c0166e76ce2fb85e86b9d99be8b0084cdf9 (diff)
Merge drm/drm-fixes into drm-misc-fixes
Backmerging drm/drm-fixes for commit 3755d35ee1d2 ("drm/panel: Select
DRM_DP_HELPER for DRM_PANEL_EDP").
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r-- | net/ipv4/tcp.c | 41
1 file changed, 26 insertions, 15 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3b75836db19b..28ff2a820f7c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -842,6 +842,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
         }
 
         release_sock(sk);
+        sk_defer_free_flush(sk);
 
         if (spliced)
                 return spliced;
@@ -936,6 +937,22 @@ void tcp_remove_empty_skb(struct sock *sk)
         }
 }
 
+/* skb changing from pure zc to mixed, must charge zc */
+static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
+{
+        if (unlikely(skb_zcopy_pure(skb))) {
+                u32 extra = skb->truesize -
+                            SKB_TRUESIZE(skb_end_offset(skb));
+
+                if (!sk_wmem_schedule(sk, extra))
+                        return -ENOMEM;
+
+                sk_mem_charge(sk, extra);
+                skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
+        }
+        return 0;
+}
+
 static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
                                       struct page *page, int offset, size_t *size)
 {
@@ -971,7 +988,7 @@ new_segment:
                 tcp_mark_push(tp, skb);
                 goto new_segment;
         }
-        if (!sk_wmem_schedule(sk, copy))
+        if (tcp_downgrade_zcopy_pure(sk, skb) || !sk_wmem_schedule(sk, copy))
                 return NULL;
 
         if (can_coalesce) {
@@ -1319,16 +1336,8 @@ new_segment:
 
                         copy = min_t(int, copy, pfrag->size - pfrag->offset);
 
-                        /* skb changing from pure zc to mixed, must charge zc */
-                        if (unlikely(skb_zcopy_pure(skb))) {
-                                if (!sk_wmem_schedule(sk, skb->data_len))
-                                        goto wait_for_space;
-
-                                sk_mem_charge(sk, skb->data_len);
-                                skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
-                        }
-
-                        if (!sk_wmem_schedule(sk, copy))
+                        if (tcp_downgrade_zcopy_pure(sk, skb) ||
+                            !sk_wmem_schedule(sk, copy))
                                 goto wait_for_space;
 
                         err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
@@ -1675,11 +1684,13 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                                 if (!copied)
                                         copied = used;
                                 break;
-                        } else if (used <= len) {
-                                seq += used;
-                                copied += used;
-                                offset += used;
                         }
+                        if (WARN_ON_ONCE(used > len))
+                                used = len;
+                        seq += used;
+                        copied += used;
+                        offset += used;
+
                         /* If recv_actor drops the lock (e.g. TCP splice
                          * receive) the skb pointer might be invalid when
                          * getting here: tcp_collapse might have deleted it
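
For readers following the zerocopy accounting hunks: the new tcp_downgrade_zcopy_pure() helper factors out the "pure zerocopy to mixed" transition and charges skb->truesize - SKB_TRUESIZE(skb_end_offset(skb)) instead of the old skb->data_len. Below is a minimal standalone sketch of that accounting step in userspace C. Everything in it (fake_skb, fake_sock, the SKB_TRUESIZE stand-in, wmem_schedule(), mem_charge()) is an invented simplification for illustration, not kernel API, and the truesize composition is assumed for the example.

```c
/*
 * Standalone illustration (userspace C, NOT kernel code) of the accounting
 * done by tcp_downgrade_zcopy_pure() in the diff above. SKB_TRUESIZE,
 * wmem_schedule() and mem_charge() are simplified stand-ins for the kernel
 * helpers of similar names.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKBFL_PURE_ZEROCOPY 0x4u             /* illustrative flag bit */
#define SKB_TRUESIZE(size)  ((size) + 512u)  /* stand-in: payload + fixed overhead */

struct fake_skb {
	uint32_t truesize;      /* total memory attributed to this skb */
	uint32_t end_offset;    /* models skb_end_offset(): linear buffer size */
	uint32_t shinfo_flags;
};

struct fake_sock {
	long forward_alloc;     /* memory still available to charge against */
	long charged;           /* memory already charged to the socket */
};

/* Stand-in for sk_wmem_schedule(): succeed while forward allocation remains. */
static bool wmem_schedule(struct fake_sock *sk, uint32_t size)
{
	return sk->forward_alloc - sk->charged >= (long)size;
}

/* Stand-in for sk_mem_charge(). */
static void mem_charge(struct fake_sock *sk, uint32_t size)
{
	sk->charged += size;
}

/*
 * Mirrors the helper's logic: the subtraction in the diff implies that a
 * pure-zerocopy skb had only SKB_TRUESIZE(end_offset) of its truesize charged,
 * so before copied data is added the remainder must be scheduled and charged,
 * and only then is the pure-zerocopy flag cleared.
 */
static int downgrade_zcopy_pure(struct fake_sock *sk, struct fake_skb *skb)
{
	if (skb->shinfo_flags & SKBFL_PURE_ZEROCOPY) {
		uint32_t extra = skb->truesize - SKB_TRUESIZE(skb->end_offset);

		if (!wmem_schedule(sk, extra))
			return -1;      /* -ENOMEM in the kernel */

		mem_charge(sk, extra);
		skb->shinfo_flags &= ~SKBFL_PURE_ZEROCOPY;
	}
	return 0;
}

int main(void)
{
	struct fake_sock sk = { .forward_alloc = 8192, .charged = 0 };
	struct fake_skb skb = {
		/* linear buffer overhead + 4096 bytes attributed to zc frags */
		.truesize = SKB_TRUESIZE(2048) + 4096,
		.end_offset = 2048,
		.shinfo_flags = SKBFL_PURE_ZEROCOPY,
	};

	if (downgrade_zcopy_pure(&sk, &skb) == 0)
		printf("charged %ld extra bytes, flags now %#x\n",
		       sk.charged, (unsigned int)skb.shinfo_flags);
	return 0;
}
```

The point of the subtraction is that only the portion of truesize that was never charged gets charged now, so socket write-memory accounting stays consistent once copied bytes start landing in a formerly pure-zerocopy skb.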
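
The tcp_read_sock() hunk replaces the silent `else if (used <= len)` guard with a WARN_ON_ONCE() plus a clamp, so a recv_actor callback that claims to have consumed more than it was offered no longer skips the seq/copied/offset bookkeeping. A small illustrative sketch of that control flow follows (userspace C; read_state and advance_read() are made-up names for this example, not kernel code):

```c
/*
 * Illustration of the tcp_read_sock() bookkeeping change: warn and clamp
 * "used" to "len" instead of silently skipping the updates.
 */
#include <stdio.h>

struct read_state {
	unsigned int seq;
	int copied;
	unsigned int offset;
};

static void advance_read(struct read_state *st, int used, int len)
{
	if (used > len) {               /* WARN_ON_ONCE() in the kernel */
		fprintf(stderr, "actor consumed %d > offered %d\n", used, len);
		used = len;             /* clamp rather than skip bookkeeping */
	}
	st->seq += used;
	st->copied += used;
	st->offset += used;
}

int main(void)
{
	struct read_state st = { .seq = 1000, .copied = 0, .offset = 0 };

	advance_read(&st, 300, 300);    /* well-behaved actor */
	advance_read(&st, 500, 200);    /* buggy actor: clamped to 200 */
	printf("seq=%u copied=%d offset=%u\n", st.seq, st.copied, st.offset);
	return 0;
}
```

Built with any C compiler, the second call prints the warning and the counters still advance by the clamped 200 bytes instead of stalling, which is the behavioral change the hunk makes.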