aboutsummaryrefslogtreecommitdiff
path: root/net/core
diff options
context:
space:
mode:
authorPavel Begunkov <asml.silence@gmail.com>2024-06-27 13:59:43 +0100
committerPaolo Abeni <pabeni@redhat.com>2024-07-02 12:06:50 +0200
commitaeb320fc05c74e1d3b429aa0e3a777b8a931c189 (patch)
treec653020dc9e79e01e3bccbfca123f887a53b1702 /net/core
parent7fb05423fed41686ccc1a76c20d486728f62023f (diff)
net: batch zerocopy_fill_skb_from_iter accounting
Instead of accounting every page range against the socket separately, do it in batch based on the change in skb->truesize. It's also moved into __zerocopy_sg_from_iter(), so that zerocopy_fill_skb_from_iter() is simpler and responsible for setting frags but not the accounting. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Reviewed-by: Willem de Bruijn <willemb@google.com> Reviewed-by: Jens Axboe <axboe@kernel.dk> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/datagram.c31
1 file changed, 18 insertions, 13 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index ef81d6ecbe1e..b0dccefd4a09 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -610,7 +610,7 @@ fault:
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
-static int zerocopy_fill_skb_from_iter(struct sock *sk, struct sk_buff *skb,
+static int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
struct iov_iter *from, size_t length)
{
int frag = skb_shinfo(skb)->nr_frags;
@@ -621,7 +621,6 @@ static int zerocopy_fill_skb_from_iter(struct sock *sk, struct sk_buff *skb,
int refs, order, n = 0;
size_t start;
ssize_t copied;
- unsigned long truesize;
if (frag == MAX_SKB_FRAGS)
return -EMSGSIZE;
@@ -633,17 +632,9 @@ static int zerocopy_fill_skb_from_iter(struct sock *sk, struct sk_buff *skb,
length -= copied;
- truesize = PAGE_ALIGN(copied + start);
skb->data_len += copied;
skb->len += copied;
- skb->truesize += truesize;
- if (sk && sk->sk_type == SOCK_STREAM) {
- sk_wmem_queued_add(sk, truesize);
- if (!skb_zcopy_pure(skb))
- sk_mem_charge(sk, truesize);
- } else {
- refcount_add(truesize, &skb->sk->sk_wmem_alloc);
- }
+ skb->truesize += PAGE_ALIGN(copied + start);
head = compound_head(pages[n]);
order = compound_order(head);
@@ -691,10 +682,24 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, struct iov_iter *from,
size_t length)
{
+ unsigned long orig_size = skb->truesize;
+ unsigned long truesize;
+ int ret;
+
if (msg && msg->msg_ubuf && msg->sg_from_iter)
return msg->sg_from_iter(sk, skb, from, length);
- else
- return zerocopy_fill_skb_from_iter(sk, skb, from, length);
+
+ ret = zerocopy_fill_skb_from_iter(skb, from, length);
+ truesize = skb->truesize - orig_size;
+
+ if (sk && sk->sk_type == SOCK_STREAM) {
+ sk_wmem_queued_add(sk, truesize);
+ if (!skb_zcopy_pure(skb))
+ sk_mem_charge(sk, truesize);
+ } else {
+ refcount_add(truesize, &skb->sk->sk_wmem_alloc);
+ }
+ return ret;
}
EXPORT_SYMBOL(__zerocopy_sg_from_iter);