aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEric Dumazet <[email protected]>2024-03-28 14:40:30 +0000
committerJakub Kicinski <[email protected]>2024-03-29 15:03:10 -0700
commit6a1f12dd85a8b24f871dfcf467378660af9c064d (patch)
treecb52518234de5cc5eabe543f60265f8b3699ae35
parent60557969951304dad829f2829019907dfb43ecb3 (diff)
udp: relax atomic operation on sk->sk_rmem_alloc
atomic_add_return() is more expensive than atomic_add() and seems overkill in the UDP rx fast path.

Signed-off-by: Eric Dumazet <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
-rw-r--r--net/ipv4/udp.c7
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6a39e7fa0616..19d7db4563ac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1516,12 +1516,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
size = skb->truesize;
udp_set_dev_scratch(skb);
- /* we drop only if the receive buf is full and the receive
- * queue contains some other skb
- */
- rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
- if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
- goto uncharge_drop;
+ atomic_add(size, &sk->sk_rmem_alloc);
spin_lock(&list->lock);
err = udp_rmem_schedule(sk, size);