aboutsummaryrefslogtreecommitdiff
path: root/include/net/ipv6_frag.h
diff options
context:
space:
mode:
authorDavid S. Miller <[email protected]>2019-01-25 21:37:11 -0800
committerDavid S. Miller <[email protected]>2019-01-25 21:37:51 -0800
commitdda6a7a3685bb1c35d3b500ab83d9780d681d69d (patch)
tree299dc0f94d0626e3f75d61450507135f0892389d /include/net/ipv6_frag.h
parentccaceadc4effcb83bb5e84026b6f23c1ae3e0608 (diff)
parent4c3510483d26420d2c2c7cc075ad872286cc5932 (diff)
Merge branch 'ipv6-defrag-rbtree'
Peter Oskolkov says: ==================== net: IP defrag: use rbtrees in IPv6 defragmentation Currently, IPv6 defragmentation code drops non-last fragments that are smaller than 1280 bytes: see commit 0ed4229b08c1 ("ipv6: defrag: drop non-last frags smaller than min mtu") This behavior is not specified in IPv6 RFCs and appears to break compatibility with some IPv6 implementations, as reported here: https://www.spinics.net/lists/netdev/msg543846.html This patchset contains four patches: - patch 1 moves rbtree-related code from IPv4 to files shared between IPv4/IPv6 - patch 2 changes IPv6 defragmentation code to use rbtrees for defrag queue - patch 3 changes nf_conntrack IPv6 defragmentation code to use rbtrees - patch 4 changes ip_defrag selftest to test changes made in the previous three patches. Along the way, the 1280-byte restrictions are removed. I plan to introduce similar changes to 6lowpan defragmentation code once I figure out how to test it. ==================== Reviewed-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
Diffstat (limited to 'include/net/ipv6_frag.h')
-rw-r--r--include/net/ipv6_frag.h11
1 files changed, 9 insertions, 2 deletions
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
index 6ced1e6899b6..28aa9b30aece 100644
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -82,8 +82,15 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
/* Don't send error if the first segment did not arrive. */
- head = fq->q.fragments;
- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
+ if (!(fq->q.flags & INET_FRAG_FIRST_IN))
+ goto out;
+
+ /* sk_buff::dev and sk_buff::rbnode are unionized. So we
+ * pull the head out of the tree in order to be able to
+ * deal with head->dev.
+ */
+ head = inet_frag_pull_head(&fq->q);
+ if (!head)
goto out;
head->dev = dev;