diff options
author     David S. Miller <[email protected]>  2018-07-09 14:55:54 -0700
committer  David S. Miller <[email protected]>  2018-07-09 14:55:54 -0700
commit     863f4fdb715c3328d4ff1ed547c5508c8e6e2b06 (patch)
tree       ae65bea3d9a4e669d59c4a765cfdf007f7d0ee38
parent     c47078d6a33fd78d882200cdaacbcfcd63318234 (diff)
parent     9af86f9338949a9369bda5e6fed69347d1813054 (diff)
Merge branch 'fix-use-after-free-bugs-in-skb-list-processing'
Edward Cree says:
====================
fix use-after-free bugs in skb list processing
A couple of bugs in skb list handling were spotted by Dan Carpenter, with
the help of Smatch; following up on them I found a couple more similar
cases. This series fixes them by changing the relevant loops to use the
dequeue-enqueue model (rather than in-place list modification).
v3: fixed another similar bug in __netif_receive_skb_list_core().
v2: dropped patch #3 (new list.h helper), per DaveM's request.
====================
Signed-off-by: David S. Miller <[email protected]>
 include/linux/netfilter.h | 10 +++++++---
 net/core/dev.c            | 30 ++++++++++++++++++++----------
 2 files changed, 27 insertions(+), 13 deletions(-)
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 5a5e0a2ab2a3..23b48de8c2e2 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -294,12 +294,16 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
 	     int (*okfn)(struct net *, struct sock *, struct sk_buff *))
 {
 	struct sk_buff *skb, *next;
+	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
-		int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
-		if (ret != 1)
-			list_del(&skb->list);
+		list_del(&skb->list);
+		if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
+			list_add_tail(&skb->list, &sublist);
 	}
+	/* Put passed packets back on main list */
+	list_splice(&sublist, head);
 }
 
 /* Call setsockopt() */
diff --git a/net/core/dev.c b/net/core/dev.c
index 89825c1eccdc..d13cddcac41f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4830,23 +4830,28 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
 	struct list_head sublist;
 	struct sk_buff *skb, *next;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct net_device *orig_dev = skb->dev;
 		struct packet_type *pt_prev = NULL;
 
+		list_del(&skb->list);
 		__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+		if (!pt_prev)
+			continue;
 		if (pt_curr != pt_prev || od_curr != orig_dev) {
 			/* dispatch old sublist */
-			list_cut_before(&sublist, head, &skb->list);
 			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
 			/* start new sublist */
+			INIT_LIST_HEAD(&sublist);
 			pt_curr = pt_prev;
 			od_curr = orig_dev;
 		}
+		list_add_tail(&skb->list, &sublist);
 	}
 
 	/* dispatch final sublist */
-	__netif_receive_skb_list_ptype(head, pt_curr, od_curr);
+	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
 }
 
 static int __netif_receive_skb(struct sk_buff *skb)
@@ -4982,25 +4987,30 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 {
 	struct bpf_prog *xdp_prog = NULL;
 	struct sk_buff *skb, *next;
+	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		net_timestamp_check(netdev_tstamp_prequeue, skb);
-		if (skb_defer_rx_timestamp(skb))
-			/* Handled, remove from list */
-			list_del(&skb->list);
+		list_del(&skb->list);
+		if (!skb_defer_rx_timestamp(skb))
+			list_add_tail(&skb->list, &sublist);
 	}
+	list_splice_init(&sublist, head);
 
 	if (static_branch_unlikely(&generic_xdp_needed_key)) {
 		preempt_disable();
 		rcu_read_lock();
 		list_for_each_entry_safe(skb, next, head, list) {
 			xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-			if (do_xdp_generic(xdp_prog, skb) != XDP_PASS)
-				/* Dropped, remove from list */
-				list_del(&skb->list);
+			list_del(&skb->list);
+			if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
+				list_add_tail(&skb->list, &sublist);
 		}
		rcu_read_unlock();
 		preempt_enable();
+		/* Put passed packets back on main list */
+		list_splice_init(&sublist, head);
 	}
 
 	rcu_read_lock();
@@ -5011,9 +5021,9 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
 			if (cpu >= 0) {
-				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
-				/* Handled, remove from list */
+				/* Will be handled, remove from list */
 				list_del(&skb->list);
+				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			}
 		}
 	}