Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c        13
-rw-r--r--   net/core/ethtool.c     2
-rw-r--r--   net/core/net-sysfs.c  12
-rw-r--r--   net/core/netpoll.c     2
-rw-r--r--   net/core/sock.c       12
5 files changed, 15 insertions, 26 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 4040673f806a..fc1e289397f5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2213,6 +2213,17 @@ __be16 skb_network_protocol(struct sk_buff *skb)
 	__be16 type = skb->protocol;
 	int vlan_depth = ETH_HLEN;
 
+	/* Tunnel gso handlers can set protocol to ethernet. */
+	if (type == htons(ETH_P_TEB)) {
+		struct ethhdr *eth;
+
+		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
+			return 0;
+
+		eth = (struct ethhdr *)skb_mac_header(skb);
+		type = eth->h_proto;
+	}
+
 	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
 		struct vlan_hdr *vh;
 
@@ -2456,7 +2467,7 @@ EXPORT_SYMBOL(netif_skb_features);
  *	2. skb is fragmented and the device does not support SG.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-				      int features)
+				      netdev_features_t features)
 {
 	return skb_is_nonlinear(skb) &&
 			((skb_has_frag_list(skb) &&
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 5a934ef90f8b..22efdaa76ebf 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1421,7 +1421,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	void __user *useraddr = ifr->ifr_data;
 	u32 ethcmd;
 	int rc;
-	u32 old_features;
+	netdev_features_t old_features;
 
 	if (!dev || !netif_device_present(dev))
 		return -ENODEV;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 7427ab5e27d8..981fed397d1d 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -606,21 +606,11 @@ static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 	return sprintf(buf, "%lu\n", val);
 }
 
-static void rps_dev_flow_table_release_work(struct work_struct *work)
-{
-	struct rps_dev_flow_table *table = container_of(work,
-	    struct rps_dev_flow_table, free_work);
-
-	vfree(table);
-}
-
 static void rps_dev_flow_table_release(struct rcu_head *rcu)
 {
 	struct rps_dev_flow_table *table = container_of(rcu,
 	    struct rps_dev_flow_table, rcu);
-
-	INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
-	schedule_work(&table->free_work);
+	vfree(table);
 }
 
 static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a5802a8b652f..cec074be8c43 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -206,7 +206,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 	 * the dev_open/close paths use this to block netpoll activity
 	 * while changing device state
 	 */
-	if (!down_trylock(&ni->dev_lock))
+	if (down_trylock(&ni->dev_lock))
 		return;
 
 	if (!netif_running(dev)) {
diff --git a/net/core/sock.c b/net/core/sock.c
index d4f4cea726e7..6ba327da79e1 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1217,18 +1217,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
 #endif
 }
 
-/*
- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
- * un-modified. Special care is taken when initializing object to zero.
- */
-static inline void sk_prot_clear_nulls(struct sock *sk, int size)
-{
-	if (offsetof(struct sock, sk_node.next) != 0)
-		memset(sk, 0, offsetof(struct sock, sk_node.next));
-	memset(&sk->sk_node.pprev, 0,
-	       size - offsetof(struct sock, sk_node.pprev));
-}
-
 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
 {
 	unsigned long nulls1, nulls2;