Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  490
1 file changed, 177 insertions(+), 313 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 718931d951bc..45ae33e15303 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -28,6 +28,7 @@
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <linux/numa.h>
+#include <generated/utsrelease.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
@@ -35,7 +36,7 @@
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include <net/xfrm.h>
#include "ixgbe.h"
@@ -56,8 +57,6 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "5.1.0-k"
-const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2016 Intel Corporation.";
@@ -165,7 +164,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_AUTHOR("Intel Corporation, <[email protected]>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *ixgbe_wq;
@@ -1397,7 +1395,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
IXGBE_DCA_CTRL_DCA_MODE_CB2);
break;
}
- /* fall through - DCA is disabled. */
+ fallthrough; /* DCA is disabled. */
case DCA_PROVIDER_REMOVE:
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
dca_remove_requester(dev);
@@ -2097,10 +2095,8 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -2163,10 +2159,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb to around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
@@ -2215,7 +2208,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
case XDP_PASS:
break;
case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf)) {
result = IXGBE_XDP_CONSUMED;
break;
@@ -2231,10 +2224,10 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fallthrough */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping packet */
+ fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
result = IXGBE_XDP_CONSUMED;
break;
@@ -2244,19 +2237,30 @@ xdp_out:
return ERR_PTR(-result);
}
+static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
+
static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
unsigned int size)
{
+ unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
#if (PAGE_SIZE < 8192)
- unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
-
rx_buffer->page_offset ^= truesize;
#else
- unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
- SKB_DATA_ALIGN(size);
-
rx_buffer->page_offset += truesize;
#endif
}
@@ -2290,6 +2294,11 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
xdp.rxq = &rx_ring->xdp_rxq;
+ /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
+#endif
+
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
@@ -2323,7 +2332,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
xdp.data_hard_start = xdp.data -
ixgbe_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
-
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depend on len size */
+ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
+#endif
skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
}
@@ -2954,35 +2966,6 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
/* skip the flush */
}
-static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
- u64 qmask)
-{
- u32 mask;
- struct ixgbe_hw *hw = &adapter->hw;
-
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- mask = (qmask & 0xFFFFFFFF);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
- mask = (qmask >> 32);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
- break;
- default:
- break;
- }
- /* skip the flush */
-}
-
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
@@ -3019,7 +3002,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_82599EB:
mask |= IXGBE_EIMS_GPI_SDP1(hw);
mask |= IXGBE_EIMS_GPI_SDP2(hw);
- /* fall through */
+ fallthrough;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
@@ -3168,7 +3151,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
#endif
ixgbe_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
ixgbe_clean_tx_irq(q_vector, ring, budget);
@@ -3188,7 +3171,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
per_ring_budget = budget;
ixgbe_for_each_ring(ring, q_vector->rx) {
- int cleaned = ring->xsk_umem ?
+ int cleaned = ring->xsk_pool ?
ixgbe_clean_rx_irq_zc(q_vector, ring,
per_ring_budget) :
ixgbe_clean_rx_irq(q_vector, ring,
@@ -3325,7 +3308,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
- /* Fall through */
+ fallthrough;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
@@ -3483,9 +3466,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
u32 txdctl = IXGBE_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
- ring->xsk_umem = NULL;
+ ring->xsk_pool = NULL;
if (ring_is_xdp(ring))
- ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+ ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
/* disable queue to avoid issues while updating state */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
@@ -3725,9 +3708,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
- if (rx_ring->xsk_umem) {
- u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
- XDP_PACKET_HEADROOM;
+ if (rx_ring->xsk_pool) {
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
/* If the MAC support setting RXDCTL.RLPML, the
* SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
@@ -4072,13 +4054,12 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
u8 reg_idx = ring->reg_idx;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
- if (ring->xsk_umem) {
- ring->zca.free = ixgbe_zca_free;
+ ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
+ if (ring->xsk_pool) {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_ZERO_COPY,
- &ring->zca));
-
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
} else {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL));
@@ -4133,9 +4114,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
#endif
}
- if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
- u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
- XDP_PACKET_HEADROOM;
+ if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
@@ -4157,7 +4137,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- if (ring->xsk_umem)
+ if (ring->xsk_pool)
ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
else
ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
@@ -4350,7 +4330,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
case ixgbe_mac_x550em_a:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
- /* fall through */
+ fallthrough;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
/* Disable RSC for ACK packets */
@@ -5009,24 +4989,41 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
napi_disable(&adapter->q_vector[q_idx]->napi);
}
-static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
+static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 vxlanctrl;
+ struct udp_tunnel_info ti;
- if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
- IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
- return;
+ udp_tunnel_nic_get_port(dev, table, 0, &ti);
+ if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+ adapter->vxlan_port = ti.port;
+ else
+ adapter->geneve_port = ti.port;
- vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
- IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL,
+ ntohs(adapter->vxlan_port) |
+ ntohs(adapter->geneve_port) <<
+ IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
+ return 0;
+}
- if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
- adapter->vxlan_port = 0;
+static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = {
+ .sync_table = ixgbe_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ },
+};
- if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
- adapter->geneve_port = 0;
-}
+static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = {
+ .sync_table = ixgbe_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
#ifdef CONFIG_IXGBE_DCB
/**
@@ -5290,7 +5287,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
u16 i = rx_ring->next_to_clean;
struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
ixgbe_xsk_clean_rx_ring(rx_ring);
goto skip_free;
}
@@ -5394,9 +5391,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
return err;
}
-static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
+static int ixgbe_macvlan_up(struct net_device *vdev,
+ struct netdev_nested_priv *priv)
{
- struct ixgbe_adapter *adapter = data;
+ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
struct ixgbe_fwd_adapter *accel;
if (!netif_is_macvlan(vdev))
@@ -5413,8 +5411,12 @@ static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
{
+ struct netdev_nested_priv priv = {
+ .data = (void *)adapter,
+ };
+
netdev_walk_all_upper_dev_rcu(adapter->netdev,
- ixgbe_macvlan_up, adapter);
+ ixgbe_macvlan_up, &priv);
}
static void ixgbe_configure(struct ixgbe_adapter *adapter)
@@ -5516,9 +5518,13 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
return ret;
speed = hw->phy.autoneg_advertised;
- if ((!speed) && (hw->mac.ops.get_link_capabilities))
+ if (!speed && hw->mac.ops.get_link_capabilities) {
ret = hw->mac.ops.get_link_capabilities(hw, &speed,
&autoneg);
+ speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
+ IXGBE_LINK_SPEED_2_5GB_FULL);
+ }
+
if (ret)
return ret;
@@ -5671,7 +5677,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
- WARN_ON(in_interrupt());
/* put off any impending NetWatchDogTimeout */
netif_trans_update(adapter->netdev);
@@ -5900,7 +5905,7 @@ dma_engine_disable:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
- /* fall through */
+ fallthrough;
default:
break;
}
@@ -5978,7 +5983,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
u16 i = tx_ring->next_to_clean;
struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- if (tx_ring->xsk_umem) {
+ if (tx_ring->xsk_pool) {
ixgbe_xsk_clean_tx_ring(tx_ring);
goto out;
}
@@ -6174,8 +6179,9 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
/**
* ixgbe_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue number that timed out
**/
-static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -6343,7 +6349,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_x550em_a:
- adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
@@ -6352,7 +6357,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
default:
break;
}
- /* fall through */
+ fallthrough;
case ixgbe_mac_X550EM_x:
#ifdef CONFIG_IXGBE_DCB
adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
@@ -6363,14 +6368,13 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
adapter->fcoe.up = 0;
#endif /* IXGBE_DCB */
#endif /* IXGBE_FCOE */
- /* Fall Through */
+ fallthrough;
case ixgbe_mac_X550:
if (hw->mac.type == ixgbe_mac_X550)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
- adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
break;
default:
break;
@@ -6809,8 +6813,7 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
- ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
- udp_tunnel_get_rx_info(netdev);
+ udp_tunnel_nic_reset_ntf(netdev);
return 0;
@@ -6874,32 +6877,20 @@ int ixgbe_close(struct net_device *netdev)
return 0;
}
-#ifdef CONFIG_PM
-static int ixgbe_resume(struct pci_dev *pdev)
+static int __maybe_unused ixgbe_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
u32 err;
adapter->hw.hw_addr = adapter->io_addr;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- /*
- * pci_restore_state clears dev->state_saved so call
- * pci_save_state to restore it.
- */
- pci_save_state(pdev);
- err = pci_enable_device_mem(pdev);
- if (err) {
- e_dev_err("Cannot enable PCI device from suspend\n");
- return err;
- }
smp_mb__before_atomic();
clear_bit(__IXGBE_DISABLED, &adapter->state);
pci_set_master(pdev);
- pci_wake_from_d3(pdev, false);
+ device_wakeup_disable(dev_d);
ixgbe_reset(adapter);
@@ -6917,7 +6908,6 @@ static int ixgbe_resume(struct pci_dev *pdev)
return err;
}
-#endif /* CONFIG_PM */
static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
@@ -6926,9 +6916,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
struct ixgbe_hw *hw = &adapter->hw;
u32 ctrl;
u32 wufc = adapter->wol;
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
rtnl_lock();
netif_device_detach(netdev);
@@ -6939,12 +6926,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_clear_interrupt_scheme(adapter);
rtnl_unlock();
-#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
-#endif
if (hw->mac.ops.stop_link_on_d3)
hw->mac.ops.stop_link_on_d3(hw);
@@ -6999,26 +6980,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-#ifdef CONFIG_PM
-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ixgbe_suspend(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
int retval;
bool wake;
retval = __ixgbe_shutdown(pdev, &wake);
- if (retval)
- return retval;
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
+ device_set_wakeup_enable(dev_d, wake);
- return 0;
+ return retval;
}
-#endif /* CONFIG_PM */
static void ixgbe_shutdown(struct pci_dev *pdev)
{
@@ -7064,7 +7037,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
+
+ if (!rx_ring)
+ continue;
non_eop_descs += rx_ring->rx_stats.non_eop_descs;
alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
@@ -7085,15 +7061,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
packets = 0;
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
+
+ if (!tx_ring)
+ continue;
restart_queue += tx_ring->tx_stats.restart_queue;
tx_busy += tx_ring->tx_stats.tx_busy;
bytes += tx_ring->stats.bytes;
packets += tx_ring->stats.packets;
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+ struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
+ if (!xdp_ring)
+ continue;
restart_queue += xdp_ring->tx_stats.restart_queue;
tx_busy += xdp_ring->tx_stats.tx_busy;
bytes += xdp_ring->stats.bytes;
@@ -7175,7 +7156,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
- /* fall through */
+ fallthrough;
case ixgbe_mac_82599EB:
for (i = 0; i < 16; i++)
adapter->hw_rx_no_dma_resources +=
@@ -7924,12 +7905,6 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
- if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
- rtnl_lock();
- adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
- udp_tunnel_get_rx_info(adapter->netdev);
- rtnl_unlock();
- }
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
@@ -8084,7 +8059,7 @@ csum_failed:
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- /* fall through */
+ fallthrough;
case offsetof(struct udphdr, check):
break;
case offsetof(struct sctphdr, checksum):
@@ -8096,7 +8071,7 @@ csum_failed:
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
- /* fall through */
+ fallthrough;
default:
skb_checksum_help(skb);
goto csum_failed;
@@ -8539,7 +8514,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
- /* fall through */
+ fallthrough;
default:
return netdev_pick_tx(dev, skb, sb_dev);
}
@@ -8873,7 +8848,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
case SIOCGMIIPHY:
if (!adapter->hw.phy.ops.read_reg)
return -EOPNOTSUPP;
- /* fall through */
+ fallthrough;
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
@@ -9048,9 +9023,10 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
}
#endif /* CONFIG_IXGBE_DCB */
-static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
+static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
+ struct netdev_nested_priv *priv)
{
- struct ixgbe_adapter *adapter = data;
+ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
struct ixgbe_fwd_adapter *accel;
int pool;
@@ -9087,13 +9063,16 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct netdev_nested_priv priv = {
+ .data = (void *)adapter,
+ };
/* flush any stale bits out of the fwd bitmask */
bitmap_clear(adapter->fwd_bitmask, 1, 63);
/* walk through upper devices reassigning pools */
netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
- adapter);
+ &priv);
}
/**
@@ -9267,14 +9246,18 @@ struct upper_walk_data {
u8 queue;
};
-static int get_macvlan_queue(struct net_device *upper, void *_data)
+static int get_macvlan_queue(struct net_device *upper,
+ struct netdev_nested_priv *priv)
{
if (netif_is_macvlan(upper)) {
struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
- struct upper_walk_data *data = _data;
- struct ixgbe_adapter *adapter = data->adapter;
- int ifindex = data->ifindex;
+ struct ixgbe_adapter *adapter;
+ struct upper_walk_data *data;
+ int ifindex;
+ data = (struct upper_walk_data *)priv->data;
+ ifindex = data->ifindex;
+ adapter = data->adapter;
if (vadapter && upper->ifindex == ifindex) {
data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
data->action = data->queue;
@@ -9290,6 +9273,7 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
{
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
unsigned int num_vfs = adapter->num_vfs, vf;
+ struct netdev_nested_priv priv;
struct upper_walk_data data;
struct net_device *upper;
@@ -9309,8 +9293,9 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
data.ifindex = ifindex;
data.action = 0;
data.queue = 0;
+ priv.data = (void *)&data;
if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
- get_macvlan_queue, &data)) {
+ get_macvlan_queue, &priv)) {
*action = data.action;
*queue = data.queue;
@@ -9787,26 +9772,6 @@ static int ixgbe_set_features(struct net_device *netdev,
netdev->features = features;
- if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
- if (features & NETIF_F_RXCSUM) {
- adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
- } else {
- u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
-
- ixgbe_clear_udp_tunnel_port(adapter, port_mask);
- }
- }
-
- if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
- if (features & NETIF_F_RXCSUM) {
- adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
- } else {
- u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
-
- ixgbe_clear_udp_tunnel_port(adapter, port_mask);
- }
- }
-
if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
ixgbe_reset_l2fw_offload(adapter);
else if (need_reset)
@@ -9818,118 +9783,6 @@ static int ixgbe_set_features(struct net_device *netdev,
return 1;
}
-/**
- * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
- * @dev: The port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
- struct udp_tunnel_info *ti)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ixgbe_hw *hw = &adapter->hw;
- __be16 port = ti->port;
- u32 port_shift = 0;
- u32 reg;
-
- if (ti->sa_family != AF_INET)
- return;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
- return;
-
- if (adapter->vxlan_port == port)
- return;
-
- if (adapter->vxlan_port) {
- netdev_info(dev,
- "VXLAN port %d set, not adding port %d\n",
- ntohs(adapter->vxlan_port),
- ntohs(port));
- return;
- }
-
- adapter->vxlan_port = port;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
- return;
-
- if (adapter->geneve_port == port)
- return;
-
- if (adapter->geneve_port) {
- netdev_info(dev,
- "GENEVE port %d set, not adding port %d\n",
- ntohs(adapter->geneve_port),
- ntohs(port));
- return;
- }
-
- port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
- adapter->geneve_port = port;
- break;
- default:
- return;
- }
-
- reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
- IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
-}
-
-/**
- * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
- * @dev: The port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
- struct udp_tunnel_info *ti)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- u32 port_mask;
-
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
- ti->type != UDP_TUNNEL_TYPE_GENEVE)
- return;
-
- if (ti->sa_family != AF_INET)
- return;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
- return;
-
- if (adapter->vxlan_port != ti->port) {
- netdev_info(dev, "VXLAN port %d not found\n",
- ntohs(ti->port));
- return;
- }
-
- port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
- return;
-
- if (adapter->geneve_port != ti->port) {
- netdev_info(dev, "GENEVE port %d not found\n",
- ntohs(ti->port));
- return;
- }
-
- port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
- break;
- default:
- return;
- }
-
- ixgbe_clear_udp_tunnel_port(adapter, port_mask);
- adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
-}
-
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
@@ -10303,7 +10156,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
*/
if (need_reset && prog)
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->xdp_ring[i]->xsk_umem)
+ if (adapter->xdp_ring[i]->xsk_pool)
(void)ixgbe_xsk_wakeup(adapter->netdev, i,
XDP_WAKEUP_RX);
@@ -10317,12 +10170,8 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ixgbe_xdp_setup(dev, xdp->prog);
- case XDP_QUERY_PROG:
- xdp->prog_id = adapter->xdp_prog ?
- adapter->xdp_prog->aux->id : 0;
- return 0;
- case XDP_SETUP_XSK_UMEM:
- return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
@@ -10419,8 +10268,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
- .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
- .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = ixgbe_features_check,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
@@ -10663,7 +10512,7 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
/* only support first port */
if (hw->bus.func != 0)
break;
- /* fall through */
+ fallthrough;
case IXGBE_SUBDEV_ID_82599_SP_560FLR:
case IXGBE_SUBDEV_ID_82599_SFP:
case IXGBE_SUBDEV_ID_82599_RNDC:
@@ -10865,6 +10714,18 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
+ break;
+ case ixgbe_mac_x550em_a:
+ netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a;
+ break;
+ default:
+ break;
+ }
+
/* Make sure the SWFW semaphore is in a valid state */
if (hw->mac.ops.init_swfw_sync)
hw->mac.ops.init_swfw_sync(hw);
@@ -11159,8 +11020,8 @@ skip_sriov:
*/
if (hw->mac.ops.set_fw_drv_ver)
hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
- sizeof(ixgbe_driver_version) - 1,
- ixgbe_driver_version);
+ sizeof(UTS_RELEASE) - 1,
+ UTS_RELEASE);
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
@@ -11180,10 +11041,14 @@ skip_sriov:
IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
true);
- ixgbe_mii_bus_init(hw);
+ err = ixgbe_mii_bus_init(hw);
+ if (err)
+ goto err_netdev;
return 0;
+err_netdev:
+ unregister_netdev(netdev);
err_register:
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
@@ -11494,16 +11359,15 @@ static const struct pci_error_handlers ixgbe_err_handler = {
.resume = ixgbe_io_resume,
};
+static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
+
static struct pci_driver ixgbe_driver = {
- .name = ixgbe_driver_name,
- .id_table = ixgbe_pci_tbl,
- .probe = ixgbe_probe,
- .remove = ixgbe_remove,
-#ifdef CONFIG_PM
- .suspend = ixgbe_suspend,
- .resume = ixgbe_resume,
-#endif
- .shutdown = ixgbe_shutdown,
+ .name = ixgbe_driver_name,
+ .id_table = ixgbe_pci_tbl,
+ .probe = ixgbe_probe,
+ .remove = ixgbe_remove,
+ .driver.pm = &ixgbe_pm_ops,
+ .shutdown = ixgbe_shutdown,
.sriov_configure = ixgbe_pci_sriov_configure,
.err_handler = &ixgbe_err_handler
};
@@ -11517,7 +11381,7 @@ static struct pci_driver ixgbe_driver = {
static int __init ixgbe_init_module(void)
{
int ret;
- pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
+ pr_info("%s\n", ixgbe_driver_string);
pr_info("%s\n", ixgbe_copyright);
ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);