Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_main.c')
 drivers/net/ethernet/intel/i40e/i40e_main.c | 2222 ++++++++++++++-------
 1 file changed, 1295 insertions(+), 927 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ad4cf639430e..2db93d3f6d23 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,11 +27,18 @@
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
+#include <linux/bpf.h>
/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
+/* All i40e tracepoints are defined by the include below, which
+ * must be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "i40e_trace.h"
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -39,9 +46,9 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
-#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_MAJOR 2
+#define DRV_VERSION_MINOR 1
+#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
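
__stringify here is the kernel's two-level stringification helper from <linux/stringify.h>; the indirection forces the numeric macros to expand before they are turned into string literals, which adjacent-literal concatenation then joins:

    /* from <linux/stringify.h> */
    #define __stringify_1(x...)    #x
    #define __stringify(x...)      __stringify_1(x)

    /* DRV_VERSION -> "2" "." "1" "." "14" "-k" -> "2.1.14-k" */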
@@ -50,13 +57,16 @@ static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
-static void i40e_handle_reset_warning(struct i40e_pf *pf);
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static int i40e_reset(struct i40e_pf *pf);
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
@@ -77,7 +87,6 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
@@ -287,8 +296,8 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
**/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
- if (!test_bit(__I40E_DOWN, &pf->state) &&
- !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ if (!test_bit(__I40E_DOWN, pf->state) &&
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
queue_work(i40e_wq, &pf->service_task);
}
@@ -300,11 +309,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
* device is munged, not just the one netdev port, so go for the full
* reset.
**/
-#ifdef I40E_FCOE
-void i40e_tx_timeout(struct net_device *netdev)
-#else
static void i40e_tx_timeout(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -373,13 +378,13 @@ static void i40e_tx_timeout(struct net_device *netdev)
switch (pf->tx_timeout_recovery_level) {
case 1:
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
break;
case 2:
- set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
break;
case 3:
- set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
@@ -403,21 +408,35 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
}
/**
+ * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
+ * @ring: Tx ring to get statistics from
+ * @stats: statistics entry to be updated
+ **/
+static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 bytes, packets;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+}
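
The fetch_begin/fetch_retry pair is the reader half of the u64_stats_sync seqcount protocol (which only does real work on 32-bit systems); it is correct only because the ring's hot path brackets its counter updates with the writer half. A minimal sketch of the matching writer side, assuming a Tx completion path that has just tallied packets and bytes (the real update lives in i40e_txrx.c):

    #include <linux/u64_stats_sync.h>

    static void example_update_tx_stats(struct i40e_ring *ring,
                                        unsigned int packets,
                                        unsigned int bytes)
    {
        u64_stats_update_begin(&ring->syncp);
        ring->stats.packets += packets;
        ring->stats.bytes += bytes;
        u64_stats_update_end(&ring->syncp);
    }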
+
+/**
* i40e_get_netdev_stats_struct - Get statistics for netdev interface
* @netdev: network interface device structure
*
* Returns the address of the device statistics structure.
* The statistics are actually updated from the service task.
**/
-#ifdef I40E_FCOE
-struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
- struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
-#else
-static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
- struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
-#endif
+static void i40e_get_netdev_stats_struct(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_ring *tx_ring, *rx_ring;
@@ -425,11 +444,11 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
- if (test_bit(__I40E_DOWN, &vsi->state))
- return stats;
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
if (!vsi->tx_rings)
- return stats;
+ return;
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -439,15 +458,8 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
+ i40e_get_netdev_stats_struct_tx(tx_ring, stats);
- do {
- start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
- packets = tx_ring->stats.packets;
- bytes = tx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
-
- stats->tx_packets += packets;
- stats->tx_bytes += bytes;
rx_ring = &tx_ring[1];
do {
@@ -458,6 +470,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
stats->rx_packets += packets;
stats->rx_bytes += bytes;
+
+ if (i40e_enabled_xdp_vsi(vsi))
+ i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
}
rcu_read_unlock();
@@ -469,8 +484,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
stats->rx_dropped = vsi_stats->rx_dropped;
stats->rx_crc_errors = vsi_stats->rx_crc_errors;
stats->rx_length_errors = vsi_stats->rx_length_errors;
-
- return stats;
}
/**
@@ -730,55 +743,6 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
veb->stat_offsets_loaded = true;
}
-#ifdef I40E_FCOE
-/**
- * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
- * @vsi: the VSI that is capable of doing FCoE
- **/
-static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
-{
- struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_fcoe_stats *ofs;
- struct i40e_fcoe_stats *fs; /* device's eth stats */
- int idx;
-
- if (vsi->type != I40E_VSI_FCOE)
- return;
-
- idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
- fs = &vsi->fcoe_stats;
- ofs = &vsi->fcoe_stats_offsets;
-
- i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
- i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
- i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
- i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
- i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
- i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
- i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->fcoe_last_error, &fs->fcoe_last_error);
- i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
- vsi->fcoe_stat_offsets_loaded,
- &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
-
- vsi->fcoe_stat_offsets_loaded = true;
-}
-
-#endif
/**
* i40e_update_vsi_stats - Update the vsi statistics counters.
* @vsi: the VSI to be updated
@@ -797,7 +761,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
u32 tx_restart, tx_busy;
- u64 tx_lost_interrupt;
struct i40e_ring *p;
u32 rx_page, rx_buf;
u64 bytes, packets;
@@ -808,8 +771,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
u64 tx_p, tx_b;
u16 q;
- if (test_bit(__I40E_DOWN, &vsi->state) ||
- test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
ns = i40e_get_vsi_stats_struct(vsi);
@@ -823,7 +786,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
- tx_lost_interrupt = 0;
rx_page = 0;
rx_buf = 0;
rcu_read_lock();
@@ -842,7 +804,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
- tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
/* Rx queue is part of the same block as Tx queue */
p = &p[1];
@@ -861,7 +822,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
vsi->tx_force_wb = tx_force_wb;
- vsi->tx_lost_interrupt = tx_lost_interrupt;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
@@ -1108,13 +1068,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
&osd->rx_lpi_count, &nsd->rx_lpi_count);
if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
- !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+ !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
nsd->fd_sb_status = true;
else
nsd->fd_sb_status = false;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
- !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
nsd->fd_atr_status = true;
else
nsd->fd_atr_status = false;
@@ -1136,9 +1096,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
i40e_update_pf_stats(pf);
i40e_update_vsi_stats(vsi);
-#ifdef I40E_FCOE
- i40e_update_fcoe_stats(vsi);
-#endif
}
/**
@@ -1260,7 +1217,9 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
struct hlist_head *tmp_del_list,
int vlan_filters)
{
+ s16 pvid = le16_to_cpu(vsi->info.pvid);
struct i40e_mac_filter *f, *add_head;
+ struct i40e_new_mac_filter *new;
struct hlist_node *h;
int bkt, new_vlan;
@@ -1279,13 +1238,13 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
*/
/* Update the filters about to be added in place */
- hlist_for_each_entry(f, tmp_add_list, hlist) {
- if (vsi->info.pvid && f->vlan != vsi->info.pvid)
- f->vlan = vsi->info.pvid;
- else if (vlan_filters && f->vlan == I40E_VLAN_ANY)
- f->vlan = 0;
- else if (!vlan_filters && f->vlan == 0)
- f->vlan = I40E_VLAN_ANY;
+ hlist_for_each_entry(new, tmp_add_list, hlist) {
+ if (pvid && new->f->vlan != pvid)
+ new->f->vlan = pvid;
+ else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
+ new->f->vlan = 0;
+ else if (!vlan_filters && new->f->vlan == 0)
+ new->f->vlan = I40E_VLAN_ANY;
}
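
The three-way rewrite above is easier to read as cases. Worked examples for a filter on MAC M, recalling that vlan 0 means untagged-only and I40E_VLAN_ANY matches any tag:

    /* PVID = 5, filter (M, vlan 7)            -> rewritten to (M, vlan 5)
     * no PVID, vlan_filters > 0, (M, ANY)     -> rewritten to (M, vlan 0)
     * no PVID, vlan_filters == 0, (M, vlan 0) -> rewritten to (M, ANY)
     *
     * A port VLAN overrides everything; otherwise filters are kept
     * consistent with whether any VLAN filters are active on the VSI.
     */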
/* Update the remaining active filters */
@@ -1295,12 +1254,12 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
* order to avoid duplicating code for adding the new filter
* then deleting the old filter.
*/
- if ((vsi->info.pvid && f->vlan != vsi->info.pvid) ||
+ if ((pvid && f->vlan != pvid) ||
(vlan_filters && f->vlan == I40E_VLAN_ANY) ||
(!vlan_filters && f->vlan == 0)) {
/* Determine the new vlan we will be adding */
- if (vsi->info.pvid)
- new_vlan = vsi->info.pvid;
+ if (pvid)
+ new_vlan = pvid;
else if (vlan_filters)
new_vlan = 0;
else
@@ -1311,9 +1270,16 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
if (!add_head)
return -ENOMEM;
- /* Put the replacement filter into the add list */
- hash_del(&add_head->hlist);
- hlist_add_head(&add_head->hlist, tmp_add_list);
+ /* Create a temporary i40e_new_mac_filter */
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ return -ENOMEM;
+
+ new->f = add_head;
+ new->state = add_head->state;
+
+ /* Add the new filter to the tmp list */
+ hlist_add_head(&new->hlist, tmp_add_list);
/* Put the original filter into the delete list */
f->state = I40E_FILTER_REMOVE;
@@ -1398,7 +1364,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* to failed, so we don't bother to try sending the filter
* to the hardware.
*/
- if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
+ if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
f->state = I40E_FILTER_FAILED;
else
f->state = I40E_FILTER_NEW;
@@ -1440,23 +1406,25 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
* instead of list_for_each_entry().
**/
-static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
if (!f)
return;
+ /* If the filter was never added to firmware then we can just delete it
+ * directly and we don't want to set the status to remove or else an
+ * admin queue command will unnecessarily fire.
+ */
if ((f->state == I40E_FILTER_FAILED) ||
(f->state == I40E_FILTER_NEW)) {
- /* this one never got added by the FW. Just remove it,
- * no need to sync anything.
- */
hash_del(&f->hlist);
kfree(f);
} else {
f->state = I40E_FILTER_REMOVE;
- vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
+
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
/**
@@ -1483,18 +1451,19 @@ void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
}
/**
- * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
+ * i40e_add_mac_filter - Add a MAC filter for all active VLANs
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
*
- * Goes through all the macvlan filters and adds a macvlan filter for each
+ * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
+ * go through all the macvlan filters and add a macvlan filter for each
* unique vlan that already exists. If a PVID has been assigned, instead only
* add the macaddr to that VLAN.
*
* Returns last filter added on success, else NULL
**/
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
- const u8 *macaddr)
+struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+ const u8 *macaddr)
{
struct i40e_mac_filter *f, *add = NULL;
struct hlist_node *h;
@@ -1504,6 +1473,9 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
return i40e_add_filter(vsi, macaddr,
le16_to_cpu(vsi->info.pvid));
+ if (!i40e_is_vsi_in_vlan(vsi))
+ return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
+
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_REMOVE)
continue;
@@ -1516,15 +1488,16 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
}
/**
- * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
+ * i40e_del_mac_filter - Remove a MAC filter from all VLANs
* @vsi: the VSI to be searched
* @macaddr: the mac address to be removed
*
- * Removes a given MAC address from a VSI, regardless of VLAN
+ * Removes a given MAC address from a VSI regardless of what VLAN it has been
+ * associated with.
*
* Returns 0 for success, or error
**/
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
+int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
@@ -1553,11 +1526,7 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
*
* Returns 0 on success, negative on failure
**/
-#ifdef I40E_FCOE
-int i40e_set_mac(struct net_device *netdev, void *p)
-#else
static int i40e_set_mac(struct net_device *netdev, void *p)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -1574,8 +1543,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
return 0;
}
- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
return -EADDRNOTAVAIL;
if (ether_addr_equal(hw->mac.addr, addr->sa_data))
@@ -1585,8 +1554,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
spin_lock_bh(&vsi->mac_filter_hash_lock);
- i40e_del_mac_all_vlan(vsi, netdev->dev_addr);
- i40e_put_mac_in_vlan(vsi, addr->sa_data);
+ i40e_del_mac_filter(vsi, netdev->dev_addr);
+ i40e_add_mac_filter(vsi, addr->sa_data);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
@@ -1617,17 +1586,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
*
* Setup VSI queue mapping for enabled traffic classes.
**/
-#ifdef I40E_FCOE
-void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
- struct i40e_vsi_context *ctxt,
- u8 enabled_tc,
- bool is_add)
-#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
u8 enabled_tc,
bool is_add)
-#endif
{
struct i40e_pf *pf = vsi->back;
u16 sections = 0;
@@ -1677,11 +1639,6 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
qcount = min_t(int, pf->alloc_rss_size,
num_tc_qps);
break;
-#ifdef I40E_FCOE
- case I40E_VSI_FCOE:
- qcount = num_tc_qps;
- break;
-#endif
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
@@ -1762,14 +1719,8 @@ static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_mac_filter *f;
-
- if (i40e_is_vsi_in_vlan(vsi))
- f = i40e_put_mac_in_vlan(vsi, addr);
- else
- f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY);
- if (f)
+ if (i40e_add_mac_filter(vsi, addr))
return 0;
else
return -ENOMEM;
@@ -1788,10 +1739,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- if (i40e_is_vsi_in_vlan(vsi))
- i40e_del_mac_all_vlan(vsi, addr);
- else
- i40e_del_filter(vsi, addr, I40E_VLAN_ANY);
+ i40e_del_mac_filter(vsi, addr);
return 0;
}
@@ -1800,11 +1748,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
* i40e_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
**/
-#ifdef I40E_FCOE
-void i40e_set_rx_mode(struct net_device *netdev)
-#else
static void i40e_set_rx_mode(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -1829,16 +1773,15 @@ static void i40e_set_rx_mode(struct net_device *netdev)
}
/**
- * i40e_undo_filter_entries - Undo the changes made to MAC filter entries
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
* @vsi: Pointer to VSI struct
* @from: Pointer to list which contains MAC filter entries - changes to
 * those entries need to be undone.
*
- * MAC filter entries from list were slated to be sent to firmware, either for
- * addition or deletion.
+ * MAC filter entries from this list were slated for deletion.
**/
-static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
- struct hlist_head *from)
+static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
+ struct hlist_head *from)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
@@ -1853,6 +1796,46 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
}
/**
+ * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: Pointer to vsi struct
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ * those entries need to be undone.
+ *
+ * MAC filter entries from this list were slated for addition.
+ **/
+static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
+ struct hlist_head *from)
+{
+ struct i40e_new_mac_filter *new;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(new, h, from, hlist) {
+ /* We can simply free the wrapper structure */
+ hlist_del(&new->hlist);
+ kfree(new);
+ }
+}
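
The wrapper freed here is the small bookkeeping structure this patch introduces for the temporary add list. Reconstructed from the fields used in this file (new->hlist, new->f, new->state), its definition in i40e.h is essentially:

    /* sketch based on usage; see i40e.h for the authoritative definition */
    struct i40e_new_mac_filter {
        struct hlist_node hlist;        /* membership in tmp_add_list */
        struct i40e_mac_filter *f;      /* the real filter in the hash table */
        enum i40e_filter_state state;   /* add result, copied back on success */
    };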
+
+/**
+ * i40e_next_filter - Get the next non-broadcast filter from a list
+ * @next: pointer to filter in list
+ *
+ * Returns the next non-broadcast filter in the list. Required so that we
+ * ignore broadcast filters within the list, since these are not handled via
+ * the normal firmware update path.
+ */
+static
+struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
+{
+ hlist_for_each_entry_continue(next, hlist) {
+ if (!is_broadcast_ether_addr(next->f->macaddr))
+ return next;
+ }
+
+ return NULL;
+}
+
+/**
* i40e_update_filter_state - Update filter state based on return data
* from firmware
* @count: Number of filters added
@@ -1865,7 +1848,7 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
static int
i40e_update_filter_state(int count,
struct i40e_aqc_add_macvlan_element_data *add_list,
- struct i40e_mac_filter *add_head)
+ struct i40e_new_mac_filter *add_head)
{
int retval = 0;
int i;
@@ -1884,9 +1867,9 @@ i40e_update_filter_state(int count,
retval++;
}
- add_head = hlist_entry(add_head->hlist.next,
- typeof(struct i40e_mac_filter),
- hlist);
+ add_head = i40e_next_filter(add_head);
+ if (!add_head)
+ break;
}
return retval;
@@ -1943,7 +1926,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_aqc_add_macvlan_element_data *list,
- struct i40e_mac_filter *add_head,
+ struct i40e_new_mac_filter *add_head,
int num_add, bool *promisc_changed)
{
struct i40e_hw *hw = &vsi->back->hw;
@@ -1955,7 +1938,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
if (fcnt != num_add) {
*promisc_changed = true;
- set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
i40e_aq_str(hw, aq_err),
@@ -1971,10 +1954,12 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
* This function sets or clears the promiscuous broadcast flags for VLAN
* filters in order to properly receive broadcast frames. Assumes that only
* broadcast filters are passed.
+ *
+ * Returns status indicating success or failure.
**/
-static
-void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
- struct i40e_mac_filter *f)
+static i40e_status
+i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
+ struct i40e_mac_filter *f)
{
bool enable = f->state == I40E_FILTER_NEW;
struct i40e_hw *hw = &vsi->back->hw;
@@ -1993,15 +1978,13 @@ void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
NULL);
}
- if (aq_ret) {
+ if (aq_ret)
dev_warn(&vsi->back->pdev->dev,
"Error %s setting broadcast promiscuous mode on %s\n",
i40e_aq_str(hw, hw->aq.asq_last_status),
vsi_name);
- f->state = I40E_FILTER_FAILED;
- } else if (enable) {
- f->state = I40E_FILTER_ACTIVE;
- }
+
+ return aq_ret;
}
/**
@@ -2015,7 +1998,8 @@ void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
struct hlist_head tmp_add_list, tmp_del_list;
- struct i40e_mac_filter *f, *add_head = NULL;
+ struct i40e_mac_filter *f;
+ struct i40e_new_mac_filter *new, *add_head = NULL;
struct i40e_hw *hw = &vsi->back->hw;
unsigned int failed_filters = 0;
unsigned int vlan_filters = 0;
@@ -2037,7 +2021,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
struct i40e_aqc_add_macvlan_element_data *add_list;
struct i40e_aqc_remove_macvlan_element_data *del_list;
- while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
+ while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
usleep_range(1000, 2000);
pf = vsi->back;
@@ -2069,8 +2053,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
continue;
}
if (f->state == I40E_FILTER_NEW) {
- hash_del(&f->hlist);
- hlist_add_head(&f->hlist, &tmp_add_list);
+ /* Create a temporary i40e_new_mac_filter */
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ goto err_no_memory_locked;
+
+ /* Store pointer to the real filter */
+ new->f = f;
+ new->state = f->state;
+
+ /* Add it to the hash list */
+ hlist_add_head(&new->hlist, &tmp_add_list);
}
/* Count the number of active (current and new) VLAN
@@ -2105,7 +2098,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cmd_flags = 0;
/* handle broadcast filters by updating the broadcast
- * promiscuous flag instead of deleting a MAC filter.
+ * promiscuous flag and releasing the filter list.
*/
if (is_broadcast_ether_addr(f->macaddr)) {
i40e_aqc_broadcast_filter(vsi, vsi_name, f);
@@ -2163,36 +2156,37 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
goto err_no_memory;
num_add = 0;
- hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
- if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state)) {
- f->state = I40E_FILTER_FAILED;
+ hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
+ if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
+ vsi->state)) {
+ new->state = I40E_FILTER_FAILED;
continue;
}
/* handle broadcast filters by updating the broadcast
* promiscuous flag instead of adding a MAC filter.
*/
- if (is_broadcast_ether_addr(f->macaddr)) {
- u64 key = i40e_addr_to_hkey(f->macaddr);
- i40e_aqc_broadcast_filter(vsi, vsi_name, f);
-
- hlist_del(&f->hlist);
- hash_add(vsi->mac_filter_hash, &f->hlist, key);
+ if (is_broadcast_ether_addr(new->f->macaddr)) {
+ if (i40e_aqc_broadcast_filter(vsi, vsi_name,
+ new->f))
+ new->state = I40E_FILTER_FAILED;
+ else
+ new->state = I40E_FILTER_ACTIVE;
continue;
}
/* add to add array */
if (num_add == 0)
- add_head = f;
+ add_head = new;
cmd_flags = 0;
- ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
- if (f->vlan == I40E_VLAN_ANY) {
+ ether_addr_copy(add_list[num_add].mac_addr,
+ new->f->macaddr);
+ if (new->f->vlan == I40E_VLAN_ANY) {
add_list[num_add].vlan_tag = 0;
cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
} else {
add_list[num_add].vlan_tag =
- cpu_to_le16((u16)(f->vlan));
+ cpu_to_le16((u16)(new->f->vlan));
}
add_list[num_add].queue_number = 0;
/* set invalid match method for later detection */
@@ -2218,11 +2212,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
* the VSI's list.
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
- hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
- u64 key = i40e_addr_to_hkey(f->macaddr);
-
- hlist_del(&f->hlist);
- hash_add(vsi->mac_filter_hash, &f->hlist, key);
+ hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
+ /* Only update the state if we're still NEW */
+ if (new->f->state == I40E_FILTER_NEW)
+ new->f->state = new->state;
+ hlist_del(&new->hlist);
+ kfree(new);
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
kfree(add_list);
@@ -2250,20 +2245,20 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
* safely exit if we didn't just enter, we no longer have any failed
* filters, and we have reduced filters below the threshold value.
*/
- if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
+ if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
!promisc_changed && !failed_filters &&
(vsi->active_filters < vsi->promisc_threshold)) {
dev_info(&pf->pdev->dev,
"filter logjam cleared on %s, leaving overflow promiscuous mode\n",
vsi_name);
- clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
promisc_changed = true;
vsi->promisc_threshold = 0;
}
/* if the VF is not trusted do not do promisc */
if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
- clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
goto out;
}
@@ -2286,14 +2281,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
- if ((changed_flags & IFF_PROMISC) ||
- (promisc_changed &&
- test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
+
+ if ((changed_flags & IFF_PROMISC) || promisc_changed) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
- test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state));
+ test_bit(__I40E_VSI_OVERFLOW_PROMISC,
+ vsi->state));
if ((vsi->type == I40E_VSI_MAIN) &&
(pf->lan_veb != I40E_NO_VEB) &&
!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
@@ -2376,19 +2370,19 @@ out:
if (retval)
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
return retval;
err_no_memory:
/* Restore elements on the temporary add and delete lists */
spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
- i40e_undo_filter_entries(vsi, &tmp_del_list);
- i40e_undo_filter_entries(vsi, &tmp_add_list);
+ i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_add_filter_entries(vsi, &tmp_add_list);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
return -ENOMEM;
}
@@ -2419,6 +2413,18 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
}
/**
+ * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: the vsi
+ **/
+static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
+{
+ if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ return I40E_RXBUFFER_2048;
+ else
+ return I40E_RXBUFFER_3072;
+}
+
+/**
* i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2429,13 +2435,22 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if (frame_size > i40e_max_xdp_frame_size(vsi))
+ return -EINVAL;
+ }
netdev_info(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
- i40e_notify_client_of_l2_param_changes(vsi);
+ pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+ I40E_FLAG_CLIENT_L2_CHANGE);
return 0;
}
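
To make the XDP check concrete, assuming the conventional constant values (ETH_HLEN 14, ETH_FCS_LEN 4, VLAN_HLEN 4, and I40E_RXBUFFER_3072 equal to 3072):

    /* frame_size = new_mtu + 14 + 4 + 4 = new_mtu + 22
     *
     * 4K pages, legacy-rx off:  max MTU = 3072 - 22 = 3050
     * 8K+ pages or legacy-rx:   max MTU = 2048 - 22 = 2026
     */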
@@ -2574,12 +2589,15 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
/**
* i40e_vsi_add_vlan - Add VSI membership for given VLAN
* @vsi: the VSI being configured
- * @vid: VLAN id to be added (0 = untagged only , -1 = any)
+ * @vid: VLAN id to be added
**/
-int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
{
int err;
+ if (!vid || vsi->info.pvid)
+ return -EINVAL;
+
/* Locked once because all functions invoked below iterate the list */
spin_lock_bh(&vsi->mac_filter_hash_lock);
err = i40e_add_vlan_all_mac(vsi, vid);
@@ -2622,10 +2640,13 @@ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
/**
* i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
* @vsi: the VSI being configured
- * @vid: VLAN id to be removed (0 = untagged only , -1 = any)
+ * @vid: VLAN id to be removed
**/
-void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
{
+ if (!vid || vsi->info.pvid)
+ return;
+
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_rm_vlan_all_mac(vsi, vid);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -2643,13 +2664,8 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
*
* net_device_ops implementation for adding vlan ids
**/
-#ifdef I40E_FCOE
-int i40e_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
-#else
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -2680,13 +2696,8 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
*
* net_device_ops implementation for removing vlan ids
**/
-#ifdef I40E_FCOE
-int i40e_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
-#else
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -2819,6 +2830,12 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return err;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
+
return err;
}
@@ -2832,12 +2849,17 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
int i;
- if (!vsi->tx_rings)
- return;
+ if (vsi->tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ }
- for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
- i40e_free_tx_resources(vsi->tx_rings[i]);
+ if (vsi->xdp_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+ i40e_free_tx_resources(vsi->xdp_rings[i]);
+ }
}
/**
@@ -2856,9 +2878,6 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
-#ifdef I40E_FCOE
- i40e_fcoe_setup_ddp_resources(vsi);
-#endif
return err;
}
@@ -2878,9 +2897,6 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
i40e_free_rx_resources(vsi->rx_rings[i]);
-#ifdef I40E_FCOE
- i40e_fcoe_free_ddp_resources(vsi);
-#endif
}
/**
@@ -2951,9 +2967,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.qlen = ring->count;
tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED));
-#ifdef I40E_FCOE
- tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
-#endif
tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
/* FDIR VSI tx ring can still use RS bit and writebacks */
if (vsi->type != I40E_VSI_FDIR)
@@ -3034,7 +3047,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->rx_buf_len = vsi->rx_buf_len;
- rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count;
@@ -3056,9 +3070,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.l2tsel = 1;
/* this controls whether VLAN is stripped from inner headers */
rx_ctx.showiv = 0;
-#ifdef I40E_FCOE
- rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
-#endif
/* set the prefena field to 1 because the manual says to */
rx_ctx.prefena = 1;
@@ -3080,6 +3091,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
return -ENOMEM;
}
+ /* configure Rx buffer alignment */
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ clear_ring_build_skb_enabled(ring);
+ else
+ set_ring_build_skb_enabled(ring);
+
/* cache tail for quicker writes, and clear the reg before use */
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
@@ -3103,6 +3120,12 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
err = i40e_configure_tx_ring(vsi->tx_rings[i]);
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return err;
+
+ for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+ err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
+
return err;
}
@@ -3117,27 +3140,21 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
int err = 0;
u16 i;
- if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
- vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
- + ETH_FCS_LEN + VLAN_HLEN;
- else
- vsi->max_frame = I40E_RXBUFFER_2048;
-
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
-
-#ifdef I40E_FCOE
- /* setup rx buffer for FCoE */
- if ((vsi->type == I40E_VSI_FCOE) &&
- (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
- vsi->rx_buf_len = I40E_RXBUFFER_3072;
- vsi->max_frame = I40E_RXBUFFER_3072;
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+ vsi->max_frame = I40E_MAX_RXBUFFER;
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
+#if (PAGE_SIZE < 8192)
+ } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = I40E_MAX_RXBUFFER;
+ vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
+ I40E_RXBUFFER_2048;
}
-#endif /* I40E_FCOE */
- /* round up for the chip's needs */
- vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
- BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
-
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_configure_rx_ring(vsi->rx_rings[i]);
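
Spelled out, the Rx sizing above resolves to the following for the common configurations (the constant values are assumptions based on their conventional definitions in i40e.h):

    /* no netdev or legacy-rx:      max_frame = I40E_MAX_RXBUFFER (9728),
     *                              rx_buf_len = 2048
     * 4K pages, MTU <= 1500, and
     * 2K buffers fit with padding: max_frame = rx_buf_len
     *                                        = 1536 - NET_IP_ALIGN
     * otherwise:                   max_frame = I40E_MAX_RXBUFFER (9728),
     *                              rx_buf_len = 3072 (4K pages)
     *                                           or 2048 (8K+ pages)
     */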
@@ -3217,6 +3234,12 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
+ /* Reset FDir counters as we're replaying all existing filters */
+ pf->fd_tcp4_filter_cnt = 0;
+ pf->fd_udp4_filter_cnt = 0;
+ pf->fd_sctp4_filter_cnt = 0;
+ pf->fd_ip4_filter_cnt = 0;
+
hlist_for_each_entry_safe(filter, node,
&pf->fdir_filter_list, fdir_node) {
i40e_add_del_fdir(vsi, filter, true);
@@ -3247,6 +3270,7 @@ static int i40e_vsi_configure(struct i40e_vsi *vsi)
**/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
+ bool has_xdp = i40e_enabled_xdp_vsi(vsi);
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u16 vector;
@@ -3272,33 +3296,45 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
q_vector->tx.itr);
wr32(hw, I40E_PFINT_RATEN(vector - 1),
- INTRL_USEC_TO_REG(vsi->int_rate_limit));
+ i40e_intrl_usec_to_reg(vsi->int_rate_limit));
/* Linked list for the queuepairs assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
for (q = 0; q < q_vector->num_ringpairs; q++) {
+ u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
u32 val;
val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
- (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX
- << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX <<
+ I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(qp), val);
+ if (has_xdp) {
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX <<
+ I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ }
+
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
- ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_RX
- << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX <<
+ I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
/* Terminate the linked list */
if (q == (q_vector->num_ringpairs - 1))
- val |= (I40E_QUEUE_END_OF_LIST
- << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+ val |= (I40E_QUEUE_END_OF_LIST <<
+ I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
wr32(hw, I40E_QINT_TQCTL(qp), val);
qp++;
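
With XDP enabled, the per-vector cause linked list built above interleaves the XDP Tx queues, which sit at an offset of alloc_queue_pairs past the regular queues. For one ringpair qp the chain is:

    /* RQCTL(qp)     -> next: Tx queue (qp + alloc_queue_pairs), the XDP ring
     * TQCTL(xdp qp) -> next: Tx queue qp, the regular Tx ring
     * TQCTL(qp)     -> next: Rx queue qp + 1, or END_OF_LIST on the
     *                  vector's last ringpair
     */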
@@ -3352,6 +3388,7 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
**/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
+ u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
@@ -3372,12 +3409,22 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_LNKLST0, 0);
/* Associate the queue pair to the vector and enable the queue int */
- val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
(I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(0), val);
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
+ (I40E_QUEUE_TYPE_TX
+ << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ }
+
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
@@ -3541,11 +3588,24 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
int base = vsi->base_vector;
int i;
+ /* disable interrupt causation from each queue */
for (i = 0; i < vsi->num_queue_pairs; i++) {
- wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
- wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
+ u32 val;
+
+ val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+ val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+
+ val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
+ val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ continue;
+ wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
}
+ /* disable each interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
for (i = vsi->base_vector;
i < (vsi->num_q_vectors + vsi->base_vector); i++)
@@ -3624,10 +3684,10 @@ static irqreturn_t i40e_intr(int irq, void *data)
pf->sw_int_count++;
if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
- (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
+ (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
- icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
+ set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
}
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -3641,29 +3701,29 @@ static irqreturn_t i40e_intr(int irq, void *data)
* this is not a performance path and napi_schedule()
* can deal with rescheduling.
*/
- if (!test_bit(__I40E_DOWN, &pf->state))
+ if (!test_bit(__I40E_DOWN, pf->state))
napi_schedule_irqoff(&q_vector->napi);
}
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
- set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+ set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
}
if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
- set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+ set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
}
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
- set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
}
if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
- if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
- set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
val = rd32(hw, I40E_GLGEN_RSTAT);
val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
@@ -3674,7 +3734,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
pf->globr_count++;
} else if (val == I40E_RESET_EMPR) {
pf->empr_count++;
- set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
+ set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
}
}
@@ -3707,7 +3767,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
dev_info(&pf->pdev->dev, "device will be reset\n");
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
}
ena_mask &= ~icr0_remaining;
@@ -3717,7 +3777,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
- if (!test_bit(__I40E_DOWN, &pf->state)) {
+ if (!test_bit(__I40E_DOWN, pf->state)) {
i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf, false);
}
@@ -3846,6 +3906,16 @@ static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
+ /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = xdp_ring;
+ q_vector->tx.count++;
+ }
+
rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring;
q_vector->rx.ring = rx_ring;
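
Because each ring is prepended to the q_vector's singly linked Tx list, the vector's tx.ring head ends up being the XDP ring, whose ->next is the regular Tx ring; NAPI cleanup then services both without further changes. The resulting list for one queue pair:

    /* q_vector->tx.ring -> xdp_rings[qp] -> tx_rings[qp] -> (previous head)
     * q_vector->tx.count is incremented once for each, i.e. by 2 per pair
     */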
@@ -3929,11 +3999,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
* This is used by netconsole to send skbs without having to re-enable
* interrupts. It's not called while the normal interrupt routine is executing.
**/
-#ifdef I40E_FCOE
-void i40e_netpoll(struct net_device *netdev)
-#else
static void i40e_netpoll(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -3941,7 +4007,7 @@ static void i40e_netpoll(struct net_device *netdev)
int i;
/* if interface is down do nothing */
- if (test_bit(__I40E_DOWN, &vsi->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
return;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -3953,6 +4019,8 @@ static void i40e_netpoll(struct net_device *netdev)
}
#endif
+#define I40E_QTX_ENA_WAIT_COUNT 50
+
/**
* i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
* @pf: the PF being configured
@@ -3983,6 +4051,77 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
+ * i40e_control_tx_q - Start or stop a particular Tx queue
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue. Note that any delay
+ * required after the operation is expected to be handled by the caller of
+ * this function.
+ **/
+static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 tx_reg;
+ int i;
+
+ /* warn the TX unit of coming changes */
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
+ if (!enable)
+ usleep_range(10, 20);
+
+ for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
+ tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+ if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ return;
+
+ /* turn on/off the queue */
+ if (enable) {
+ wr32(hw, I40E_QTX_HEAD(pf_q), 0);
+ tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ } else {
+ tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ }
+
+ wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+}
+
+/**
+ * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
+ * @seid: VSI SEID
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @is_xdp: true if the queue is used for XDP
+ * @enable: start or stop the queue
+ **/
+static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
+ bool is_xdp, bool enable)
+{
+ int ret;
+
+ i40e_control_tx_q(pf, pf_q, enable);
+
+ /* wait for the change to finish */
+ ret = i40e_pf_txq_wait(pf, pf_q, enable);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d %sTx ring %d %sable timeout\n",
+ seid, (is_xdp ? "XDP " : ""), pf_q,
+ (enable ? "en" : "dis"));
+ }
+
+ return ret;
+}
+
+/**
* i40e_vsi_control_tx - Start or stop a VSI's rings
* @vsi: the VSI being configured
* @enable: start or stop the rings
@@ -3990,54 +4129,26 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- int i, j, pf_q, ret = 0;
- u32 tx_reg;
+ int i, pf_q, ret = 0;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ pf_q,
+ false /*is xdp*/, enable);
+ if (ret)
+ break;
- /* warn the TX unit of coming changes */
- i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
- if (!enable)
- usleep_range(10, 20);
-
- for (j = 0; j < 50; j++) {
- tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
- ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
- break;
- usleep_range(1000, 2000);
- }
- /* Skip if the queue is already in the requested state */
- if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- continue;
-
- /* turn on/off the queue */
- if (enable) {
- wr32(hw, I40E_QTX_HEAD(pf_q), 0);
- tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
- } else {
- tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
- }
-
- wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
- /* No waiting for the Tx queue to disable */
- if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
+ if (!i40e_enabled_xdp_vsi(vsi))
continue;
- /* wait for the change to finish */
- ret = i40e_pf_txq_wait(pf, pf_q, enable);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "VSI seid %d Tx ring %d %sable timeout\n",
- vsi->seid, pf_q, (enable ? "en" : "dis"));
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ pf_q + vsi->alloc_queue_pairs,
+ true /*is xdp*/, enable);
+ if (ret)
break;
- }
}
- if (hw->revision_id == 0)
- mdelay(50);
return ret;
}
@@ -4071,6 +4182,43 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
+ * i40e_control_rx_q - Start or stop a particular Rx queue
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue. Note that any delay
+ * required after the operation is expected to be handled by the caller of
+ * this function.
+ **/
+static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 rx_reg;
+ int i;
+
+ for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
+ rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+ if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ return;
+
+ /* turn on/off the queue */
+ if (enable)
+ rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+ else
+ rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+
+ wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+}
+
+/**
* i40e_vsi_control_rx - Start or stop a VSI's rings
* @vsi: the VSI being configured
* @enable: start or stop the rings
@@ -4078,33 +4226,11 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- int i, j, pf_q, ret = 0;
- u32 rx_reg;
+ int i, pf_q, ret = 0;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- for (j = 0; j < 50; j++) {
- rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
- ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
- break;
- usleep_range(1000, 2000);
- }
-
- /* Skip if the queue is already in the requested state */
- if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
-
- /* turn on/off the queue */
- if (enable)
- rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
- else
- rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
- wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
- /* No waiting for the Tx queue to disable */
- if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
- continue;
+ i40e_control_rx_q(pf, pf_q, enable);
/* wait for the change to finish */
ret = i40e_pf_rxq_wait(pf, pf_q, enable);
@@ -4116,6 +4242,12 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
}
}
+ /* Due to HW errata, on Rx disable only, the register can indicate done
+ * before it really is. Needs 50ms to be sure
+ */
+ if (!enable)
+ mdelay(50);
+
return ret;
}
@@ -4142,6 +4274,10 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
**/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
+ /* When port TX is suspended, don't wait */
+ if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
+ return i40e_vsi_stop_rings_no_wait(vsi);
+
/* do rx first for enable and last for disable
* Ignore return value, we need to shutdown whatever we can
*/
@@ -4150,6 +4286,29 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
+ * @vsi: the VSI being shutdown
+ *
+ * This function stops all the rings for a VSI but does not delay to verify
+ * that rings have been disabled. It is expected that the caller is shutting
+ * down multiple VSIs at once and will delay together for all the VSIs after
+ * initiating the shutdown. This is particularly useful for shutting down lots
+ * of VFs together. Otherwise, a large delay can be incurred while configuring
+ * each VSI in serial.
+ **/
+void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i, pf_q;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ i40e_control_tx_q(pf, pf_q, false);
+ i40e_control_rx_q(pf, pf_q, false);
+ }
+}
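
A minimal sketch of the intended usage pattern; the VF loop, the field names, and the single 50 ms settle delay (mirroring the Rx-disable errata handling above) are illustrative assumptions, not a copy of the real caller:

    /* stop every VF's rings without waiting per-VSI ... */
    for (i = 0; i < pf->num_alloc_vfs; i++) {
        struct i40e_vsi *vsi = pf->vsi[pf->vf[i].lan_vsi_idx];

        if (vsi)
            i40e_vsi_stop_rings_no_wait(vsi);
    }

    /* ... then wait once for all of them to settle */
    mdelay(50);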
+
+/**
* i40e_vsi_free_irq - Free the irq association with the OS
* @vsi: the VSI being configured
**/
@@ -4374,8 +4533,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_enable(&vsi->q_vectors[q_idx]->napi);
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_enable(&q_vector->napi);
+ }
}
/**
@@ -4389,8 +4552,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_disable(&vsi->q_vectors[q_idx]->napi);
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_disable(&q_vector->napi);
+ }
}
/**
@@ -4399,17 +4566,16 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
**/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
- bool reset = false;
-
- if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+ struct i40e_pf *pf = vsi->back;
+ if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
i40e_down(vsi);
i40e_vsi_free_irq(vsi);
i40e_vsi_free_tx_resources(vsi);
i40e_vsi_free_rx_resources(vsi);
vsi->current_netdev_flags = 0;
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
- reset = true;
- i40e_notify_client_of_netdev_close(vsi, reset);
+ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ pf->flags |= I40E_FLAG_CLIENT_RESET;
}
/**
@@ -4418,18 +4584,10 @@ static void i40e_vsi_close(struct i40e_vsi *vsi)
**/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
- if (test_bit(__I40E_DOWN, &vsi->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
return;
- /* No need to disable FCoE VSI when Tx suspended */
- if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
- vsi->type == I40E_VSI_FCOE) {
- dev_dbg(&vsi->back->pdev->dev,
- "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
- return;
- }
-
- set_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
else
@@ -4442,10 +4600,9 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
**/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
- if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
+ if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
return;
- clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
else
@@ -4480,21 +4637,20 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
}
}
-#ifdef CONFIG_I40E_DCB
/**
* i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
* @vsi: the VSI being configured
*
- * This function waits for the given VSI's queues to be disabled.
+ * Wait until all queues on a given VSI have been disabled.
**/
-static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
+int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- /* Check and wait for the disable status of the queue */
+ /* Check and wait for the Tx queue */
ret = i40e_pf_txq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -4502,11 +4658,21 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
vsi->seid, pf_q);
return ret;
}
- }
- pf_q = vsi->base_queue;
- for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- /* Check and wait for the disable status of the queue */
+ if (!i40e_enabled_xdp_vsi(vsi))
+ goto wait_rx;
+
+ /* Check and wait for the XDP Tx queue */
+ ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
+ false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d XDP Tx ring %d disable timeout\n",
+ vsi->seid, pf_q);
+ return ret;
+ }
+wait_rx:
+ /* Check and wait for the Rx queue */
ret = i40e_pf_rxq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -4519,6 +4685,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
return 0;
}
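The XDP wait above relies on the PF queue numbering this patch assumes: a VSI's XDP Tx rings occupy the block of PF queues immediately after its regular queue pairs. A hypothetical helper makes that mapping explicit:

/* Hypothetical illustration of the PF queue numbering assumed above:
 * regular ring i uses vsi->base_queue + i, and its XDP Tx twin sits
 * vsi->alloc_queue_pairs further along.
 */
static u16 example_xdp_pf_queue(struct i40e_vsi *vsi, u16 i)
{
	return vsi->base_queue + vsi->alloc_queue_pairs + i;
}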
+#ifdef CONFIG_I40E_DCB
/**
* i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
* @pf: the PF
@@ -4531,8 +4698,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
int v, ret = 0;
for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
- /* No need to wait for FCoE VSI queues */
- if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
+ if (pf->vsi[v]) {
ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
if (ret)
break;
@@ -4550,16 +4716,15 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
* @vsi: Pointer to VSI struct
*
 * This function checks the specified queue for the given VSI and detects a
 * hung condition.
- * Sets hung bit since it is two step process. Before next run of service task
- * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
- * hung condition remain unchanged and during subsequent run, this function
- * issues SW interrupt to recover from hung condition.
+ * We proactively detect hung Tx queues by checking whether interrupts are
+ * disabled while descriptors are still pending. If the queue appears hung,
+ * attempt to recover by triggering a SW interrupt.
**/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
struct i40e_ring *tx_ring = NULL;
struct i40e_pf *pf;
- u32 head, val, tx_pending_hw;
+ u32 val, tx_pending;
int i;
pf = vsi->back;
@@ -4585,45 +4750,15 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
else
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
- head = i40e_get_head(tx_ring);
+ tx_pending = i40e_get_tx_pending(tx_ring);
- tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
-
- /* HW is done executing descriptors, updated HEAD write back,
- * but SW hasn't processed those descriptors. If interrupt is
- * not generated from this point ON, it could result into
- * dev_watchdog detecting timeout on those netdev_queue,
- * hence proactively trigger SW interrupt.
- */
- if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
- /* NAPI Poll didn't run and clear since it was set */
- if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
- &tx_ring->q_vector->hung_detected)) {
- netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
- vsi->seid, q_idx, tx_pending_hw,
- tx_ring->next_to_clean, head,
- tx_ring->next_to_use,
- readl(tx_ring->tail));
- netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
- vsi->seid, q_idx, val);
- i40e_force_wb(vsi, tx_ring->q_vector);
- } else {
- /* First Chance - detected possible hung */
- set_bit(I40E_Q_VECTOR_HUNG_DETECT,
- &tx_ring->q_vector->hung_detected);
- }
- }
-
- /* This is the case where we have interrupts missing,
- * so the tx_pending in HW will most likely be 0, but we
- * will have tx_pending in SW since the WB happened but the
- * interrupt got lost.
+ /* If interrupts are disabled while Tx descriptors are still pending,
+ * trigger the SW interrupt (don't wait). Worst case there is one
+ * extra interrupt that finds nothing to clean because the queues
+ * were already cleaned.
*/
- if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
- (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
- if (napi_reschedule(&tx_ring->q_vector->napi))
- tx_ring->tx_stats.tx_lost_interrupt++;
- }
+ if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+ i40e_force_wb(vsi, tx_ring->q_vector);
}
/**
@@ -4647,8 +4782,8 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf)
return;
/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
- if (test_bit(__I40E_DOWN, &vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
return;
/* Make sure type is MAIN VSI */
@@ -5154,20 +5289,12 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
continue;
/* - Enable all TCs for the LAN VSI
-#ifdef I40E_FCOE
- * - For FCoE VSI only enable the TC configured
- * as per the APP TLV
-#endif
* - For all others keep them at TC0 for now
*/
if (v == pf->lan_vsi)
tc_map = i40e_pf_get_tc_map(pf);
else
tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
-#ifdef I40E_FCOE
- if (pf->vsi[v]->type == I40E_VSI_FCOE)
- tc_map = i40e_get_fcoe_tc_map(pf);
-#endif /* #ifdef I40E_FCOE */
ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
if (ret) {
@@ -5203,7 +5330,7 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
}
@@ -5234,10 +5361,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
(hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
dev_info(&pf->pdev->dev,
"DCBX offload is not supported or is disabled for this PF.\n");
-
- if (pf->flags & I40E_FLAG_MFP_ENABLED)
- goto out;
-
} else {
/* When status is not DISABLED then DCBX in FW */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
@@ -5276,6 +5399,8 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
enum i40e_aq_link_speed new_speed;
char *speed = "Unknown";
char *fc = "Unknown";
+ char *fec = "";
+ char *an = "";
new_speed = vsi->back->hw.phy.link_info.link_speed;
@@ -5335,8 +5460,23 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
break;
}
- netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
- speed, fc);
+ if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
+ fec = ", FEC: None";
+ an = ", Autoneg: False";
+
+ if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+ an = ", Autoneg: True";
+
+ if (vsi->back->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_KR_ENA)
+ fec = ", FEC: CL74 FC-FEC/BASE-R";
+ else if (vsi->back->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_RS_ENA)
+ fec = ", FEC: CL108 RS-FEC";
+ }
+
+ netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s, Flow Control: %s\n",
+ speed, fec, an, fc);
}
/**
@@ -5358,7 +5498,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
if (err)
return err;
- clear_bit(__I40E_DOWN, &vsi->state);
+ clear_bit(__I40E_VSI_DOWN, vsi->state);
i40e_napi_enable_all(vsi);
i40e_vsi_enable_irq(vsi);
@@ -5381,13 +5521,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
/* replay FDIR SB filters */
if (vsi->type == I40E_VSI_FDIR) {
/* reset fd counters */
- pf->fd_add_err = pf->fd_atr_cnt = 0;
- if (pf->fd_tcp_rule > 0) {
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
- pf->fd_tcp_rule = 0;
- }
+ pf->fd_add_err = 0;
+ pf->fd_atr_cnt = 0;
i40e_fdir_filter_restore(vsi);
}
@@ -5412,12 +5547,12 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
WARN_ON(in_interrupt());
- while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
usleep_range(1000, 2000);
i40e_down(vsi);
i40e_up(vsi);
- clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
}
/**
@@ -5444,7 +5579,7 @@ void i40e_down(struct i40e_vsi *vsi)
int i;
/* It is assumed that the caller of this function
- * sets the vsi->state __I40E_DOWN bit.
+ * sets the vsi->state __I40E_VSI_DOWN bit.
*/
if (vsi->netdev) {
netif_carrier_off(vsi->netdev);
@@ -5456,11 +5591,11 @@ void i40e_down(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_clean_tx_ring(vsi->tx_rings[i]);
+ if (i40e_enabled_xdp_vsi(vsi))
+ i40e_clean_tx_ring(vsi->xdp_rings[i]);
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
- i40e_notify_client_of_netdev_close(vsi, false);
-
}
/**
@@ -5521,17 +5656,16 @@ exit:
return ret;
}
-#ifdef I40E_FCOE
-int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc)
-#else
-static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+static int __i40e_setup_tc(struct net_device *netdev, u32 handle,
+ u32 chain_index, __be16 proto,
struct tc_to_netdev *tc)
-#endif
{
- if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+ if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return i40e_setup_tc(netdev, tc->tc);
+
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return i40e_setup_tc(netdev, tc->mqprio->num_tc);
}
/**
@@ -5554,8 +5688,8 @@ int i40e_open(struct net_device *netdev)
int err;
/* disallow open during test or if eeprom is broken */
- if (test_bit(__I40E_TESTING, &pf->state) ||
- test_bit(__I40E_BAD_EEPROM, &pf->state))
+ if (test_bit(__I40E_TESTING, pf->state) ||
+ test_bit(__I40E_BAD_EEPROM, pf->state))
return -EBUSY;
netif_carrier_off(netdev);
@@ -5584,6 +5718,8 @@ int i40e_open(struct net_device *netdev)
* Finish initialization of the VSI.
*
* Returns 0 on success, negative value on failure
+ *
+ * Note: expects to be called while under rtnl_lock()
**/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
@@ -5647,7 +5783,7 @@ err_setup_rx:
err_setup_tx:
i40e_vsi_free_tx_resources(vsi);
if (vsi == pf->vsi[pf->lan_vsi])
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
return err;
}
@@ -5662,6 +5798,7 @@ err_setup_tx:
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
struct i40e_fdir_filter *filter;
+ struct i40e_flex_pit *pit_entry, *tmp;
struct hlist_node *node2;
hlist_for_each_entry_safe(filter, node2,
@@ -5669,7 +5806,43 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
hlist_del(&filter->fdir_node);
kfree(filter);
}
+
+ list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
+ list_del(&pit_entry->list);
+ kfree(pit_entry);
+ }
+ INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+
+ list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
+ list_del(&pit_entry->list);
+ kfree(pit_entry);
+ }
+ INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
pf->fdir_pf_active_filters = 0;
+ pf->fd_tcp4_filter_cnt = 0;
+ pf->fd_udp4_filter_cnt = 0;
+ pf->fd_sctp4_filter_cnt = 0;
+ pf->fd_ip4_filter_cnt = 0;
+
+ /* Reprogram the default input set for TCP/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for UDP/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for SCTP/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+ /* Reprogram the default input set for Other/IPv4 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
/**
@@ -5696,12 +5869,14 @@ int i40e_close(struct net_device *netdev)
* i40e_do_reset - Start a PF or Core Reset sequence
* @pf: board private structure
* @reset_flags: which reset is requested
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
*
* The essential difference in resets is that the PF Reset
* doesn't clear the packet buffers, doesn't reset the PE
* firmware, and doesn't bother the other PFs on the chip.
**/
-void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
u32 val;
@@ -5747,7 +5922,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
* for the Core Reset.
*/
dev_dbg(&pf->pdev->dev, "PFR requested\n");
- i40e_handle_reset_warning(pf);
+ i40e_handle_reset_warning(pf, lock_acquired);
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
@@ -5759,10 +5934,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
- test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
+ test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
+ vsi->state))
i40e_vsi_reinit_locked(pf->vsi[v]);
- clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
- }
}
} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
int v;
@@ -5773,10 +5947,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
- test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
- set_bit(__I40E_DOWN, &vsi->state);
+ test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
+ vsi->state)) {
+ set_bit(__I40E_VSI_DOWN, vsi->state);
i40e_down(vsi);
- clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
}
}
} else {
@@ -5916,7 +6090,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
else
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
- set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+ set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed quiesce all VSIs */
i40e_pf_quiesce_all_vsi(pf);
@@ -5925,7 +6099,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
ret = i40e_resume_port_tx(pf);
- clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+ clear_bit(__I40E_PORT_SUSPENDED, pf->state);
/* In case of error no point in resuming VSIs */
if (ret)
goto exit;
@@ -5934,12 +6108,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
ret = i40e_pf_wait_queues_disabled(pf);
if (ret) {
/* Schedule PF reset to recover */
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
} else {
i40e_pf_unquiesce_all_vsi(pf);
- /* Notify the client for the DCB changes */
- i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
+ pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+ I40E_FLAG_CLIENT_L2_CHANGE);
}
exit:
@@ -5956,7 +6130,7 @@ exit:
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
rtnl_lock();
- i40e_do_reset(pf, reset_flags);
+ i40e_do_reset(pf, reset_flags, true);
rtnl_unlock();
}
@@ -6049,34 +6223,33 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
u32 fcnt_prog, fcnt_avail;
struct hlist_node *node;
- if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
return;
- /* Check if, FD SB or ATR was auto disabled and if there is enough room
- * to re-enable
- */
+ /* Check if we have enough room to re-enable FDir SB capability. */
fcnt_prog = i40e_get_global_fd_count(pf);
fcnt_avail = pf->fdir_pf_filter_count;
if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
(pf->fd_add_err == 0) ||
(i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
+ pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
- /* Wait for some more space to be available to turn on ATR. We also
- * must check that no existing ntuple rules for TCP are in effect
+ /* We should wait for even more space before re-enabling ATR.
+ * Additionally, we cannot enable ATR as long as we still have TCP SB
+ * rules active.
*/
- if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->fd_tcp_rule == 0)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
+ (pf->fd_tcp4_filter_cnt == 0)) {
+ if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
+ pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
}
@@ -6127,7 +6300,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
}
pf->fd_flush_timestamp = jiffies;
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6146,9 +6319,9 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
} else {
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
- if (!disable_atr)
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ if (!disable_atr && !pf->fd_tcp4_filter_cnt)
+ pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
@@ -6178,10 +6351,10 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
/* if interface is down do nothing */
- if (test_bit(__I40E_DOWN, &pf->state))
+ if (test_bit(__I40E_DOWN, pf->state))
return;
- if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
i40e_fdir_flush_and_replay(pf);
i40e_fdir_check_and_reenable(pf);
@@ -6195,14 +6368,11 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
**/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
- if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
+ if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
return;
switch (vsi->type) {
case I40E_VSI_MAIN:
-#ifdef I40E_FCOE
- case I40E_VSI_FCOE:
-#endif
if (!vsi->netdev || !vsi->netdev_registered)
break;
@@ -6271,7 +6441,16 @@ static void i40e_link_event(struct i40e_pf *pf)
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
status = i40e_get_link_status(&pf->hw, &new_link);
- if (status) {
+
+ /* On success, disable temp link polling */
+ if (status == I40E_SUCCESS) {
+ if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
+ pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
+ } else {
+ /* Enable link polling temporarily until i40e_get_link_status
+ * returns I40E_SUCCESS
+ */
+ pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
status);
return;
@@ -6282,11 +6461,11 @@ static void i40e_link_event(struct i40e_pf *pf)
if (new_link == old_link &&
new_link_speed == old_link_speed &&
- (test_bit(__I40E_DOWN, &vsi->state) ||
+ (test_bit(__I40E_VSI_DOWN, vsi->state) ||
new_link == netif_carrier_ok(vsi->netdev)))
return;
- if (!test_bit(__I40E_DOWN, &vsi->state))
+ if (!test_bit(__I40E_VSI_DOWN, vsi->state))
i40e_print_link_message(vsi, new_link);
/* Notify the base of the switch tree connected to
@@ -6313,8 +6492,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
int i;
/* if interface is down do nothing */
- if (test_bit(__I40E_DOWN, &pf->state) ||
- test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_DOWN, pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
/* make sure we don't do these things too often */
@@ -6323,7 +6502,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
return;
pf->service_timer_previous = jiffies;
- if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
+ if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
+ (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
i40e_link_event(pf);
/* Update the stats for active netdevs so the network stack
@@ -6340,7 +6520,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
i40e_update_veb_stats(pf->veb[i]);
}
- i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
+ i40e_ptp_rx_hang(pf);
+ i40e_ptp_tx_hang(pf);
}
/**
@@ -6351,44 +6532,42 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
{
u32 reset_flags = 0;
- rtnl_lock();
- if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_REINIT_REQUESTED);
- clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
+ clear_bit(__I40E_REINIT_REQUESTED, pf->state);
}
- if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
- clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
}
- if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
- clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+ clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
}
- if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
- clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+ clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
}
- if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
+ if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
reset_flags |= BIT(__I40E_DOWN_REQUESTED);
- clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
+ clear_bit(__I40E_DOWN_REQUESTED, pf->state);
}
/* If there's a recovery already waiting, it takes
* precedence before starting a new reset sequence.
*/
- if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
- i40e_handle_reset_warning(pf);
- goto unlock;
+ if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
+ i40e_prep_for_reset(pf, false);
+ i40e_reset(pf);
+ i40e_rebuild(pf, false, false);
}
/* If we're already down or resetting, just bail */
if (reset_flags &&
- !test_bit(__I40E_DOWN, &pf->state) &&
- !test_bit(__I40E_CONFIG_BUSY, &pf->state))
- i40e_do_reset(pf, reset_flags);
-
-unlock:
- rtnl_unlock();
+ !test_bit(__I40E_DOWN, pf->state) &&
+ !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
+ i40e_do_reset(pf, reset_flags, false);
+ }
}
/**
@@ -6433,7 +6612,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
u32 val;
/* Do not run clean AQ when PF reset fails */
- if (test_bit(__I40E_RESET_FAILED, &pf->state))
+ if (test_bit(__I40E_RESET_FAILED, pf->state))
return;
/* check for error indications */
@@ -6534,9 +6713,11 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
opcode);
break;
}
- } while (pending && (i++ < pf->adminq_work_limit));
+ } while (i++ < pf->adminq_work_limit);
+
+ if (i < pf->adminq_work_limit)
+ clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
- clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
/* re-enable Admin queue interrupt cause */
val = rd32(hw, I40E_PFINT_ICR0_ENA);
val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -6561,13 +6742,13 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
if (err) {
dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
err);
- set_bit(__I40E_BAD_EEPROM, &pf->state);
+ set_bit(__I40E_BAD_EEPROM, pf->state);
}
}
- if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
- clear_bit(__I40E_BAD_EEPROM, &pf->state);
+ clear_bit(__I40E_BAD_EEPROM, pf->state);
}
}
@@ -6874,17 +7055,19 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
/**
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
*
* Close up the VFs and other things in prep for PF Reset.
**/
-static void i40e_prep_for_reset(struct i40e_pf *pf)
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret = 0;
u32 v;
- clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
- if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
+ if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
return;
if (i40e_check_asq_alive(&pf->hw))
i40e_vc_notify_reset(pf);
@@ -6892,7 +7075,12 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
/* quiesce the VSIs and their queues that are not already DOWN */
+ /* pf_quiesce_all_vsi modifies netdev structures - rtnl_lock needed */
+ if (!lock_acquired)
+ rtnl_lock();
i40e_pf_quiesce_all_vsi(pf);
+ if (!lock_acquired)
+ rtnl_unlock();
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
@@ -6927,31 +7115,86 @@ static void i40e_send_version(struct i40e_pf *pf)
}
/**
- * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * i40e_get_oem_version - get OEM specific version information
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_get_oem_version(struct i40e_hw *hw)
+{
+ u16 block_offset = 0xffff;
+ u16 block_length = 0;
+ u16 capabilities = 0;
+ u16 gen_snap = 0;
+ u16 release = 0;
+
+#define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
+#define I40E_NVM_OEM_LENGTH_OFFSET 0x00
+#define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
+#define I40E_NVM_OEM_GEN_OFFSET 0x02
+#define I40E_NVM_OEM_RELEASE_OFFSET 0x03
+#define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
+#define I40E_NVM_OEM_LENGTH 3
+
+ /* Check if pointer to OEM version block is valid. */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
+ if (block_offset == 0xffff)
+ return;
+
+ /* Check if OEM version block has correct length. */
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
+ &block_length);
+ if (block_length < I40E_NVM_OEM_LENGTH)
+ return;
+
+ /* Check if OEM version format is as expected. */
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
+ &capabilities);
+ if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
+ return;
+
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
+ &gen_snap);
+ i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
+ &release);
+ hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
+ hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
+}
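For reference, the packing above can be undone as sketched below; the helper is hypothetical and assumes, as the packing implies, that the release field fills the bits below I40E_OEM_SNAP_SHIFT.

/* Hypothetical decode of the oem_ver word built above: gen_snap in
 * the high bits, release in the low bits (assumes a 16-bit release
 * field, matching the u16 NVM word read above).
 */
static void example_decode_oem_ver(struct i40e_hw *hw,
				   u16 *gen_snap, u16 *release)
{
	*gen_snap = hw->nvm.oem_ver >> I40E_OEM_SNAP_SHIFT;
	*release = hw->nvm.oem_ver & 0xffff;
}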
+
+/**
+ * i40e_reset - wait for core reset to finish; reset the PF if a CORER is not seen
* @pf: board private structure
- * @reinit: if the Main VSI needs to re-initialized.
**/
-static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+static int i40e_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- u8 set_fc_aq_fail = 0;
i40e_status ret;
- u32 val;
- u32 v;
- /* Now we wait for GRST to settle out.
- * We don't have to delete the VEBs or VSIs from the hw switch
- * because the reset will make them disappear.
- */
ret = i40e_pf_reset(hw);
if (ret) {
dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
- set_bit(__I40E_RESET_FAILED, &pf->state);
- goto clear_recovery;
+ set_bit(__I40E_RESET_FAILED, pf->state);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+ } else {
+ pf->pfr_count++;
}
- pf->pfr_count++;
+ return ret;
+}
- if (test_bit(__I40E_DOWN, &pf->state))
+/**
+ * i40e_rebuild - rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u8 set_fc_aq_fail = 0;
+ i40e_status ret;
+ u32 val;
+ int v;
+
+ if (test_bit(__I40E_DOWN, pf->state))
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -6963,9 +7206,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto clear_recovery;
}
+ i40e_get_oem_version(&pf->hw);
/* re-verify the eeprom if we just had an EMP reset */
- if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
i40e_verify_eeprom(pf);
i40e_clear_pxe_mode(hw);
@@ -6974,8 +7218,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
goto end_core_reset;
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp,
- pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+ hw->func_caps.num_rx_qp, 0, 0);
if (ret) {
dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
goto end_core_reset;
@@ -6994,14 +7237,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
-#ifdef I40E_FCOE
- i40e_init_pf_fcoe(pf);
-
-#endif
/* do basic switch setup */
+ if (!lock_acquired)
+ rtnl_lock();
ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
- goto end_core_reset;
+ goto end_unlock;
/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
@@ -7072,7 +7313,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
if (ret) {
dev_info(&pf->pdev->dev,
"rebuild of Main VSI failed: %d\n", ret);
- goto end_core_reset;
+ goto end_unlock;
}
}
@@ -7115,18 +7356,45 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
- if (pf->num_alloc_vfs) {
- for (v = 0; v < pf->num_alloc_vfs; v++)
- i40e_reset_vf(&pf->vf[v], true);
- }
+ /* Release the RTNL lock before we start resetting VFs */
+ if (!lock_acquired)
+ rtnl_unlock();
+
+ i40e_reset_all_vfs(pf, true);
/* tell the firmware that we're starting */
i40e_send_version(pf);
+ /* We've already released the lock, so don't do it again */
+ goto end_core_reset;
+
+end_unlock:
+ if (!lock_acquired)
+ rtnl_unlock();
end_core_reset:
- clear_bit(__I40E_RESET_FAILED, &pf->state);
+ clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
- clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired)
+{
+ int ret;
+ /* Now we wait for GRST to settle out.
+ * We don't have to delete the VEBs or VSIs from the hw switch
+ * because the reset will make them disappear.
+ */
+ ret = i40e_reset(pf);
+ if (!ret)
+ i40e_rebuild(pf, reinit, lock_acquired);
}
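Splitting the old monolithic path into prep, reset and rebuild stages gives each caller its own locking convention; a sketch of the two conventions used elsewhere in this patch:

/* Illustrative only: the two caller conventions for the reset path.
 *
 * Already under rtnl_lock() (e.g. ndo callbacks, ethtool):
 *	i40e_prep_for_reset(pf, true);
 *	i40e_reset_and_rebuild(pf, reinit, true);
 *
 * No lock held (e.g. the service task):
 *	i40e_prep_for_reset(pf, false);
 *	i40e_reset(pf);
 *	i40e_rebuild(pf, reinit, false);
 */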
/**
@@ -7135,11 +7403,13 @@ clear_recovery:
*
* Close up the VFs and other things in prep for a Core Reset,
* then get ready to rebuild the world.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
**/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
- i40e_prep_for_reset(pf);
- i40e_reset_and_rebuild(pf, false);
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, false, lock_acquired);
}
/**
@@ -7157,7 +7427,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u32 reg;
int i;
- if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
+ if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
return;
/* find what triggered the MDD event */
@@ -7209,7 +7479,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
}
/* Queue belongs to the PF, initiate a reset */
if (pf_mdd_detected) {
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
}
}
@@ -7238,12 +7508,12 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
"Too many MDD events on VF %d, disabled\n", i);
dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n");
- set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
}
}
/* re-enable mdd interrupt cause */
- clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+ clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
@@ -7251,6 +7521,23 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
}
/**
+ * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
+ * @pf: board private structure
+ **/
+static void i40e_sync_udp_filters(struct i40e_pf *pf)
+{
+ int i;
+
+ /* loop through and set pending bit for all active UDP filters */
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->udp_ports[i].port)
+ pf->pending_udp_bitmap |= BIT_ULL(i);
+ }
+
+ pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+}
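A hedged usage sketch (this pairing is illustrative, not lifted from the patch): after a reset empties the HW tunnel table, marking every active port pending and kicking the service task is enough for the subtask below to replay them.

/* Illustrative pairing: queue a full tunnel-port replay and let the
 * service task's i40e_sync_udp_filters_subtask() do the AQ work.
 */
i40e_sync_udp_filters(pf);
i40e_service_event_schedule(pf);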
+
+/**
* i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
* @pf: board private structure
**/
@@ -7258,7 +7545,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
- __be16 port;
+ u16 port;
int i;
if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
@@ -7269,7 +7556,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
if (pf->pending_udp_bitmap & BIT_ULL(i)) {
pf->pending_udp_bitmap &= ~BIT_ULL(i);
- port = pf->udp_ports[i].index;
+ port = pf->udp_ports[i].port;
if (port)
ret = i40e_aq_add_udp_tunnel(hw, port,
pf->udp_ports[i].type,
@@ -7282,11 +7569,11 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
"%s %s port %d, index %d failed, err %s aq_err %s\n",
pf->udp_ports[i].type ? "vxlan" : "geneve",
port ? "add" : "delete",
- ntohs(port), i,
+ port, i,
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
- pf->udp_ports[i].index = 0;
+ pf->udp_ports[i].port = 0;
}
}
}
@@ -7304,11 +7591,10 @@ static void i40e_service_task(struct work_struct *work)
unsigned long start_time = jiffies;
/* don't bother with service tasks if a reset is in progress */
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
return;
- }
- if (test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+ if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
return;
i40e_detect_recover_hung(pf);
@@ -7318,23 +7604,34 @@ static void i40e_service_task(struct work_struct *work)
i40e_vc_process_vflr_event(pf);
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
- i40e_client_subtask(pf);
+ if (pf->flags & I40E_FLAG_CLIENT_RESET) {
+ /* Client subtask will reopen next time through. */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
+ pf->flags &= ~I40E_FLAG_CLIENT_RESET;
+ } else {
+ i40e_client_subtask(pf);
+ if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
+ i40e_notify_client_of_l2_param_changes(
+ pf->vsi[pf->lan_vsi]);
+ pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
+ }
+ }
i40e_sync_filters_subtask(pf);
i40e_sync_udp_filters_subtask(pf);
i40e_clean_adminq_subtask(pf);
/* flush memory to make sure state is correct before next watchdog */
smp_mb__before_atomic();
- clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
/* If the tasks have taken longer than one timer cycle or there
* is more work to be done, reschedule the service task now
* rather than wait for the timer to tick again.
*/
if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
- test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
- test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
- test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+ test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
+ test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
+ test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
i40e_service_event_schedule(pf);
}
@@ -7391,15 +7688,6 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
I40E_REQ_DESCRIPTOR_MULTIPLE);
break;
-#ifdef I40E_FCOE
- case I40E_VSI_FCOE:
- vsi->alloc_queue_pairs = pf->num_fcoe_qps;
- vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
- I40E_REQ_DESCRIPTOR_MULTIPLE);
- vsi->num_q_vectors = pf->num_fcoe_msix;
- break;
-
-#endif /* I40E_FCOE */
default:
WARN_ON(1);
return -ENODATA;
@@ -7418,15 +7706,22 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
**/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
+ struct i40e_ring **next_rings;
int size;
int ret = 0;
- /* allocate memory for both Tx and Rx ring pointers */
- size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ /* allocate memory for the Tx, XDP Tx and Rx ring pointers */
+ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
vsi->tx_rings = kzalloc(size, GFP_KERNEL);
if (!vsi->tx_rings)
return -ENOMEM;
- vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+ next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ vsi->xdp_rings = next_rings;
+ next_rings += vsi->alloc_queue_pairs;
+ }
+ vsi->rx_rings = next_rings;
if (alloc_qvectors) {
/* allocate memory for q_vector pointers */
@@ -7492,7 +7787,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
}
vsi->type = type;
vsi->back = pf;
- set_bit(__I40E_DOWN, &vsi->state);
+ set_bit(__I40E_VSI_DOWN, vsi->state);
vsi->flags = 0;
vsi->idx = vsi_idx;
vsi->int_rate_limit = 0;
@@ -7546,6 +7841,7 @@ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
kfree(vsi->tx_rings);
vsi->tx_rings = NULL;
vsi->rx_rings = NULL;
+ vsi->xdp_rings = NULL;
}
/**
@@ -7629,6 +7925,8 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
kfree_rcu(vsi->tx_rings[i], rcu);
vsi->tx_rings[i] = NULL;
vsi->rx_rings[i] = NULL;
+ if (vsi->xdp_rings)
+ vsi->xdp_rings[i] = NULL;
}
}
}
@@ -7639,43 +7937,61 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
**/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
- struct i40e_ring *tx_ring, *rx_ring;
+ int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
struct i40e_pf *pf = vsi->back;
- int i;
+ struct i40e_ring *ring;
/* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
/* allocate space for both Tx and Rx in one shot */
- tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
- if (!tx_ring)
+ ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!ring)
goto err_out;
- tx_ring->queue_index = i;
- tx_ring->reg_idx = vsi->base_queue + i;
- tx_ring->ring_active = false;
- tx_ring->vsi = vsi;
- tx_ring->netdev = vsi->netdev;
- tx_ring->dev = &pf->pdev->dev;
- tx_ring->count = vsi->num_desc;
- tx_ring->size = 0;
- tx_ring->dcb_tc = 0;
+ ring->queue_index = i;
+ ring->reg_idx = vsi->base_queue + i;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
- tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
- tx_ring->tx_itr_setting = pf->tx_itr_default;
- vsi->tx_rings[i] = tx_ring;
-
- rx_ring = &tx_ring[1];
- rx_ring->queue_index = i;
- rx_ring->reg_idx = vsi->base_queue + i;
- rx_ring->ring_active = false;
- rx_ring->vsi = vsi;
- rx_ring->netdev = vsi->netdev;
- rx_ring->dev = &pf->pdev->dev;
- rx_ring->count = vsi->num_desc;
- rx_ring->size = 0;
- rx_ring->dcb_tc = 0;
- rx_ring->rx_itr_setting = pf->rx_itr_default;
- vsi->rx_rings[i] = rx_ring;
+ ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ ring->tx_itr_setting = pf->tx_itr_default;
+ vsi->tx_rings[i] = ring++;
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ goto setup_rx;
+
+ ring->queue_index = vsi->alloc_queue_pairs + i;
+ ring->reg_idx = vsi->base_queue + ring->queue_index;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = NULL;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
+ if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+ ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ set_ring_xdp(ring);
+ ring->tx_itr_setting = pf->tx_itr_default;
+ vsi->xdp_rings[i] = ring++;
+
+setup_rx:
+ ring->queue_index = i;
+ ring->reg_idx = vsi->base_queue + i;
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ ring->size = 0;
+ ring->dcb_tc = 0;
+ ring->rx_itr_setting = pf->rx_itr_default;
+ vsi->rx_rings[i] = ring;
}
return 0;
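Each queue pair now owns one contiguous kcalloc block of qpv ring structs; the resulting layout, sketched for reference:

/* Sketch of the per-queue-pair allocation above:
 *
 *	qpv == 2:  [ Tx ][ Rx ]
 *	qpv == 3:  [ Tx ][ XDP Tx ][ Rx ]
 *
 * vsi->tx_rings[i], vsi->xdp_rings[i] (when XDP is enabled) and
 * vsi->rx_rings[i] all point into the same block, which is why
 * i40e_vsi_clear_rings() frees only tx_rings[i] and NULLs the rest.
 */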
@@ -7716,6 +8032,7 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
static int i40e_init_msix(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
+ int cpus, extra_vectors;
int vectors_left;
int v_budget, i;
int v_actual;
@@ -7734,9 +8051,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
* - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
* - The CPU count within the NUMA node if iWARP is enabled
-#ifdef I40E_FCOE
- * - The number of FCOE qps.
-#endif
* Once we count this up, try the request.
*
* If we can't get what we want, we'll simplify to nearly nothing
@@ -7751,10 +8065,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
vectors_left--;
}
- /* reserve vectors for the main PF traffic queues */
- pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+ /* reserve some vectors for the main PF traffic queues. Initially we
+ * only reserve at most 50% of the available vectors, in the case that
+ * the number of online CPUs is large. This ensures that we can enable
+ * extra features as well. Once we've enabled the other features, we
+ * will use any remaining vectors to reach as close as we can to the
+ * number of online CPUs.
+ */
+ cpus = num_online_cpus();
+ pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
vectors_left -= pf->num_lan_msix;
- v_budget += pf->num_lan_msix;
/* reserve one vector for sideband flow director */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -7767,20 +8087,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
-#ifdef I40E_FCOE
- /* can we reserve enough for FCoE? */
- if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- if (!vectors_left)
- pf->num_fcoe_msix = 0;
- else if (vectors_left >= pf->num_fcoe_qps)
- pf->num_fcoe_msix = pf->num_fcoe_qps;
- else
- pf->num_fcoe_msix = 1;
- v_budget += pf->num_fcoe_msix;
- vectors_left -= pf->num_fcoe_msix;
- }
-
-#endif
/* can we reserve enough for iWARP? */
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
iwarp_requested = pf->num_iwarp_msix;
@@ -7817,6 +8123,23 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
+ /* On systems with a large number of SMP cores, we previously limited
+ * the number of vectors for num_lan_msix to be at most 50% of the
+ * available vectors, to allow for other features. Now, we add back
+ * the remaining vectors. However, we ensure that the total
+ * num_lan_msix will not exceed num_online_cpus(). To do this, we
+ * calculate the number of vectors we can add without going over the
+ * cap of CPUs. For systems with a small number of CPUs this will be
+ * zero.
+ */
+ extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
+ pf->num_lan_msix += extra_vectors;
+ vectors_left -= extra_vectors;
+
+ WARN(vectors_left < 0,
+ "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
+
+ v_budget += pf->num_lan_msix;
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
GFP_KERNEL);
if (!pf->msix_entries)
@@ -7857,10 +8180,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
pf->num_vmdq_vsis = 1;
pf->num_vmdq_qps = 1;
-#ifdef I40E_FCOE
- pf->num_fcoe_qps = 0;
- pf->num_fcoe_msix = 0;
-#endif
/* partition out the remaining vectors */
switch (vec) {
@@ -7874,13 +8193,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else {
pf->num_lan_msix = 2;
}
-#ifdef I40E_FCOE
- /* give one vector to FCoE */
- if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- pf->num_lan_msix = 1;
- pf->num_fcoe_msix = 1;
- }
-#endif
break;
default:
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
@@ -7900,13 +8212,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
(vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
pf->num_lan_msix);
pf->num_lan_qps = pf->num_lan_msix;
-#ifdef I40E_FCOE
- /* give one vector to FCoE */
- if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- pf->num_fcoe_msix = 1;
- vec--;
- }
-#endif
break;
}
}
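A worked example of the two-pass budgeting (illustrative numbers): with 16 online CPUs and 20 vectors left after the misc vector, the first pass grants min(16, 20 / 2) = 10 LAN vectors; if FDir, iWARP and VMDq then consume 6 of the remaining 10, the second pass adds extra_vectors = min(16 - 10, 4) = 4, ending at 14 LAN vectors, close to but never above the CPU count.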
@@ -7927,13 +8232,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
}
-#ifdef I40E_FCOE
-
- if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
- dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
- }
-#endif
i40e_debug(&pf->hw, I40E_DEBUG_INIT,
"MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
pf->num_lan_msix,
@@ -8032,9 +8330,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (vectors < 0) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_IWARP_ENABLED |
-#ifdef I40E_FCOE
- I40E_FLAG_FCOE_ENABLED |
-#endif
I40E_FLAG_RSS_ENABLED |
I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_DCB_ENABLED |
@@ -8095,7 +8390,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
/* Only request the irq if this is the first time through, and
* not when we're rebuilding after a Reset
*/
- if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
err = request_irq(pf->msix_entries[0].vector,
i40e_intr, 0, pf->int_name, pf);
if (err) {
@@ -8267,13 +8562,10 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
if (vsi->type == I40E_VSI_MAIN) {
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
- seed_dw[i]);
+ wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
} else if (vsi->type == I40E_VSI_SRIOV) {
for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw,
- I40E_VFQF_HKEY1(i, vf_id),
- seed_dw[i]);
+ wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
} else {
dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
}
@@ -8291,9 +8583,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
return -EINVAL;
for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw,
- I40E_VFQF_HLUT1(i, vf_id),
- lut_dw[i]);
+ wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
} else {
dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
}
@@ -8421,9 +8711,12 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
/* Determine the RSS size of the VSI */
- if (!vsi->rss_size)
- vsi->rss_size = min_t(int, pf->alloc_rss_size,
- vsi->num_queue_pairs);
+ if (!vsi->rss_size) {
+ u16 qcount;
+
+ qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+ vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
+ }
if (!vsi->rss_size)
return -EINVAL;
@@ -8457,6 +8750,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
*
* returns 0 if rss is not enabled, if enabled returns the final rss queue
* count which may be different from the requested queue count.
+ * Note: expects to be called while under rtnl_lock()
**/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
@@ -8469,12 +8763,14 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
new_rss_size = min_t(int, queue_count, pf->rss_size_max);
if (queue_count != vsi->num_queue_pairs) {
+ u16 qcount;
+
vsi->req_queue_pairs = queue_count;
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
pf->alloc_rss_size = new_rss_size;
- i40e_reset_and_rebuild(pf, true);
+ i40e_reset_and_rebuild(pf, true, true);
/* Discard the user configured hash keys and lut, if less
* queues are enabled.
@@ -8486,8 +8782,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
}
/* Reset vsi->rss_size, as number of enabled queues changed */
- vsi->rss_size = min_t(int, pf->alloc_rss_size,
- vsi->num_queue_pairs);
+ qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+ vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
i40e_pf_config_rss(pf);
}
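For example (illustrative numbers): a VSI with num_queue_pairs = 8 spread over numtc = 2 traffic classes yields qcount = 8 / 2 = 4, so rss_size = min(alloc_rss_size, 4). In other words, RSS is now sized per traffic class rather than across the whole VSI.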
@@ -8497,10 +8793,10 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
}
/**
- * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
+ * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
i40e_status status;
bool min_valid, max_valid;
@@ -8511,27 +8807,27 @@ i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
if (!status) {
if (min_valid)
- pf->npar_min_bw = min_bw;
+ pf->min_bw = min_bw;
if (max_valid)
- pf->npar_max_bw = max_bw;
+ pf->max_bw = max_bw;
}
return status;
}
/**
- * i40e_set_npar_bw_setting - Set BW settings for this PF partition
+ * i40e_set_partition_bw_setting - Set BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
struct i40e_aqc_configure_partition_bw_data bw_data;
i40e_status status;
/* Set the valid bit for this PF */
bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
- bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
- bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
+ bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
+ bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
/* Set the new bandwidths */
status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
@@ -8540,10 +8836,10 @@ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
}
/**
- * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
+ * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
/* Commit temporary BW setting to permanent NVM image */
enum i40e_admin_queue_err last_aq_status;
@@ -8662,16 +8958,19 @@ static int i40e_sw_init(struct i40e_pf *pf)
if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
pf->flags |= I40E_FLAG_MFP_ENABLED;
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
- if (i40e_get_npar_bw_setting(pf))
+ if (i40e_get_partition_bw_setting(pf)) {
dev_warn(&pf->pdev->dev,
- "Could not get NPAR bw settings\n");
- else
+ "Could not get partition bw settings\n");
+ } else {
dev_info(&pf->pdev->dev,
- "Min BW = %8.8x, Max BW = %8.8x\n",
- pf->npar_min_bw, pf->npar_max_bw);
+ "Partition BW Min = %8.8x, Max = %8.8x\n",
+ pf->min_bw, pf->max_bw);
+
+ /* nudge the Tx scheduler */
+ i40e_set_partition_bw_setting(pf);
+ }
}
- /* FW/NVM is not yet fixed in this regard */
if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
@@ -8688,7 +8987,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->hw.func_caps.fd_filters_best_effort;
}
- if (i40e_is_mac_710(&pf->hw) &&
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4))) {
pf->flags |= I40E_FLAG_RESTART_AUTONEG;
@@ -8697,13 +8996,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
/* Disable FW LLDP if FW < v4.3 */
- if (i40e_is_mac_710(&pf->hw) &&
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
(pf->hw.aq.fw_maj_ver < 4)))
pf->flags |= I40E_FLAG_STOP_FW_LLDP;
/* Use the FW Set LLDP MIB API if FW > v4.40 */
- if (i40e_is_mac_710(&pf->hw) &&
+ if ((pf->hw.mac.type == I40E_MAC_XL710) &&
(((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
(pf->hw.aq.fw_maj_ver >= 5)))
pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
@@ -8720,10 +9019,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
-#ifdef I40E_FCOE
- i40e_init_pf_fcoe(pf);
-
-#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -8734,26 +9029,28 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
#endif /* CONFIG_PCI_IOV */
if (pf->hw.mac.type == I40E_MAC_X722) {
- pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
- I40E_FLAG_128_QP_RSS_CAPABLE |
- I40E_FLAG_HW_ATR_EVICT_CAPABLE |
- I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
- I40E_FLAG_WB_ON_ITR_CAPABLE |
- I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
- I40E_FLAG_NO_PCI_LINK_CHECK |
- I40E_FLAG_USE_SET_LLDP_MIB |
- I40E_FLAG_GENEVE_OFFLOAD_CAPABLE |
- I40E_FLAG_PTP_L4_CAPABLE;
+ pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE
+ | I40E_FLAG_128_QP_RSS_CAPABLE
+ | I40E_FLAG_HW_ATR_EVICT_CAPABLE
+ | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE
+ | I40E_FLAG_WB_ON_ITR_CAPABLE
+ | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE
+ | I40E_FLAG_NO_PCI_LINK_CHECK
+ | I40E_FLAG_USE_SET_LLDP_MIB
+ | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE
+ | I40E_FLAG_PTP_L4_CAPABLE
+ | I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE;
} else if ((pf->hw.aq.api_maj_ver > 1) ||
((pf->hw.aq.api_maj_ver == 1) &&
(pf->hw.aq.api_min_ver > 4))) {
/* Supported in FW API version higher than 1.4 */
pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
- pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
- } else {
- pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
}
+ /* Enable HW ATR eviction if possible */
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+ pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
+
pf->eeprom_version = 0xDEAD;
pf->lan_veb = I40E_NO_VEB;
pf->lan_vsi = I40E_NO_VSI;
@@ -8776,10 +9073,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
mutex_init(&pf->switch_mutex);
- /* If NPAR is enabled nudge the Tx scheduler */
- if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
- i40e_set_npar_bw_setting(pf);
-
sw_init_done:
return err;
}
@@ -8811,16 +9104,16 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
need_reset = true;
i40e_fdir_filter_exit(pf);
}
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_SB_AUTO_DISABLED);
/* reset fd counters */
- pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
- pf->fdir_pf_active_filters = 0;
+ pf->fd_add_err = 0;
+ pf->fd_atr_cnt = 0;
/* if ATR was auto disabled it can be re-enabled. */
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
+ pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
}
}
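/* [Editorial note] This hunk reflects a patch-wide cleanup: the old
 * pf->auto_disable_flags word is folded into pf->flags as explicit
 * I40E_FLAG_FD_SB_AUTO_DISABLED / I40E_FLAG_FD_ATR_AUTO_DISABLED bits,
 * so enable state and auto-disable state are tested in one flags word.
 */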
@@ -8853,6 +9146,7 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
* i40e_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
**/
static int i40e_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -8876,7 +9170,7 @@ static int i40e_set_features(struct net_device *netdev,
need_reset = i40e_set_ntuple(pf, features);
if (need_reset)
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
return 0;
}
@@ -8888,12 +9182,12 @@ static int i40e_set_features(struct net_device *netdev,
*
* Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
**/
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
+static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
{
u8 i;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->udp_ports[i].index == port)
+ if (pf->udp_ports[i].port == port)
return i;
}
@@ -8911,7 +9205,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- __be16 port = ti->port;
+ u16 port = ntohs(ti->port);
u8 next_idx;
u8 idx;
@@ -8919,8 +9213,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "port %d already offloaded\n",
- ntohs(port));
+ netdev_info(netdev, "port %d already offloaded\n", port);
return;
}
@@ -8929,7 +9222,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
- ntohs(port));
+ port);
return;
}
@@ -8947,7 +9240,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
}
/* New port: add it and mark its index in the bitmap */
- pf->udp_ports[next_idx].index = port;
+ pf->udp_ports[next_idx].port = port;
pf->pending_udp_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}
@@ -8963,7 +9256,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- __be16 port = ti->port;
+ u16 port = ntohs(ti->port);
u8 idx;
idx = i40e_get_udp_port_idx(pf, port);
@@ -8988,14 +9281,14 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
/* if port exists, set it to 0 (mark for deletion)
* and make it pending
*/
- pf->udp_ports[idx].index = 0;
+ pf->udp_ports[idx].port = 0;
pf->pending_udp_bitmap |= BIT_ULL(idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
return;
not_found:
netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
- ntohs(port));
+ port);
}
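/* [Editorial sketch, not part of the patch] The tunnel-port table now
 * stores ports in host order (note the ntohs() at the ndo boundary
 * above), so lookups and log messages need no further byte swapping.
 * A hypothetical minimal version of the lookup; EX_MAX_UDP_PORTS and
 * ex_udp_port stand in for the driver's own definitions:
 */
#define EX_MAX_UDP_PORTS 16

struct ex_udp_port {
	u16 port;	/* host order; 0 marks a free slot */
};

static u8 ex_get_udp_port_idx(const struct ex_udp_port *tbl, u16 port)
{
	u8 i;

	for (i = 0; i < EX_MAX_UDP_PORTS; i++)
		if (tbl[i].port == port)
			return i;
	return EX_MAX_UDP_PORTS;	/* not found */
}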
static int i40e_get_phys_port_id(struct net_device *netdev,
@@ -9072,6 +9365,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* is to change the mode then that requires a PF reset to
* allow rebuild of the components with required hardware
* bridge mode enabled.
+ *
+ * Note: expects to be called while under rtnl_lock()
**/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh,
@@ -9127,7 +9422,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
else
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
+ true);
break;
}
}
@@ -9233,6 +9529,72 @@ out_err:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
+/**
+ * i40e_xdp_setup - add/remove an XDP program
+ * @vsi: VSI to be changed
+ * @prog: XDP program
+ **/
+static int i40e_xdp_setup(struct i40e_vsi *vsi,
+ struct bpf_prog *prog)
+{
+ int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct i40e_pf *pf = vsi->back;
+ struct bpf_prog *old_prog;
+ bool need_reset;
+ int i;
+
+ /* Don't allow frames that span over multiple buffers */
+ if (frame_size > vsi->rx_buf_len)
+ return -EINVAL;
+
+ if (!i40e_enabled_xdp_vsi(vsi) && !prog)
+ return 0;
+
+ /* When turning XDP on->off/off->on we reset and rebuild the rings. */
+ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
+
+ if (need_reset)
+ i40e_prep_for_reset(pf, true);
+
+ old_prog = xchg(&vsi->xdp_prog, prog);
+
+ if (need_reset)
+ i40e_reset_and_rebuild(pf, true, true);
+
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
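/* [Editorial sketch, not part of the patch] Two things worth noting in
 * i40e_xdp_setup() above. First, the frame-size check: for a 1500-byte
 * MTU, frame_size = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4
 * (VLAN_HLEN) = 1522, which must fit in a single Rx buffer. Second,
 * the program handoff: an atomic swap so the rings only ever observe a
 * valid pointer, with the old program released afterwards. The swap,
 * reduced to its shape (ex_vsi is hypothetical):
 */
struct ex_vsi {
	struct bpf_prog *xdp_prog;
};

static void ex_swap_xdp_prog(struct ex_vsi *vsi, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog;

	old_prog = xchg(&vsi->xdp_prog, prog);	/* atomic handoff */
	if (old_prog)
		bpf_prog_put(old_prog);		/* drop old reference */
}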
+
+/**
+ * i40e_xdp - implements ndo_xdp for i40e
+ * @dev: netdevice
+ * @xdp: XDP command
+ **/
+static int i40e_xdp(struct net_device *dev,
+ struct netdev_xdp *xdp)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return i40e_xdp_setup(vsi, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -9250,10 +9612,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_poll_controller = i40e_netpoll,
#endif
.ndo_setup_tc = __i40e_setup_tc,
-#ifdef I40E_FCOE
- .ndo_fcoe_enable = i40e_fcoe_enable,
- .ndo_fcoe_disable = i40e_fcoe_disable,
-#endif
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
@@ -9269,6 +9627,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_features_check = i40e_features_check,
.ndo_bridge_getlink = i40e_ndo_bridge_getlink,
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
+ .ndo_xdp = i40e_xdp,
};
/**
@@ -9286,6 +9645,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
u8 broadcast[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
int etherdev_size;
+ netdev_features_t hw_enc_features;
+ netdev_features_t hw_features;
etherdev_size = sizeof(struct i40e_netdev_priv);
netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
@@ -9296,56 +9657,61 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np = netdev_priv(netdev);
np->vsi = vsi;
- netdev->hw_enc_features |= NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_HIGHDMA |
- NETIF_F_SOFT_FEATURES |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_TSO6 |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM |
- NETIF_F_GSO_IPXIP4 |
- NETIF_F_GSO_IPXIP6 |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL |
- NETIF_F_SCTP_CRC |
- NETIF_F_RXHASH |
- NETIF_F_RXCSUM |
- 0;
+ hw_enc_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ 0;
if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_enc_features |= hw_enc_features;
+
/* record features VLANs can make use of */
- netdev->vlan_features |= netdev->hw_enc_features |
- NETIF_F_TSO_MANGLEID;
+ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
netdev->hw_features |= NETIF_F_NTUPLE;
+ hw_features = hw_enc_features |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
- netdev->hw_features |= netdev->hw_enc_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features |= hw_features;
- netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
- /* The following steps are necessary to prevent reception
- * of tagged packets - some older NVM configurations load a
- * default a MAC-VLAN filter that accepts any tagged packet
- * which must be replaced by a normal filter.
+ /* The following steps are necessary for two reasons. First,
+ * some older NVM configurations load a default MAC-VLAN
+ * filter that will accept any tagged packet, and we want to
+ * replace this with a normal filter. Additionally, it is
+ * possible our MAC address was provided by the platform using
+ * Open Firmware or similar.
+ *
+ * Thus, we need to remove the default filter and install one
+ * specific to the MAC address.
*/
i40e_rm_default_mac_filter(vsi, mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
- i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
+ i40e_add_mac_filter(vsi, mac_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
@@ -9354,7 +9720,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
random_ether_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
- i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
+ i40e_add_mac_filter(vsi, mac_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
@@ -9373,7 +9739,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
*/
eth_broadcast_addr(broadcast);
spin_lock_bh(&vsi->mac_filter_hash_lock);
- i40e_add_filter(vsi, broadcast, I40E_VLAN_ANY);
+ i40e_add_mac_filter(vsi, broadcast);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, mac_addr);
@@ -9387,9 +9753,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->netdev_ops = &i40e_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
i40e_set_ethtool_ops(netdev);
-#ifdef I40E_FCOE
- i40e_fcoe_config_netdev(netdev, vsi);
-#endif
/* MTU range: 68 - 9706 */
netdev->min_mtu = ETH_MIN_MTU;
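/* [Editorial note] The feature setup above now builds hw_enc_features
 * and hw_features in local variables and assigns each netdev field
 * once, rather than deriving netdev->hw_features and netdev->features
 * from a partially initialized netdev->hw_enc_features.
 */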
@@ -9613,16 +9976,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
-#ifdef I40E_FCOE
- case I40E_VSI_FCOE:
- ret = i40e_fcoe_vsi_init(vsi, &ctxt);
- if (ret) {
- dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
- return ret;
- }
- break;
-
-#endif /* I40E_FCOE */
case I40E_VSI_IWARP:
/* send down message to iWARP */
break;
@@ -9649,7 +10002,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
}
vsi->active_filters = 0;
- clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* If macvlan filters already exist, force them to get loaded */
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
@@ -9702,7 +10055,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
return -ENODEV;
}
if (vsi == pf->vsi[pf->lan_vsi] &&
- !test_bit(__I40E_DOWN, &pf->state)) {
+ !test_bit(__I40E_DOWN, pf->state)) {
dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
return -ENODEV;
}
@@ -9843,6 +10196,7 @@ vector_setup_out:
**/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
+ u16 alloc_queue_pairs;
struct i40e_pf *pf;
u8 enabled_tc;
int ret;
@@ -9861,11 +10215,14 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
if (ret)
goto err_vsi;
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+
+ ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev,
"failed to get tracking for %d queues for VSI %d err %d\n",
- vsi->alloc_queue_pairs, vsi->seid, ret);
+ alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
vsi->base_queue = ret;
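/* [Editorial note] The doubling above accounts for XDP: an XDP-enabled
 * VSI needs one extra Tx ring per queue pair (for XDP_TX), so e.g. a
 * VSI with 8 queue pairs asks the queue pile for 16 queues.
 */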
@@ -9921,6 +10278,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
{
struct i40e_vsi *vsi = NULL;
struct i40e_veb *veb = NULL;
+ u16 alloc_queue_pairs;
int ret, i;
int v_idx;
@@ -10008,12 +10366,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
else if (type == I40E_VSI_SRIOV)
vsi->vf_id = param1;
/* assign it some queues */
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
- vsi->idx);
+ alloc_queue_pairs = vsi->alloc_queue_pairs *
+ (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+
+ ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev,
"failed to get tracking for %d queues for VSI %d err=%d\n",
- vsi->alloc_queue_pairs, vsi->seid, ret);
+ alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
vsi->base_queue = ret;
@@ -10039,7 +10399,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
}
}
case I40E_VSI_VMDQ2:
- case I40E_VSI_FCOE:
ret = i40e_config_netdev(vsi);
if (ret)
goto err_netdev;
@@ -10679,7 +11038,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_pf_config_rss(pf);
/* fill in link information and enable LSE reporting */
- i40e_update_link_info(&pf->hw);
i40e_link_event(pf);
/* Initialize user-specific link properties */
@@ -10688,6 +11046,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_ptp_init(pf);
+ /* repopulate tunnel port filters */
+ i40e_sync_udp_filters(pf);
+
return ret;
}
@@ -10700,9 +11061,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
int queues_left;
pf->num_lan_qps = 0;
-#ifdef I40E_FCOE
- pf->num_fcoe_qps = 0;
-#endif
/* Find the max queues to be put into basic use. We'll always be
* using TC0, whether or not DCB is running, and TC0 will get the
@@ -10719,9 +11077,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* make sure all the fancies are disabled */
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_IWARP_ENABLED |
-#ifdef I40E_FCOE
- I40E_FLAG_FCOE_ENABLED |
-#endif
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_CAPABLE |
@@ -10738,9 +11093,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_IWARP_ENABLED |
-#ifdef I40E_FCOE
- I40E_FLAG_FCOE_ENABLED |
-#endif
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_ENABLED |
@@ -10761,22 +11113,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= pf->num_lan_qps;
}
-#ifdef I40E_FCOE
- if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- if (I40E_DEFAULT_FCOE <= queues_left) {
- pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
- } else if (I40E_MINIMUM_FCOE <= queues_left) {
- pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
- } else {
- pf->num_fcoe_qps = 0;
- pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
- dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
- }
-
- queues_left -= pf->num_fcoe_qps;
- }
-
-#endif
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
if (queues_left > 1) {
queues_left -= 1; /* save 1 queue for FD */
@@ -10808,9 +11144,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
queues_left);
-#ifdef I40E_FCOE
- dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
-#endif
}
/**
@@ -10877,10 +11210,6 @@ static void i40e_print_features(struct i40e_pf *pf)
i += snprintf(&buf[i], REMAIN(i), " Geneve");
if (pf->flags & I40E_FLAG_PTP)
i += snprintf(&buf[i], REMAIN(i), " PTP");
-#ifdef I40E_FCOE
- if (pf->flags & I40E_FLAG_FCOE_ENABLED)
- i += snprintf(&buf[i], REMAIN(i), " FCOE");
-#endif
if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
i += snprintf(&buf[i], REMAIN(i), " VEB");
else
@@ -10893,20 +11222,18 @@ static void i40e_print_features(struct i40e_pf *pf)
/**
* i40e_get_platform_mac_addr - get platform-specific MAC address
- *
* @pdev: PCI device information struct
* @pf: board private structure
*
- * Look up the MAC address in Open Firmware on systems that support it,
- * and use IDPROM on SPARC if no OF address is found. On return, the
- * I40E_FLAG_PF_MAC will be wset in pf->flags if a platform-specific value
- * has been selected.
+ * Look up the MAC address for the device. First we'll try
+ * eth_platform_get_mac_address, which will check Open Firmware or an
+ * arch-specific fallback. Otherwise, we fall back to the value stored
+ * in firmware.
**/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
- pf->flags &= ~I40E_FLAG_PF_MAC;
- if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
- pf->flags |= I40E_FLAG_PF_MAC;
+ if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
+ i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}
/**
@@ -10971,7 +11298,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pf->next_vsi = 0;
pf->pdev = pdev;
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_DOWN, pf->state);
hw = &pf->hw;
hw->back = pf;
@@ -10994,8 +11321,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ hw->bus.bus_id = pdev->bus->number;
pf->instance = pfs_found;
+ INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+ INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
/* set up the locks for the AQ, do this only once in probe
* and destroy them only once in remove
*/
@@ -11060,6 +11391,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ i40e_get_oem_version(hw);
/* provide nvm, fw, api versions */
dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
@@ -11094,8 +11426,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp,
- pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+ hw->func_caps.num_rx_qp, 0, 0);
if (err) {
dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
goto err_init_lan_hmc;
@@ -11117,9 +11448,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_aq_stop_lldp(hw, true, NULL);
}
- i40e_get_mac_addr(hw, hw->mac.addr);
/* allow a platform config to override the HW addr */
i40e_get_platform_mac_addr(pdev, pf);
+
if (!is_valid_ether_addr(hw->mac.addr)) {
dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
err = -EIO;
@@ -11130,18 +11461,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
if (is_valid_ether_addr(hw->mac.port_addr))
pf->flags |= I40E_FLAG_PORT_ID_VALID;
-#ifdef I40E_FCOE
- err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
- if (err)
- dev_info(&pdev->dev,
- "(non-fatal) SAN MAC retrieval failed: %d\n", err);
- if (!is_valid_ether_addr(hw->mac.san_addr)) {
- dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
- hw->mac.san_addr);
- ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
- }
- dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
-#endif /* I40E_FCOE */
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
@@ -11159,8 +11478,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->service_timer_period = HZ;
INIT_WORK(&pf->service_task, i40e_service_task);
- clear_bit(__I40E_SERVICE_SCHED, &pf->state);
- pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
@@ -11198,7 +11516,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ !test_bit(__I40E_BAD_EEPROM, pf->state)) {
if (pci_num_vf(pdev))
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
}
@@ -11271,7 +11589,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* before setting up the misc vector or we get a race and the vector
* ends up disabled forever.
*/
- clear_bit(__I40E_DOWN, &pf->state);
+ clear_bit(__I40E_DOWN, pf->state);
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI
@@ -11291,7 +11609,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ !test_bit(__I40E_BAD_EEPROM, pf->state)) {
/* disable link interrupts for VFs */
val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
@@ -11332,16 +11650,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
round_jiffies(jiffies + pf->service_timer_period));
/* add this PF to client device list and launch a client service task */
- err = i40e_lan_add_device(pf);
- if (err)
- dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
- err);
-
-#ifdef I40E_FCOE
- /* create FCoE interface */
- i40e_fcoe_vsi_setup(pf);
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ err = i40e_lan_add_device(pf);
+ if (err)
+ dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
+ err);
+ }
-#endif
#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
/* Devices on the IOSF bus do not have this information
@@ -11429,7 +11744,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Unwind what we've done if something failed in the setup */
err_vsis:
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_DOWN, pf->state);
i40e_clear_interrupt_scheme(pf);
kfree(pf->vsi);
err_switch_setup:
@@ -11480,13 +11795,18 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
/* no more scheduling of any task */
- set_bit(__I40E_SUSPENDED, &pf->state);
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_DOWN, pf->state);
if (pf->service_timer.data)
del_timer_sync(&pf->service_timer);
if (pf->service_task.func)
cancel_work_sync(&pf->service_task);
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
@@ -11513,10 +11833,11 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
/* remove attached clients */
- ret_code = i40e_lan_del_device(pf);
- if (ret_code) {
- dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
- ret_code);
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ ret_code = i40e_lan_del_device(pf);
+ if (ret_code)
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ ret_code);
}
/* shutdown and destroy the HMC */
@@ -11583,11 +11904,8 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
}
/* shutdown all operations */
- if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
- rtnl_lock();
- i40e_prep_for_reset(pf);
- rtnl_unlock();
- }
+ if (!test_bit(__I40E_SUSPENDED, pf->state))
+ i40e_prep_for_reset(pf, false);
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
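/* [Editorial note] The new bool argument threaded through
 * i40e_prep_for_reset()/i40e_handle_reset_warning() in this patch says
 * whether the caller already holds rtnl_lock: paths like i40e_shutdown
 * take the lock and pass true, while paths like this error handler
 * pass false and let the helper take the lock only where needed.
 */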
@@ -11650,12 +11968,57 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s\n", __func__);
- if (test_bit(__I40E_SUSPENDED, &pf->state))
+ if (test_bit(__I40E_SUSPENDED, pf->state))
return;
- rtnl_lock();
- i40e_handle_reset_warning(pf);
- rtnl_unlock();
+ i40e_handle_reset_warning(pf, false);
+}
+
+/**
+ * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
+ * using the mac_address_write admin q function
+ * @pf: pointer to i40e_pf struct
+ **/
+static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u8 mac_addr[6];
+ u16 flags = 0;
+
+ /* Get current MAC address in case it's an LAA */
+ if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
+ ether_addr_copy(mac_addr,
+ pf->vsi[pf->lan_vsi]->netdev->dev_addr);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Failed to retrieve MAC address; using default\n");
+ ether_addr_copy(mac_addr, hw->mac.addr);
+ }
+
+ /* The FW expects the MAC address write command to be called first
+ * with one of these flags before it is called again with the
+ * multicast enable flags.
+ */
+ flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
+
+ if (hw->func_caps.flex10_enable && hw->partition_id != 1)
+ flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
+
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
+ return;
+ }
+
+ flags = I40E_AQC_MC_MAG_EN
+ | I40E_AQC_WOL_PRESERVE_ON_PFR
+ | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed to enable Multicast Magic Packet wake up\n");
}
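/* [Editorial sketch, not part of the patch] The firmware contract used
 * by i40e_enable_mc_magic_wake() above, reduced to its two-step shape:
 * the write type must be selected by a first mac_address_write call
 * before a second call may set the multicast-magic bits.
 */
static i40e_status ex_mc_magic_two_step(struct i40e_hw *hw, u8 *mac)
{
	i40e_status ret;

	/* step 1: select the LAA write type */
	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
					mac, NULL);
	if (ret)
		return ret;

	/* step 2: repeat the command with the multicast-magic flags */
	return i40e_aq_mac_address_write(hw,
					 I40E_AQC_MC_MAG_EN |
					 I40E_AQC_WOL_PRESERVE_ON_PFR |
					 I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG,
					 mac, NULL);
}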
/**
@@ -11667,10 +12030,10 @@ static void i40e_shutdown(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
- set_bit(__I40E_SUSPENDED, &pf->state);
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_DOWN, pf->state);
rtnl_lock();
- i40e_prep_for_reset(pf);
+ i40e_prep_for_reset(pf, true);
rtnl_unlock();
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11680,9 +12043,15 @@ static void i40e_shutdown(struct pci_dev *pdev)
cancel_work_sync(&pf->service_task);
i40e_fdir_teardown(pf);
- rtnl_lock();
- i40e_prep_for_reset(pf);
- rtnl_unlock();
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
+ if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
+ i40e_enable_mc_magic_wake(pf);
+
+ i40e_prep_for_reset(pf, false);
wr32(hw, I40E_PFPM_APM,
(pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11708,12 +12077,13 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
struct i40e_hw *hw = &pf->hw;
int retval = 0;
- set_bit(__I40E_SUSPENDED, &pf->state);
- set_bit(__I40E_DOWN, &pf->state);
+ set_bit(__I40E_SUSPENDED, pf->state);
+ set_bit(__I40E_DOWN, pf->state);
- rtnl_lock();
- i40e_prep_for_reset(pf);
- rtnl_unlock();
+ if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
+ i40e_enable_mc_magic_wake(pf);
+
+ i40e_prep_for_reset(pf, false);
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
@@ -11757,11 +12127,9 @@ static int i40e_resume(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
/* handling the reset will rebuild the device state */
- if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
- clear_bit(__I40E_DOWN, &pf->state);
- rtnl_lock();
- i40e_reset_and_rebuild(pf, false);
- rtnl_unlock();
+ if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
+ clear_bit(__I40E_DOWN, pf->state);
+ i40e_reset_and_rebuild(pf, false, false);
}
return 0;