Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 16
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 39
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_fdir.h | 15
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 179
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 71
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 122
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 144
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 30
19 files changed, 443 insertions, 237 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f7a332e51524..1ab8dbe2d880 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -16224,7 +16224,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
if (val < MAX_FRAME_SIZE_DEFAULT)
dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
- i, val);
+ pf->hw.port, val);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 08d7edccfb8d..3f99eb198245 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3844,7 +3844,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
int aq_ret = 0;
- int i, ret;
+ int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = -EINVAL;
@@ -3868,8 +3868,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
}
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
- if (!cfilter)
- return -ENOMEM;
+ if (!cfilter) {
+ aq_ret = -ENOMEM;
+ goto err_out;
+ }
/* parse destination mac address */
for (i = 0; i < ETH_ALEN; i++)
@@ -3917,13 +3919,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
/* Adding cloud filter programmed as TC filter */
if (tcf.dst_port)
- ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+ aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
else
- ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
- if (ret) {
+ aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+ if (aq_ret) {
dev_err(&pf->pdev->dev,
"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
- vf->vf_id, ERR_PTR(ret),
+ vf->vf_id, ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err_free;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index e7ab89dc883a..63b45c61cc4a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -292,6 +292,7 @@ struct iavf_adapter {
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
+#define IAVF_FLAG_FDIR_ENABLED BIT(21)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 6f236d1a6444..dc499fe7734e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -827,18 +827,10 @@ static int __iavf_set_coalesce(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev);
int i;
- if (ec->rx_coalesce_usecs == 0) {
- if (ec->use_adaptive_rx_coalesce)
- netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
- } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
- (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
+ if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) {
netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
- } else if (ec->tx_coalesce_usecs == 0) {
- if (ec->use_adaptive_tx_coalesce)
- netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
- } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
- (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
+ } else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) {
netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
}
@@ -1069,7 +1061,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *rule = NULL;
int ret = 0;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
@@ -1211,7 +1203,7 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
unsigned int cnt = 0;
int val = 0;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
cmd->data = IAVF_MAX_FDIR_FILTERS;
@@ -1403,7 +1395,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
int count = 50;
int err;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
if (fsp->flow_type & FLOW_MAC_EXT)
@@ -1444,12 +1436,16 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
spin_lock_bh(&adapter->fdir_fltr_lock);
iavf_fdir_list_add_fltr(adapter, fltr);
adapter->fdir_active_fltr++;
- fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ if (adapter->link_up) {
+ fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ } else {
+ fltr->state = IAVF_FDIR_FLTR_INACTIVE;
+ }
spin_unlock_bh(&adapter->fdir_fltr_lock);
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
-
+ if (adapter->link_up)
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
ret:
if (err && fltr)
kfree(fltr);
@@ -1471,7 +1467,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
struct iavf_fdir_fltr *fltr = NULL;
int err = 0;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
@@ -1480,6 +1476,11 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
+ list_del(&fltr->list);
+ kfree(fltr);
+ adapter->fdir_active_fltr--;
+ fltr = NULL;
} else {
err = -EBUSY;
}
@@ -1788,7 +1789,7 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
break;
spin_lock_bh(&adapter->fdir_fltr_lock);
cmd->rule_cnt = adapter->fdir_active_fltr;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
index 9eb9f73f6adf..d31bd923ba8c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
@@ -6,12 +6,25 @@
struct iavf_adapter;
-/* State of Flow Director filter */
+/* State of Flow Director filter
+ *
+ * *_REQUEST states mark a filter that still needs to be sent to the PF driver
+ * to perform an action (add or delete the filter). *_PENDING states indicate
+ * that the request has been sent to the PF and the driver is waiting for a
+ * response.
+ *
+ * Both the DELETE and DISABLE states ask the PF to delete a filter. After a
+ * successful response, a filter in DEL_PENDING state is also removed from the
+ * VF driver, while a filter in DIS_PENDING state transitions to INACTIVE.
+ */
enum iavf_fdir_fltr_state_t {
IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */
IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */
IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */
IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */
+ IAVF_FDIR_FLTR_DIS_REQUEST, /* Filter scheduled to be disabled */
+ IAVF_FDIR_FLTR_DIS_PENDING, /* Filter pending disable by the PF */
+ IAVF_FDIR_FLTR_INACTIVE, /* Filter inactive on link down */
IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */
};
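For orientation, here is a minimal user-space sketch of the filter state machine described in the comment above. The transitions mirror iavf_clear_fdir_filters() and iavf_restore_fdir_filters() from this patch; the local enum copy, helper names and main() are illustrative stand-ins, not driver code.

#include <stdio.h>

enum fltr_state {
	FLTR_ADD_REQUEST,	/* user asked to add the filter */
	FLTR_ADD_PENDING,	/* add request sent to the PF */
	FLTR_DEL_REQUEST,	/* user asked to delete the filter */
	FLTR_DEL_PENDING,	/* delete request sent to the PF */
	FLTR_DIS_REQUEST,	/* filter scheduled to be disabled */
	FLTR_DIS_PENDING,	/* disable request sent to the PF */
	FLTR_INACTIVE,		/* kept in the VF list, not programmed in HW */
	FLTR_ACTIVE,		/* programmed in HW */
};

/* Link down (as in iavf_clear_fdir_filters): cancel unsent adds, disable the rest. */
static enum fltr_state on_link_down(enum fltr_state s)
{
	if (s == FLTR_ADD_REQUEST)
		return FLTR_INACTIVE;
	if (s == FLTR_ADD_PENDING || s == FLTR_ACTIVE)
		return FLTR_DIS_REQUEST;
	return s;
}

/* Link up (as in iavf_restore_fdir_filters): re-add disabled or inactive filters. */
static enum fltr_state on_link_up(enum fltr_state s)
{
	if (s == FLTR_DIS_REQUEST)
		return FLTR_ACTIVE;
	if (s == FLTR_DIS_PENDING || s == FLTR_INACTIVE)
		return FLTR_ADD_REQUEST;
	return s;
}

int main(void)
{
	enum fltr_state s = FLTR_ACTIVE;

	s = on_link_down(s);	/* ACTIVE -> DIS_REQUEST */
	s = on_link_up(s);	/* DIS_REQUEST -> ACTIVE */
	printf("final state: %d\n", (int)s);
	return 0;
}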
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index c862ebcd2e39..e8d5b889addc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -277,27 +277,6 @@ void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
}
/**
- * iavf_lock_timeout - try to lock mutex but give up after timeout
- * @lock: mutex that should be locked
- * @msecs: timeout in msecs
- *
- * Returns 0 on success, negative on failure
- **/
-static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
-{
- unsigned int wait, delay = 10;
-
- for (wait = 0; wait < msecs; wait += delay) {
- if (mutex_trylock(lock))
- return 0;
-
- msleep(delay);
- }
-
- return -1;
-}
-
-/**
* iavf_schedule_reset - Set the flags and schedule a reset event
* @adapter: board private structure
* @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
@@ -1353,18 +1332,20 @@ static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
**/
static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
- struct iavf_fdir_fltr *fdir, *fdirtmp;
+ struct iavf_fdir_fltr *fdir;
/* remove all Flow Director filters */
spin_lock_bh(&adapter->fdir_fltr_lock);
- list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
- list) {
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
- list_del(&fdir->list);
- kfree(fdir);
- adapter->fdir_active_fltr--;
- } else {
- fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ /* Cancel a request, keep filter as inactive */
+ fdir->state = IAVF_FDIR_FLTR_INACTIVE;
+ } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+ fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
+ /* Disable filters which are active or have a pending
+ * request to PF to be added
+ */
+ fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -4113,6 +4094,33 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
/**
+ * iavf_restore_fdir_filters - restore FDIR filters when the interface comes back up
+ * @adapter: board private structure
+ *
+ * Restore existing FDIR filters when VF netdev comes back up.
+ **/
+static void iavf_restore_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *f;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(f, &adapter->fdir_list_head, list) {
+ if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
+ /* Cancel a request, keep filter as active */
+ f->state = IAVF_FDIR_FLTR_ACTIVE;
+ } else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING ||
+ f->state == IAVF_FDIR_FLTR_INACTIVE) {
+ /* Add filters which are inactive or have a pending
+ * request to PF to be deleted
+ */
+ f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+}
+
+/**
* iavf_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -4179,8 +4187,9 @@ static int iavf_open(struct net_device *netdev)
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- /* Restore VLAN filters that were removed with IFF_DOWN */
+ /* Restore filters that were removed with IFF_DOWN */
iavf_restore_filters(adapter);
+ iavf_restore_fdir_filters(adapter);
iavf_configure(adapter);
@@ -4311,6 +4320,49 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+/**
+ * iavf_disable_fdir - disable Flow Director and clear existing filters
+ * @adapter: board private structure
+ **/
+static void iavf_disable_fdir(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
+ bool del_filters = false;
+
+ adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED;
+
+ /* remove all Flow Director filters */
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
+ fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
+ /* Delete filters not registered in PF */
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+ fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
+ fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
+ /* Filters registered in PF, schedule their deletion */
+ fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ del_filters = true;
+ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ /* Request to delete filter already sent to PF, change
+ * state to DEL_PENDING to delete filter after PF's
+ * response, not set as INACTIVE
+ */
+ fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (del_filters) {
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+}
+
#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_RX | \
@@ -4336,6 +4388,13 @@ static int iavf_set_features(struct net_device *netdev,
((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS)))
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+ if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) {
+ if (features & NETIF_F_NTUPLE)
+ adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
+ else
+ iavf_disable_fdir(adapter);
+ }
+
return 0;
}
@@ -4685,6 +4744,9 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
features = iavf_fix_netdev_vlan_features(adapter, features);
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ features &= ~NETIF_F_NTUPLE;
+
return iavf_fix_strip_features(adapter, features);
}
@@ -4802,6 +4864,12 @@ int iavf_process_config(struct iavf_adapter *adapter)
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (FDIR_FLTR_SUPPORT(adapter)) {
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->features |= NETIF_F_NTUPLE;
+ adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
+ }
+
netdev->priv_flags |= IFF_UNICAST_FLT;
/* Do not turn on offloads when they are requested to be turned off.
@@ -4826,34 +4894,6 @@ int iavf_process_config(struct iavf_adapter *adapter)
}
/**
- * iavf_shutdown - Shutdown the device in preparation for a reboot
- * @pdev: pci device structure
- **/
-static void iavf_shutdown(struct pci_dev *pdev)
-{
- struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
- struct net_device *netdev = adapter->netdev;
-
- netif_device_detach(netdev);
-
- if (netif_running(netdev))
- iavf_close(netdev);
-
- if (iavf_lock_timeout(&adapter->crit_lock, 5000))
- dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);
- /* Prevent the watchdog from running. */
- iavf_change_state(adapter, __IAVF_REMOVE);
- adapter->aq_required = 0;
- mutex_unlock(&adapter->crit_lock);
-
-#ifdef CONFIG_PM
- pci_save_state(pdev);
-
-#endif
- pci_disable_device(pdev);
-}
-
-/**
* iavf_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in iavf_pci_tbl
@@ -5063,16 +5103,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
**/
static void iavf_remove(struct pci_dev *pdev)
{
- struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_adv_rss *rss, *rsstmp;
struct iavf_mac_filter *f, *ftmp;
+ struct iavf_adapter *adapter;
struct net_device *netdev;
struct iavf_hw *hw;
- netdev = adapter->netdev;
+ /* Don't proceed with remove if netdev is already freed */
+ netdev = pci_get_drvdata(pdev);
+ if (!netdev)
+ return;
+
+ adapter = iavf_pdev_to_adapter(pdev);
hw = &adapter->hw;
if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
@@ -5184,11 +5229,25 @@ static void iavf_remove(struct pci_dev *pdev)
destroy_workqueue(adapter->wq);
+ pci_set_drvdata(pdev, NULL);
+
free_netdev(netdev);
pci_disable_device(pdev);
}
+/**
+ * iavf_shutdown - Shutdown the device in preparation for a reboot
+ * @pdev: pci device structure
+ **/
+static void iavf_shutdown(struct pci_dev *pdev)
+{
+ iavf_remove(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
static struct pci_driver iavf_driver = {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 7e6ee32d19b6..10ba36602c0c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -15,7 +15,6 @@
*/
#define IAVF_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define IAVF_ITR_MASK 0x1FFE /* mask for ITR register value */
-#define IAVF_MIN_ITR 2 /* reg uses 2 usec resolution */
#define IAVF_ITR_100K 10 /* all values below must be even */
#define IAVF_ITR_50K 20
#define IAVF_ITR_20K 50
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 64c4443dbef9..2d9366be0ec5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1735,8 +1735,8 @@ void iavf_add_fdir_filter(struct iavf_adapter *adapter)
**/
void iavf_del_fdir_filter(struct iavf_adapter *adapter)
{
+ struct virtchnl_fdir_del f = {};
struct iavf_fdir_fltr *fdir;
- struct virtchnl_fdir_del f;
bool process_fltr = false;
int len;
@@ -1753,11 +1753,16 @@ void iavf_del_fdir_filter(struct iavf_adapter *adapter)
list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
process_fltr = true;
- memset(&f, 0, len);
f.vsi_id = fdir->vc_add_msg.vsi_id;
f.flow_id = fdir->flow_id;
fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
break;
+ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
+ process_fltr = true;
+ f.vsi_id = fdir->vc_add_msg.vsi_id;
+ f.flow_id = fdir->flow_id;
+ fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
+ break;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -1902,6 +1907,48 @@ static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
}
/**
+ * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
+ * @adapter: private adapter structure
+ *
+ * Called after a reset to re-add all FDIR filters and to drop the ones that
+ * were pending deletion.
+ */
+static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *f, *ftmp;
+ bool add_filters = false;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
+ if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
+ f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+ f->state == IAVF_FDIR_FLTR_ACTIVE) {
+ /* All filters and requests have been removed in PF,
+ * restore them
+ */
+ f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ add_filters = true;
+ } else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
+ f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ /* Link down state, leave filters as inactive */
+ f->state = IAVF_FDIR_FLTR_INACTIVE;
+ } else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
+ f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ /* Delete filters that were pending to be deleted, the
+ * list on PF is already cleared after a reset
+ */
+ list_del(&f->list);
+ kfree(f);
+ adapter->fdir_active_fltr--;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (add_filters)
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+}
+
+/**
* iavf_virtchnl_completion
* @adapter: adapter structure
* @v_opcode: opcode sent by PF
@@ -2078,7 +2125,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fdir, &adapter->fdir_list_head,
list) {
- if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING ||
+ fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
iavf_stat_str(&adapter->hw,
@@ -2214,6 +2262,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ iavf_activate_fdir_filters(adapter);
+
iavf_parse_vf_resource_msg(adapter);
/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
@@ -2390,7 +2440,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
- if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
+ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
+ del_fltr->status ==
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
fdir->loc);
list_del(&fdir->list);
@@ -2402,6 +2454,17 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
del_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
}
+ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
+ del_fltr->status ==
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+ fdir->state = IAVF_FDIR_FLTR_INACTIVE;
+ } else {
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n",
+ del_fltr->status);
+ iavf_print_fdir_fltr(adapter, fdir);
+ }
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index cd065ec48c87..280994ee5933 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -570,6 +570,50 @@ resume_traffic:
}
/**
+ * ice_lag_build_netdev_list - populate the lag struct's netdev list
+ * @lag: local lag struct
+ * @ndlist: pointer to netdev list to populate
+ */
+static void ice_lag_build_netdev_list(struct ice_lag *lag,
+ struct ice_lag_netdev_list *ndlist)
+{
+ struct ice_lag_netdev_list *nl;
+ struct net_device *tmp_nd;
+
+ INIT_LIST_HEAD(&ndlist->node);
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+ nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
+ if (!nl)
+ break;
+
+ nl->netdev = tmp_nd;
+ list_add(&nl->node, &ndlist->node);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = &ndlist->node;
+}
+
+/**
+ * ice_lag_destroy_netdev_list - free lag struct's netdev list
+ * @lag: pointer to local lag struct
+ * @ndlist: pointer to lag struct netdev list
+ */
+static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
+ struct ice_lag_netdev_list *ndlist)
+{
+ struct ice_lag_netdev_list *entry, *n;
+
+ rcu_read_lock();
+ list_for_each_entry_safe(entry, n, &ndlist->node, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = NULL;
+}
+
+/**
* ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
* @lag: primary interface LAG struct
* @oldport: lport of previous interface
@@ -597,7 +641,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
{
struct ice_lag_netdev_list ndlist;
- struct list_head *tmp, *n;
u8 pri_port, act_port;
struct ice_lag *lag;
struct ice_vsi *vsi;
@@ -621,38 +664,15 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
pri_port = pf->hw.port_info->lport;
act_port = lag->active_port;
- if (lag->upper_netdev) {
- struct ice_lag_netdev_list *nl;
- struct net_device *tmp_nd;
-
- INIT_LIST_HEAD(&ndlist.node);
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
- nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
- if (!nl)
- break;
-
- nl->netdev = tmp_nd;
- list_add(&nl->node, &ndlist.node);
- }
- rcu_read_unlock();
- }
-
- lag->netdev_head = &ndlist.node;
+ if (lag->upper_netdev)
+ ice_lag_build_netdev_list(lag, &ndlist);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
lag->bonded && lag->primary && pri_port != act_port &&
!list_empty(lag->netdev_head))
ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
- list_for_each_safe(tmp, n, &ndlist.node) {
- struct ice_lag_netdev_list *entry;
-
- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
- list_del(&entry->node);
- kfree(entry);
- }
- lag->netdev_head = NULL;
+ ice_lag_destroy_netdev_list(lag, &ndlist);
new_vf_unlock:
mutex_unlock(&pf->lag_mutex);
@@ -679,6 +699,29 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
}
+/**
+ * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
+ * @lag: local lag struct
+ * @src_prt: lport value for source port
+ * @dst_prt: lport value for destination port
+ *
+ * This function is used to move nodes during an out-of-netdev-event situation,
+ * primarily when the driver needs to reconfigure or recreate resources.
+ *
+ * Must be called while holding the lag_mutex to avoid lag events from
+ * processing while out-of-sync moves are happening. Also, paired moves,
+ * such as used in a reset flow, should both be called under the same mutex
+ * lock to avoid changes between start of reset and end of reset.
+ */
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
+{
+ struct ice_lag_netdev_list ndlist;
+
+ ice_lag_build_netdev_list(lag, &ndlist);
+ ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+}
+
#define ICE_LAG_SRIOV_CP_RECIPE 10
#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
@@ -2051,7 +2094,6 @@ void ice_lag_rebuild(struct ice_pf *pf)
{
struct ice_lag_netdev_list ndlist;
struct ice_lag *lag, *prim_lag;
- struct list_head *tmp, *n;
u8 act_port, loc_port;
if (!pf->lag || !pf->lag->bonded)
@@ -2063,21 +2105,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
if (lag->primary) {
prim_lag = lag;
} else {
- struct ice_lag_netdev_list *nl;
- struct net_device *tmp_nd;
-
- INIT_LIST_HEAD(&ndlist.node);
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
- nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
- if (!nl)
- break;
-
- nl->netdev = tmp_nd;
- list_add(&nl->node, &ndlist.node);
- }
- rcu_read_unlock();
- lag->netdev_head = &ndlist.node;
+ ice_lag_build_netdev_list(lag, &ndlist);
prim_lag = ice_lag_find_primary(lag);
}
@@ -2107,13 +2135,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_clear_rdma_cap(pf);
lag_rebuild_out:
- list_for_each_safe(tmp, n, &ndlist.node) {
- struct ice_lag_netdev_list *entry;
-
- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
- list_del(&entry->node);
- kfree(entry);
- }
+ ice_lag_destroy_netdev_list(lag, &ndlist);
mutex_unlock(&pf->lag_mutex);
}
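Below is a condensed, runnable user-space model of the calling pattern the ice_lag_move_vf_nodes_cfg() kdoc above requires: both halves of a paired move happen under a single hold of the LAG mutex, as ice_reset_vf() and ice_vc_cfg_qs_msg() do later in this diff. The mutex, port values and reset_vf() wrapper here are simplified stand-ins, not driver code.

#include <pthread.h>
#include <stdio.h>

#define INVALID_PORT 0xff

static pthread_mutex_t lag_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for ice_lag_move_vf_nodes_cfg(). */
static void move_vf_nodes_cfg(unsigned int src_prt, unsigned int dst_prt)
{
	printf("move VF Tx scheduling nodes: lport %u -> lport %u\n",
	       src_prt, dst_prt);
}

/* Shape of a caller such as ice_reset_vf(): park traffic on the primary
 * port, do the work, then move back -- all under one hold of the mutex.
 */
static void reset_vf(unsigned int pri_prt, unsigned int active_prt, int bonded)
{
	unsigned int act_prt = INVALID_PORT;

	pthread_mutex_lock(&lag_mutex);
	if (bonded && active_prt != pri_prt && active_prt != INVALID_PORT) {
		act_prt = active_prt;
		move_vf_nodes_cfg(act_prt, pri_prt);
	}

	/* ... the actual VF reset / queue configuration happens here ... */

	if (act_prt != INVALID_PORT)
		move_vf_nodes_cfg(pri_prt, act_prt);
	pthread_mutex_unlock(&lag_mutex);
}

int main(void)
{
	reset_vf(0, 1, 1);
	return 0;
}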
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 9557e8605a07..ede833dfa658 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -65,4 +65,5 @@ int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
bool ice_lag_is_switchdev_running(struct ice_pf *pf);
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
#endif /* _ICE_LAG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 6607fa6fe556..fb9c93f37e84 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -7401,15 +7401,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- /* configure PTP timestamping after VSI rebuild */
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) {
- if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
- ice_ptp_cfg_timestamp(pf, false);
- else if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL)
- /* for E82x PHC owner always need to have interrupts */
- ice_ptp_cfg_timestamp(pf, true);
- }
-
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
if (err) {
dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
@@ -7461,6 +7452,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_plug_aux_dev(pf);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
ice_lag_rebuild(pf);
+
+ /* Restore timestamp mode settings after VSI rebuild */
+ ice_ptp_restore_timestamp_mode(pf);
return;
err_vsi_rebuild:
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 1eddcbe89b0c..71f405f8a6fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -256,48 +256,42 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
}
/**
- * ice_ptp_configure_tx_tstamp - Enable or disable Tx timestamp interrupt
- * @pf: The PF pointer to search in
- * @on: bool value for whether timestamp interrupt is enabled or disabled
+ * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
+ * @pf: Board private structure
+ *
+ * Program the device to respond appropriately to the Tx timestamp interrupt
+ * cause.
*/
-static void ice_ptp_configure_tx_tstamp(struct ice_pf *pf, bool on)
+static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
+ struct ice_hw *hw = &pf->hw;
+ bool enable;
u32 val;
+ switch (pf->ptp.tx_interrupt_mode) {
+ case ICE_PTP_TX_INTERRUPT_ALL:
+ /* React to interrupts across all quads. */
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
+ enable = true;
+ break;
+ case ICE_PTP_TX_INTERRUPT_NONE:
+ /* Do not react to interrupts on any quad. */
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
+ enable = false;
+ break;
+ case ICE_PTP_TX_INTERRUPT_SELF:
+ default:
+ enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
+ break;
+ }
+
/* Configure the Tx timestamp interrupt */
- val = rd32(&pf->hw, PFINT_OICR_ENA);
- if (on)
+ val = rd32(hw, PFINT_OICR_ENA);
+ if (enable)
val |= PFINT_OICR_TSYN_TX_M;
else
val &= ~PFINT_OICR_TSYN_TX_M;
- wr32(&pf->hw, PFINT_OICR_ENA, val);
-}
-
-/**
- * ice_set_tx_tstamp - Enable or disable Tx timestamping
- * @pf: The PF pointer to search in
- * @on: bool value for whether timestamps are enabled or disabled
- */
-static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
-{
- struct ice_vsi *vsi;
- u16 i;
-
- vsi = ice_get_main_vsi(pf);
- if (!vsi)
- return;
-
- /* Set the timestamp enable flag for all the Tx rings */
- ice_for_each_txq(vsi, i) {
- if (!vsi->tx_rings[i])
- continue;
- vsi->tx_rings[i]->ptp_tx = on;
- }
-
- if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
- ice_ptp_configure_tx_tstamp(pf, on);
-
- pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ wr32(hw, PFINT_OICR_ENA, val);
}
/**
@@ -311,7 +305,7 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
u16 i;
vsi = ice_get_main_vsi(pf);
- if (!vsi)
+ if (!vsi || !vsi->rx_rings)
return;
/* Set the timestamp flag for all the Rx rings */
@@ -320,23 +314,50 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
continue;
vsi->rx_rings[i]->ptp_rx = on;
}
+}
+
+/**
+ * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
+ * @pf: Board private structure
+ *
+ * Called during preparation for reset to temporarily disable timestamping on
+ * the device. Called during remove to disable timestamping while cleaning up
+ * driver resources.
+ */
+static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ val = rd32(hw, PFINT_OICR_ENA);
+ val &= ~PFINT_OICR_TSYN_TX_M;
+ wr32(hw, PFINT_OICR_ENA, val);
- pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
- HWTSTAMP_FILTER_NONE;
+ ice_set_rx_tstamp(pf, false);
}
/**
- * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
+ * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
* @pf: Board private structure
- * @ena: bool value to enable or disable time stamp
*
- * This function will configure timestamping during PTP initialization
- * and deinitialization
+ * Called at the end of rebuild to restore timestamp configuration after
+ * a device reset.
*/
-void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
+void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
{
- ice_set_tx_tstamp(pf, ena);
- ice_set_rx_tstamp(pf, ena);
+ struct ice_hw *hw = &pf->hw;
+ bool enable_rx;
+
+ ice_ptp_cfg_tx_interrupt(pf);
+
+ enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
+ ice_set_rx_tstamp(pf, enable_rx);
+
+ /* Trigger an immediate software interrupt to ensure that timestamps
+ * which occurred during reset are handled now.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
}
/**
@@ -2037,10 +2058,10 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- ice_set_tx_tstamp(pf, false);
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
break;
case HWTSTAMP_TX_ON:
- ice_set_tx_tstamp(pf, true);
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
break;
default:
return -ERANGE;
@@ -2048,7 +2069,7 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- ice_set_rx_tstamp(pf, false);
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
@@ -2064,12 +2085,15 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
- ice_set_rx_tstamp(pf, true);
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
+ /* Immediately update the device timestamping mode */
+ ice_ptp_restore_timestamp_mode(pf);
+
return 0;
}
@@ -2737,7 +2761,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf)
clear_bit(ICE_FLAG_PTP, pf->flags);
/* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
+ ice_ptp_disable_timestamp_mode(pf);
kthread_cancel_delayed_work_sync(&ptp->work);
@@ -2803,15 +2827,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
- if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL) {
- /* The clock owner for this device type handles the timestamp
- * interrupt for all ports.
- */
- ice_ptp_configure_tx_tstamp(pf, true);
-
- /* React on all quads interrupts for E82x */
- wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
-
+ if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
err = ice_ptp_tx_ena_intr(pf, true, itr);
if (err)
@@ -2881,13 +2897,6 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
case ICE_PHY_E822:
- /* Non-owner PFs don't react to any interrupts on E82x,
- * neither on own quad nor on others
- */
- if (!ice_ptp_pf_handles_tx_interrupt(pf)) {
- ice_ptp_configure_tx_tstamp(pf, false);
- wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
- }
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
@@ -3032,6 +3041,9 @@ void ice_ptp_init(struct ice_pf *pf)
/* Start the PHY timestamping block */
ice_ptp_reset_phy_timestamping(pf);
+ /* Configure initial Tx interrupt settings */
+ ice_ptp_cfg_tx_interrupt(pf);
+
set_bit(ICE_FLAG_PTP, pf->flags);
err = ice_ptp_init_work(pf, ptp);
if (err)
@@ -3067,7 +3079,7 @@ void ice_ptp_release(struct ice_pf *pf)
return;
/* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
+ ice_ptp_disable_timestamp_mode(pf);
ice_ptp_remove_auxbus_device(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 8f6f94392756..06a330867fc9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -292,7 +292,7 @@ int ice_ptp_clock_index(struct ice_pf *pf);
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
-void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
+void ice_ptp_restore_timestamp_mode(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
@@ -317,8 +317,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
return -EOPNOTSUPP;
}
-static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { }
-
+static inline void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) { }
static inline void ice_ptp_extts_event(struct ice_pf *pf) { }
static inline s8
ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 2a5e6616cc0a..e1494f24f661 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -374,16 +374,11 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
*/
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
- struct ice_pf *pf;
-
if (!vf || !q_vector)
return -EINVAL;
- pf = vf->pf;
-
/* always add one to account for the OICR being the first MSIX */
- return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
- q_vector->v_idx + 1;
+ return vf->first_vector_idx + q_vector->v_idx + 1;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 52d0a126eb61..9e97ea863068 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2306,9 +2306,6 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
return;
- if (!tx_ring->ptp_tx)
- return;
-
/* Tx timestamps cannot be sampled when doing TSO */
if (first->tx_flags & ICE_TX_FLAGS_TSO)
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 166413fc33f4..daf7b9dbb143 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -380,7 +380,6 @@ struct ice_tx_ring {
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
- u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;
static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index aca1f2ea5034..b7ae09952156 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -829,12 +829,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
+ struct ice_lag *lag;
struct ice_vsi *vsi;
+ u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
bool rsd;
dev = ice_pf_to_dev(pf);
+ act_prt = ICE_LAG_INVALID_PORT;
+ pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -845,6 +849,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
+ lag = pf->lag;
+ mutex_lock(&pf->lag_mutex);
+ if (lag && lag->bonded && lag->primary) {
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+ lag->upper_netdev)
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ else
+ act_prt = ICE_LAG_INVALID_PORT;
+ }
+
if (flags & ICE_VF_RESET_LOCK)
mutex_lock(&vf->cfg_lock);
else
@@ -937,6 +952,11 @@ out_unlock:
if (flags & ICE_VF_RESET_LOCK)
mutex_unlock(&vf->cfg_lock);
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index d7b10dc67f03..80dc4bcdd3a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -32,7 +32,6 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
/* setup outer VLAN ops */
vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
/* setup inner VLAN ops */
vlan_ops = &vsi->inner_vlan_ops;
@@ -47,8 +46,13 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
}
+
+ /* all Rx traffic should be in the domain of the assigned port VLAN,
+ * so prevent disabling Rx VLAN filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
+
vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
}
@@ -77,6 +81,8 @@ static void ice_port_vlan_off(struct ice_vsi *vsi)
vlan_ops->del_vlan = ice_vsi_del_vlan;
}
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
@@ -141,7 +147,6 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
&vsi->outer_vlan_ops : &vsi->inner_vlan_ops;
vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index cdf17b1e2f25..1c7b4ded948b 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1523,7 +1523,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
u16 num_q_vectors_mapped, vsi_id, vector_id;
struct virtchnl_irq_map_info *irqmap_info;
struct virtchnl_vector_map *map;
- struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int i;
@@ -1535,7 +1534,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
* there is actually at least a single VF queue vector mapped
*/
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- pf->vfs.num_msix_per < num_q_vectors_mapped ||
+ vf->num_msix < num_q_vectors_mapped ||
!num_q_vectors_mapped) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -1557,7 +1556,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* vector_id is always 0-based for each VF, and can never be
* larger than or equal to the max allowed interrupts per VF
*/
- if (!(vector_id < pf->vfs.num_msix_per) ||
+ if (!(vector_id < vf->num_msix) ||
!ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1603,9 +1602,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
+ struct ice_lag *lag;
struct ice_vsi *vsi;
+ u8 act_prt, pri_prt;
int i = -1, q_idx;
+ lag = pf->lag;
+ mutex_lock(&pf->lag_mutex);
+ act_prt = ICE_LAG_INVALID_PORT;
+ pri_prt = pf->hw.port_info->lport;
+ if (lag && lag->bonded && lag->primary) {
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+ lag->upper_netdev)
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ else
+ act_prt = ICE_LAG_INVALID_PORT;
+ }
+
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
goto error_param;
@@ -1729,6 +1743,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
VIRTCHNL_STATUS_SUCCESS, NULL, 0);
@@ -1743,6 +1762,11 @@ error_param:
vf->vf_id, i);
}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
ice_lag_move_new_vf_nodes(vf);
/* send the response to the VF */