Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 29
-rw-r--r--  drivers/net/ethernet/intel/Makefile | 3
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 12
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 46
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 16
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 23
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 5
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 20
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 92
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 56
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 20
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 59
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 198
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 22
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 26
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 10
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 7
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 287
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 229
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 32
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 467
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c | 287
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 42
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ddp.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debug.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 141
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_devlink.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 344
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 1630
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 1034
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 76
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 42
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 303
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 143
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 222
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 10
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 8
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq.c | 86
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq.h | 7
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adv_rss.c | 8
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adv_rss.h | 3
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_common.c | 295
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 268
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_fdir.c | 3
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_fdir.h | 15
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 303
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_prototype.h | 7
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 568
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.h | 147
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_type.h | 90
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 129
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink.c (renamed from drivers/net/ethernet/intel/ice/ice_devlink.c) | 656
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink.h (renamed from drivers/net/ethernet/intel/ice/ice_devlink.h) | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink_port.c | 430
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink_port.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 49
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.c | 116
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 247
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 279
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 542
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 79
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.c | 341
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_debugfs.c | 674
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devids.h | 30
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.c | 144
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 643
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.h | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch_br.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 215
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 191
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.c | 180
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 52
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 482
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 91
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fwlog.c | 472
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fwlog.h | 79
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hwmon.c | 126
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hwmon.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 233
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 316
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 635
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 61
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 852
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_osdep.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 691
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 66
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 501
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 51
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.c | 324
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.h | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 44
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 136
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 410
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 212
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 50
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 35
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 237
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 49
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 50
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 67
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib_private.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 191
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 51
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c | 41
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 55
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h | 153
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq.c | 7
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq_api.h | 5
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_dev.c | 1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 53
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c | 108
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 8
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 88
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_vf_dev.c | 3
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 2282
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.h | 70
-rw-r--r--  drivers/net/ethernet/intel/idpf/virtchnl2.h | 30
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 29
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 19
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 18
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 17
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 102
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 139
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igbvf/mbx.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 43
-rw-r--r--  drivers/net/ethernet/intel/igc/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 105
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.h | 4
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 115
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_i225.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_leds.c | 302
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 315
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_phy.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 101
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 108
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 214
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 435
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 112
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h | 30
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 35
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 110
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 183
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 80
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 355
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 56
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 19
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 231
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 118
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 454
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 20
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 27
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/libeth/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/intel/libeth/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/libeth/rx.c | 150
-rw-r--r--  drivers/net/ethernet/intel/libie/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/intel/libie/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/libie/rx.c | 124
218 files changed, 15000 insertions, 12003 deletions
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 06ddd7147c7f..e0287fbd501d 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -16,6 +16,9 @@ config NET_VENDOR_INTEL
if NET_VENDOR_INTEL
+source "drivers/net/ethernet/intel/libeth/Kconfig"
+source "drivers/net/ethernet/intel/libie/Kconfig"
+
config E100
tristate "Intel(R) PRO/100+ support"
depends on PCI
@@ -41,7 +44,7 @@ config E100
config E1000
tristate "Intel(R) PRO/1000 Gigabit Ethernet support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This driver supports Intel(R) PRO/1000 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
@@ -225,6 +228,7 @@ config I40E
depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI
select AUXILIARY_BUS
+ select LIBIE
select NET_DEVLINK
help
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -253,6 +257,8 @@ config I40E_DCB
# so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF
config IAVF
tristate
+ select LIBIE
+
config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
select IAVF
@@ -283,6 +289,7 @@ config ICE
depends on GNSS || GNSS = n
select AUXILIARY_BUS
select DIMLIB
+ select LIBIE
select NET_DEVLINK
select PLDMFW
select DPLL
@@ -299,6 +306,17 @@ config ICE
To compile this driver as a module, choose M here. The module
will be called ice.
+config ICE_HWMON
+ bool "Intel(R) Ethernet Connection E800 Series Support HWMON support"
+ default y
+ depends on ICE && HWMON && !(ICE=y && HWMON=m)
+ help
+ Say Y if you want to expose thermal sensor data on Intel devices.
+
+ Some of our devices contain internal thermal sensors.
+ This data is available via the hwmon sysfs interface and exposes
+ the onboard sensors.
+
config ICE_SWITCHDEV
bool "Switchdev Support"
default y
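The ICE_HWMON entry above hooks the ice driver's on-board thermal sensors into the standard hwmon sysfs interface. As a rough illustration of what that interface involves (a minimal sketch against the generic hwmon API, not the actual ice_hwmon.c, and with all "demo" names hypothetical), a driver registers a chip description plus read callbacks and the core then creates /sys/class/hwmon/hwmonN/temp1_input:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>

static umode_t demo_hwmon_is_visible(const void *drvdata,
				     enum hwmon_sensor_types type,
				     u32 attr, int channel)
{
	return 0444;	/* every exposed attribute is read-only */
}

static int demo_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
			   u32 attr, int channel, long *val)
{
	if (type != hwmon_temp || attr != hwmon_temp_input)
		return -EOPNOTSUPP;
	*val = 42000;	/* millidegrees Celsius; a real driver queries firmware here */
	return 0;
}

static const struct hwmon_ops demo_hwmon_ops = {
	.is_visible = demo_hwmon_is_visible,
	.read = demo_hwmon_read,
};

static const struct hwmon_channel_info * const demo_hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	NULL
};

static const struct hwmon_chip_info demo_chip_info = {
	.ops = &demo_hwmon_ops,
	.info = demo_hwmon_info,
};

/* called from probe; dev is the PCI function's struct device */
static int demo_hwmon_register(struct device *dev)
{
	struct device *hdev;

	hdev = devm_hwmon_device_register_with_info(dev, "demo_nic", NULL,
						    &demo_chip_info, NULL);
	return PTR_ERR_OR_ZERO(hdev);
}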
@@ -357,6 +375,15 @@ config IGC
To compile this driver as a module, choose M here. The module
will be called igc.
+
+config IGC_LEDS
+ def_bool LEDS_TRIGGER_NETDEV
+ depends on IGC && LEDS_CLASS
+ depends on LEDS_CLASS=y || IGC=m
+ help
+ Optional support for controlling the NIC LED's with the netdev
+ LED trigger.
+
config IDPF
tristate "Intel(R) Infrastructure Data Path Function Support"
depends on PCI_MSI
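Similarly, the IGC_LEDS option above exposes the i225/i226 LEDs through the LED class subsystem so the existing netdev LED trigger can drive them. A minimal sketch of that registration pattern (generic LED class API with hypothetical names, not the actual igc_leds.c):

#include <linux/device.h>
#include <linux/leds.h>

static void demo_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	/* a real driver writes the NIC's LED control register here */
}

static int demo_register_led(struct device *dev)
{
	struct led_classdev *cdev;

	cdev = devm_kzalloc(dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	cdev->name = "demo_nic:green:link";
	cdev->max_brightness = 1;
	cdev->brightness_set = demo_led_set;
	/* hand control to the trigger provided by LEDS_TRIGGER_NETDEV */
	cdev->default_trigger = "netdev";

	return devm_led_classdev_register(dev, cdev);
}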
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index dacb481ee5b1..04c844ef4964 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -3,6 +3,9 @@
# Makefile for the Intel network device drivers.
#
+obj-$(CONFIG_LIBETH) += libeth/
+obj-$(CONFIG_LIBIE) += libie/
+
obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 01f0f12035ca..9b068d40778d 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -171,8 +171,8 @@ static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
-module_param(eeprom_bad_csum_allow, int, 0);
-module_param(use_io, int, 0);
+module_param(eeprom_bad_csum_allow, int, 0444);
+module_param(use_io, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
@@ -3037,7 +3037,7 @@ static int __e100_power_off(struct pci_dev *pdev, bool wake)
return 0;
}
-static int __maybe_unused e100_suspend(struct device *dev_d)
+static int e100_suspend(struct device *dev_d)
{
bool wake;
@@ -3046,7 +3046,7 @@ static int __maybe_unused e100_suspend(struct device *dev_d)
return 0;
}
-static int __maybe_unused e100_resume(struct device *dev_d)
+static int e100_resume(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct nic *nic = netdev_priv(netdev);
@@ -3163,7 +3163,7 @@ static const struct pci_error_handlers e100_err_handler = {
.resume = e100_io_resume,
};
-static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume);
static struct pci_driver e100_driver = {
.name = DRV_NAME,
@@ -3172,7 +3172,7 @@ static struct pci_driver e100_driver = {
.remove = e100_remove,
/* Power Management hooks */
- .driver.pm = &e100_pm_ops,
+ .driver.pm = pm_sleep_ptr(&e100_pm_ops),
.shutdown = e100_shutdown,
.err_handler = &e100_err_handler,
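The e100.c hunks above follow a pattern repeated across this series: DEFINE_SIMPLE_DEV_PM_OPS() always defines the dev_pm_ops structure, and pm_sleep_ptr() hands the driver core either a pointer to it or NULL depending on CONFIG_PM_SLEEP. The callbacks therefore stay referenced at compile time (so __maybe_unused can be dropped) while the optimizer still discards them in !CONFIG_PM_SLEEP builds. A condensed sketch of the resulting shape, with placeholder names:

#include <linux/pci.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	/* quiesce the device */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* bring the device back up */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_driver = {
	.name		= "demo",
	/* NULL when CONFIG_PM_SLEEP is off; the callbacks are then dead code */
	.driver.pm	= pm_sleep_ptr(&demo_pm_ops),
};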
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 4542e2bc28e8..f9328f2e669f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -5,6 +5,7 @@
* Shared functions for accessing and configuring the MAC
*/
+#include <linux/bitfield.h>
#include "e1000.h"
static s32 e1000_check_downshift(struct e1000_hw *hw);
@@ -3260,8 +3261,7 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
return ret_val;
phy_info->mdix_mode =
- (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >>
- IGP01E1000_PSSR_MDIX_SHIFT);
+ (e1000_auto_x_mode)FIELD_GET(IGP01E1000_PSSR_MDIX, phy_data);
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -3272,11 +3272,11 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
if (ret_val)
return ret_val;
- phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
- SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ phy_info->local_rx = FIELD_GET(SR_1000T_LOCAL_RX_STATUS,
+ phy_data) ?
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
- SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ phy_info->remote_rx = FIELD_GET(SR_1000T_REMOTE_RX_STATUS,
+ phy_data) ?
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
/* Get cable length */
@@ -3326,14 +3326,12 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
return ret_val;
phy_info->extended_10bt_distance =
- ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
- M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
+ FIELD_GET(M88E1000_PSCR_10BT_EXT_DIST_ENABLE, phy_data) ?
e1000_10bt_ext_dist_enable_lower :
e1000_10bt_ext_dist_enable_normal;
phy_info->polarity_correction =
- ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
- M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
+ FIELD_GET(M88E1000_PSCR_POLARITY_REVERSAL, phy_data) ?
e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
/* Check polarity status */
@@ -3347,27 +3345,25 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
return ret_val;
phy_info->mdix_mode =
- (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >>
- M88E1000_PSSR_MDIX_SHIFT);
+ (e1000_auto_x_mode)FIELD_GET(M88E1000_PSSR_MDIX, phy_data);
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
/* Cable Length Estimation and Local/Remote Receiver Information
* are only valid at 1000 Mbps.
*/
phy_info->cable_length =
- (e1000_cable_length) ((phy_data &
- M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+ (e1000_cable_length)FIELD_GET(M88E1000_PSSR_CABLE_LENGTH,
+ phy_data);
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
if (ret_val)
return ret_val;
- phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
- SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ phy_info->local_rx = FIELD_GET(SR_1000T_LOCAL_RX_STATUS,
+ phy_data) ?
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
- SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ phy_info->remote_rx = FIELD_GET(SR_1000T_REMOTE_RX_STATUS,
+ phy_data) ?
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
}
@@ -3515,7 +3511,7 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
if (ret_val)
return ret_val;
eeprom_size =
- (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+ FIELD_GET(EEPROM_SIZE_MASK, eeprom_size);
/* 256B eeprom size was not supported in earlier hardware, so we
* bump eeprom_size up one to ensure that "1" (which maps to
* 256B) is never the result used in the shifting logic below.
@@ -4891,8 +4887,7 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
&phy_data);
if (ret_val)
return ret_val;
- cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ cable_length = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
/* Convert the enum value to ranged values */
switch (cable_length) {
@@ -5001,8 +4996,7 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
&phy_data);
if (ret_val)
return ret_val;
- *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
- M88E1000_PSSR_REV_POLARITY_SHIFT) ?
+ *polarity = FIELD_GET(M88E1000_PSSR_REV_POLARITY, phy_data) ?
e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
} else if (hw->phy_type == e1000_phy_igp) {
@@ -5072,8 +5066,8 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
- M88E1000_PSSR_DOWNSHIFT_SHIFT;
+ hw->speed_downgraded = FIELD_GET(M88E1000_PSSR_DOWNSHIFT,
+ phy_data);
}
return E1000_SUCCESS;
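The conversions in e1000_hw.c above (and in most of the e1000e, igb and ixgbe hunks that follow) replace open-coded "(reg & MASK) >> SHIFT" pairs with FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, which derive the shift from the mask at compile time and make the separate *_SHIFT defines unnecessary. A standalone illustration using a made-up two-bit field:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_SPEED_MASK		GENMASK(15, 14)	/* hypothetical field, bits 15:14 */

static inline u32 demo_get_speed(u32 reg)
{
	/* equivalent to (reg & DEMO_SPEED_MASK) >> 14 */
	return FIELD_GET(DEMO_SPEED_MASK, reg);
}

static inline u32 demo_encode_speed(u32 speed)
{
	/* equivalent to (speed << 14) & DEMO_SPEED_MASK */
	return FIELD_PREP(DEMO_SPEED_MASK, speed);
}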
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1d1e93686af2..60fff9a6c53e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -149,8 +149,8 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
-static int __maybe_unused e1000_suspend(struct device *dev);
-static int __maybe_unused e1000_resume(struct device *dev);
+static int e1000_suspend(struct device *dev);
+static int e1000_resume(struct device *dev);
static void e1000_shutdown(struct pci_dev *pdev);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -175,16 +175,14 @@ static const struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
static struct pci_driver e1000_driver = {
.name = e1000_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
- .driver = {
- .pm = &e1000_pm_ops,
- },
+ .driver.pm = pm_sleep_ptr(&e1000_pm_ops),
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
@@ -3571,7 +3569,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
e1000_up(adapter);
@@ -5135,7 +5133,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-static int __maybe_unused e1000_suspend(struct device *dev)
+static int e1000_suspend(struct device *dev)
{
int retval;
struct pci_dev *pdev = to_pci_dev(dev);
@@ -5147,7 +5145,7 @@ static int __maybe_unused e1000_suspend(struct device *dev)
return retval;
}
-static int __maybe_unused e1000_resume(struct device *dev)
+static int e1000_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index be9c695dde12..4eb1ceaf865a 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -92,8 +92,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
nvm->type = e1000_nvm_eeprom_spi;
- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ size = (u16)FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@@ -1035,17 +1034,18 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
* iteration and increase the max iterations when
* polling the phy; this fixes erroneous timeouts at 10Mbps.
*/
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
- 0xFFFF);
+ /* these next three accesses were always meant to use page 0x34 using
+ * GG82563_REG(0x34, N) but never did, so we've just corrected the call
+ * to not drop bits
+ */
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 4, 0xFFFF);
if (ret_val)
return ret_val;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- &reg_data);
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, 9, &reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- reg_data);
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 9, reg_data);
if (ret_val)
return ret_val;
ret_val =
@@ -1209,8 +1209,8 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
if (ret_val)
return ret_val;
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) |
+ E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -1244,8 +1244,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
if (ret_val)
return ret_val;
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | data;
+ kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
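The comment in the hunk above notes that the old GG82563_REG(0x34, ...) arguments "never did" select page 0x34: the KMRN control/status offset field is too narrow to hold the page bits, so they were silently masked away. A small self-contained check of that arithmetic, using local copies of what the upstream macros are assumed to be (GG82563_PAGE_SHIFT of 5 and a 5-bit offset field at 0x001F0000):

#include <linux/build_bug.h>

#define DEMO_GG82563_PAGE_SHIFT		5
#define DEMO_GG82563_REG(page, reg)	(((page) << DEMO_GG82563_PAGE_SHIFT) | (reg))
#define DEMO_KMRNCTRLSTA_OFFSET_SHIFT	16
#define DEMO_KMRNCTRLSTA_OFFSET		0x001F0000

/* (0x34 << 5) | 4 == 0x684, but only its low five bits survive the mask,
 * so the old call programmed exactly the same KMRN offset as a plain "4".
 */
static_assert(((DEMO_GG82563_REG(0x34, 4) << DEMO_KMRNCTRLSTA_OFFSET_SHIFT) &
	       DEMO_KMRNCTRLSTA_OFFSET) ==
	      ((4 << DEMO_KMRNCTRLSTA_OFFSET_SHIFT) & DEMO_KMRNCTRLSTA_OFFSET));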
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 0b1e890dd583..969f855a79ee 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -157,8 +157,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
fallthrough;
default:
nvm->type = e1000_nvm_eeprom_spi;
- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ size = (u16)FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 63c3c79380a1..5e2cfa73f889 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -678,11 +678,6 @@
/* PCI/PCI-X/PCI-EX Config space */
#define PCI_HEADER_TYPE_REGISTER 0x0E
-#define PCIE_LINK_STATUS 0x12
-
-#define PCI_HEADER_TYPE_MULTIFUNC 0x80
-#define PCIE_LINK_WIDTH_MASK 0x3F0
-#define PCIE_LINK_WIDTH_SHIFT 4
#define PHY_REVISION_MASK 0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index a187582d2299..ba9c19e6994c 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -360,23 +360,43 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
* As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of
* INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
* bits to count nanoseconds leaving the rest for fractional nanoseconds.
+ *
+ * Any given INCVALUE also has an associated maximum adjustment value. This
+ * maximum adjustment value is the largest increase (or decrease) which can be
+ * safely applied without overflowing the INCVALUE. Since INCVALUE has
+ * a maximum range of 24 bits, its largest value is 0xFFFFFF.
+ *
+ * To understand where the maximum value comes from, consider the following
+ * equation:
+ *
+ * new_incval = base_incval + (base_incval * adjustment) / 1billion
+ *
+ * To avoid overflow that means:
+ * max_incval = base_incval + (base_incval * max_adj) / billion
+ *
+ * Re-arranging:
+ * max_adj = floor(((max_incval - base_incval) * 1billion) / base_incval)
*/
#define INCVALUE_96MHZ 125
#define INCVALUE_SHIFT_96MHZ 17
#define INCPERIOD_SHIFT_96MHZ 2
#define INCPERIOD_96MHZ (12 >> INCPERIOD_SHIFT_96MHZ)
+#define MAX_PPB_96MHZ 23999900 /* 23,999,900 ppb */
#define INCVALUE_25MHZ 40
#define INCVALUE_SHIFT_25MHZ 18
#define INCPERIOD_25MHZ 1
+#define MAX_PPB_25MHZ 599999900 /* 599,999,900 ppb */
#define INCVALUE_24MHZ 125
#define INCVALUE_SHIFT_24MHZ 14
#define INCPERIOD_24MHZ 3
+#define MAX_PPB_24MHZ 999999999 /* 999,999,999 ppb */
#define INCVALUE_38400KHZ 26
#define INCVALUE_SHIFT_38400KHZ 19
#define INCPERIOD_38400KHZ 1
+#define MAX_PPB_38400KHZ 230769100 /* 230,769,100 ppb */
/* Another drawback of scaling the incvalue by a large factor is the
* 64-bit SYSTIM register overflows more quickly. This is dealt with
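The new MAX_PPB_* constants above follow directly from the derivation in the comment. A worked example for the 96 MHz clock, in the comment's own notation (plain arithmetic, not driver code):

	base_incval = INCVALUE_96MHZ << INCVALUE_SHIFT_96MHZ
	            = 125 << 17 = 16,384,000
	max_incval  = 0xFFFFFF = 16,777,215        (24-bit INCVALUE field)
	max_adj     = floor((16,777,215 - 16,384,000) * 1e9 / 16,384,000)
	            = 23,999,938 ppb

MAX_PPB_96MHZ (23,999,900) rounds that result down slightly for margin. The 24 MHz case works out to well above one billion ppb and is instead pinned at 999,999,999, presumably because a parts-per-billion adjustment must stay below one billion (a 100% change).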
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 9835e6a90d56..85da20778e0f 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -156,7 +156,7 @@ static int e1000_get_link_ksettings(struct net_device *netdev,
speed = adapter->link_speed;
cmd->base.duplex = adapter->link_duplex - 1;
}
- } else if (!pm_runtime_suspended(netdev->dev.parent)) {
+ } else {
u32 status = er32(STATUS);
if (status & E1000_STATUS_LU) {
@@ -274,16 +274,13 @@ static int e1000_set_link_ksettings(struct net_device *netdev,
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
- pm_runtime_get_sync(netdev->dev.parent);
-
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
*/
if (hw->phy.ops.check_reset_block &&
hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot change link characteristics when SoL/IDER is active.\n");
- ret_val = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* MDI setting is only allowed when autoneg enabled because
@@ -291,16 +288,13 @@ static int e1000_set_link_ksettings(struct net_device *netdev,
* duplex is forced.
*/
if (cmd->base.eth_tp_mdix_ctrl) {
- if (hw->phy.media_type != e1000_media_type_copper) {
- ret_val = -EOPNOTSUPP;
- goto out;
- }
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
(cmd->base.autoneg != AUTONEG_ENABLE)) {
e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
- ret_val = -EINVAL;
- goto out;
+ return -EINVAL;
}
}
@@ -347,7 +341,6 @@ static int e1000_set_link_ksettings(struct net_device *netdev,
}
out:
- pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
return ret_val;
}
@@ -383,8 +376,6 @@ static int e1000_set_pauseparam(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- pm_runtime_get_sync(netdev->dev.parent);
-
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
if (netif_running(adapter->netdev)) {
@@ -417,7 +408,6 @@ static int e1000_set_pauseparam(struct net_device *netdev,
}
out:
- pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
return retval;
}
@@ -448,8 +438,6 @@ static void e1000_get_regs(struct net_device *netdev,
u32 *regs_buff = p;
u16 phy_data;
- pm_runtime_get_sync(netdev->dev.parent);
-
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
regs->version = (1u << 24) |
@@ -495,8 +483,6 @@ static void e1000_get_regs(struct net_device *netdev,
e1e_rphy(hw, MII_STAT1000, &phy_data);
regs_buff[24] = (u32)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
-
- pm_runtime_put_sync(netdev->dev.parent);
}
static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -529,8 +515,6 @@ static int e1000_get_eeprom(struct net_device *netdev,
if (!eeprom_buff)
return -ENOMEM;
- pm_runtime_get_sync(netdev->dev.parent);
-
if (hw->nvm.type == e1000_nvm_eeprom_spi) {
ret_val = e1000_read_nvm(hw, first_word,
last_word - first_word + 1,
@@ -544,8 +528,6 @@ static int e1000_get_eeprom(struct net_device *netdev,
}
}
- pm_runtime_put_sync(netdev->dev.parent);
-
if (ret_val) {
/* a read error occurred, throw away the result */
memset(eeprom_buff, 0xff, sizeof(u16) *
@@ -595,8 +577,6 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
- pm_runtime_get_sync(netdev->dev.parent);
-
if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
@@ -637,7 +617,6 @@ static int e1000_set_eeprom(struct net_device *netdev,
ret_val = e1000e_update_nvm_checksum(hw);
out:
- pm_runtime_put_sync(netdev->dev.parent);
kfree(eeprom_buff);
return ret_val;
}
@@ -654,8 +633,8 @@ static void e1000_get_drvinfo(struct net_device *netdev,
*/
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d-%d",
- (adapter->eeprom_vers & 0xF000) >> 12,
- (adapter->eeprom_vers & 0x0FF0) >> 4,
+ FIELD_GET(0xF000, adapter->eeprom_vers),
+ FIELD_GET(0x0FF0, adapter->eeprom_vers),
(adapter->eeprom_vers & 0x000F));
strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
@@ -733,8 +712,6 @@ static int e1000_set_ringparam(struct net_device *netdev,
}
}
- pm_runtime_get_sync(netdev->dev.parent);
-
e1000e_down(adapter, true);
/* We can't just free everything and then setup again, because the
@@ -773,7 +750,6 @@ err_setup_rx:
e1000e_free_tx_resources(temp_tx);
err_setup:
e1000e_up(adapter);
- pm_runtime_put_sync(netdev->dev.parent);
free_temp:
vfree(temp_tx);
vfree(temp_rx);
@@ -925,8 +901,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
}
if (mac->type >= e1000_pch_lpt)
- wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
- E1000_FWSM_WLOCK_MAC_SHIFT;
+ wlock_mac = FIELD_GET(E1000_FWSM_WLOCK_MAC_MASK, er32(FWSM));
for (i = 0; i < mac->rar_entry_count; i++) {
if (mac->type >= e1000_pch_lpt) {
@@ -1817,8 +1792,6 @@ static void e1000_diag_test(struct net_device *netdev,
u8 autoneg;
bool if_running = netif_running(netdev);
- pm_runtime_get_sync(netdev->dev.parent);
-
set_bit(__E1000_TESTING, &adapter->state);
if (!if_running) {
@@ -1904,8 +1877,6 @@ static void e1000_diag_test(struct net_device *netdev,
}
msleep_interruptible(4 * 1000);
-
- pm_runtime_put_sync(netdev->dev.parent);
}
static void e1000_get_wol(struct net_device *netdev,
@@ -2047,15 +2018,11 @@ static int e1000_set_coalesce(struct net_device *netdev,
adapter->itr_setting = adapter->itr & ~3;
}
- pm_runtime_get_sync(netdev->dev.parent);
-
if (adapter->itr_setting != 0)
e1000e_write_itr(adapter, adapter->itr);
else
e1000e_write_itr(adapter, 0);
- pm_runtime_put_sync(netdev->dev.parent);
-
return 0;
}
@@ -2069,9 +2036,7 @@ static int e1000_nway_reset(struct net_device *netdev)
if (!adapter->hw.mac.autoneg)
return -EINVAL;
- pm_runtime_get_sync(netdev->dev.parent);
e1000e_reinit_locked(adapter);
- pm_runtime_put_sync(netdev->dev.parent);
return 0;
}
@@ -2085,12 +2050,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
int i;
char *p = NULL;
- pm_runtime_get_sync(netdev->dev.parent);
-
dev_get_stats(netdev, &net_stats);
- pm_runtime_put_sync(netdev->dev.parent);
-
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
@@ -2147,9 +2108,7 @@ static int e1000_get_rxnfc(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
u32 mrqc;
- pm_runtime_get_sync(netdev->dev.parent);
mrqc = er32(MRQC);
- pm_runtime_put_sync(netdev->dev.parent);
if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
return 0;
@@ -2187,7 +2146,7 @@ static int e1000_get_rxnfc(struct net_device *netdev,
}
}
-static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2212,28 +2171,24 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- pm_runtime_get_sync(netdev->dev.parent);
-
ret_val = hw->phy.ops.acquire(hw);
- if (ret_val) {
- pm_runtime_put_sync(netdev->dev.parent);
+ if (ret_val)
return -EBUSY;
- }
/* EEE Capability */
ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
if (ret_val)
goto release;
- edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->supported, phy_data);
/* EEE Advertised */
- edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised, adapter->eee_advert);
/* EEE Link Partner Advertised */
ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
if (ret_val)
goto release;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
/* EEE PCS Status */
ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
@@ -2258,16 +2213,16 @@ release:
if (ret_val)
ret_val = -ENODATA;
- pm_runtime_put_sync(netdev->dev.parent);
-
return ret_val;
}
-static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct e1000_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
s32 ret_val;
ret_val = e1000e_get_eee(netdev, &eee_curr);
@@ -2284,25 +2239,26 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EINVAL;
}
- if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+
+ if (linkmode_andnot(tmp, edata->advertised, supported)) {
e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n");
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
- pm_runtime_get_sync(netdev->dev.parent);
-
/* reset the link */
if (netif_running(netdev))
e1000e_reinit_locked(adapter);
else
e1000e_reset(adapter);
- pm_runtime_put_sync(netdev->dev.parent);
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 1fef6bb5a5fb..4b6e7536170a 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -628,6 +628,7 @@ struct e1000_phy_info {
u32 id;
u32 reset_delay_us; /* in usec */
u32 revision;
+ u32 retry_count;
enum e1000_media_type media_type;
@@ -644,6 +645,7 @@ struct e1000_phy_info {
bool polarity_correction;
bool speed_downgraded;
bool autoneg_wait_to_complete;
+ bool retry_enabled;
};
struct e1000_nvm_info {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 39e9fc601bf5..f9e94be36e97 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -222,11 +222,18 @@ out:
if (hw->mac.type >= e1000_pch_lpt) {
/* Only unforce SMBus if ME is not active */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* Unforce SMBus mode in PHY */
e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+ e1000e_enable_phy_retry(hw);
+
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
@@ -310,6 +317,11 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
goto out;
}
+ /* There is no guarantee that the PHY is accessible at this time
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
* inaccessible and resetting the PHY is not blocked, toggle the
* LANPHYPC Value bit to force the interconnect to PCIe mode.
@@ -380,6 +392,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
break;
}
+ e1000e_enable_phy_retry(hw);
+
hw->phy.ops.release(hw);
if (!ret_val) {
@@ -449,6 +463,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->id = e1000_phy_unknown;
+ if (hw->mac.type == e1000_pch_mtp) {
+ phy->retry_count = 2;
+ e1000e_enable_phy_retry(hw);
+ }
+
ret_val = e1000_init_phy_workarounds_pchlan(hw);
if (ret_val)
return ret_val;
@@ -1072,13 +1091,11 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
(1U << (E1000_LTRV_SCALE_FACTOR *
- ((lat_enc & E1000_LTRV_SCALE_MASK)
- >> E1000_LTRV_SCALE_SHIFT)));
+ FIELD_GET(E1000_LTRV_SCALE_MASK, lat_enc)));
max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
- (1U << (E1000_LTRV_SCALE_FACTOR *
- ((max_ltr_enc & E1000_LTRV_SCALE_MASK)
- >> E1000_LTRV_SCALE_SHIFT)));
+ (1U << (E1000_LTRV_SCALE_FACTOR *
+ FIELD_GET(E1000_LTRV_SCALE_MASK, max_ltr_enc)));
if (lat_enc_d > max_ltr_enc_d)
lat_enc = max_ltr_enc;
@@ -1148,18 +1165,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
- /* Force SMBus mode in PHY */
- ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
- if (ret_val)
- goto release;
- phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
- e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
-
- /* Force SMBus mode in MAC */
- mac_reg = er32(CTRL_EXT);
- mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
-
/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
* LPLU and disable Gig speed when entering ULP
*/
@@ -1315,6 +1320,11 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
/* Toggle LANPHYPC Value bit */
e1000_toggle_lanphypc_pch_lpt(hw);
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* Unforce SMBus mode in PHY */
ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
if (ret_val) {
@@ -1335,6 +1345,8 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+ e1000e_enable_phy_retry(hw);
+
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
@@ -2075,8 +2087,7 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
{
u16 phy_data;
u32 strap = er32(STRAP);
- u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
- E1000_STRAP_SMT_FREQ_SHIFT;
+ u32 freq = FIELD_GET(E1000_STRAP_SMT_FREQ_MASK, strap);
s32 ret_val;
strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
@@ -2562,8 +2573,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
(u16)(mac_reg & 0xFFFF));
hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
- (u16)((mac_reg & E1000_RAH_AV)
- >> 16));
+ (u16)((mac_reg & E1000_RAH_AV) >> 16));
}
e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
@@ -3205,7 +3215,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
&nvm_dword);
if (ret_val)
return ret_val;
- sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+ sig_byte = FIELD_GET(0xFF00, nvm_dword);
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
E1000_ICH_NVM_SIG_VALUE) {
*bank = 0;
@@ -3218,7 +3228,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
&nvm_dword);
if (ret_val)
return ret_val;
- sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+ sig_byte = FIELD_GET(0xFF00, nvm_dword);
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
E1000_ICH_NVM_SIG_VALUE) {
*bank = 1;
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 5df7ad93f3d7..d7df2a0ed629 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
+
#include "e1000.h"
/**
@@ -13,21 +15,17 @@
**/
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
+ struct pci_dev *pdev = hw->adapter->pdev;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_bus_info *bus = &hw->bus;
- struct e1000_adapter *adapter = hw->adapter;
- u16 pcie_link_status, cap_offset;
+ u16 pcie_link_status;
- cap_offset = adapter->pdev->pcie_cap;
- if (!cap_offset) {
+ if (!pci_pcie_cap(pdev)) {
bus->width = e1000_bus_width_unknown;
} else {
- pci_read_config_word(adapter->pdev,
- cap_offset + PCIE_LINK_STATUS,
- &pcie_link_status);
- bus->width = (enum e1000_bus_width)((pcie_link_status &
- PCIE_LINK_WIDTH_MASK) >>
- PCIE_LINK_WIDTH_SHIFT);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &pcie_link_status);
+ bus->width = (enum e1000_bus_width)FIELD_GET(PCI_EXP_LNKSTA_NLW,
+ pcie_link_status);
}
mac->ops.set_lan_id(hw);
@@ -52,7 +50,7 @@ void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
* for the device regardless of function swap state.
*/
reg = er32(STATUS);
- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+ bus->func = FIELD_GET(E1000_STATUS_FUNC_MASK, reg);
}
/**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f536c856727c..220d62fca55d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1788,8 +1788,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
- (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
- E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
@@ -1868,8 +1867,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
- (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
- E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
@@ -5031,8 +5029,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
- (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
- E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);
}
}
@@ -6041,7 +6038,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->max_frame_size = max_frame;
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
pm_runtime_get_sync(netdev->dev.parent);
@@ -6249,7 +6246,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
phy_reg |= BM_RCTL_MPE;
phy_reg &= ~(BM_RCTL_MO_MASK);
if (mac_reg & E1000_RCTL_MO_3)
- phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
+ phy_reg |= (FIELD_GET(E1000_RCTL_MO_3, mac_reg)
<< BM_RCTL_MO_SHIFT);
if (mac_reg & E1000_RCTL_BAM)
phy_reg |= BM_RCTL_BAM;
@@ -6626,6 +6623,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
+ u16 smb_ctrl;
/* Runtime suspend should only enable wakeup for link changes */
if (runtime)
@@ -6691,14 +6689,31 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
} else if (hw->mac.type >= e1000_pch_lpt) {
- if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) {
/* ULP does not support wake from unicast, multicast
* or broadcast.
*/
retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
+ if (retval)
+ return retval;
+ }
+
+ /* Force SMBUS to allow WOL */
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
- if (retval)
- return retval;
+ e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl);
+ smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl);
+
+ e1000e_enable_phy_retry(hw);
+
+ /* Force SMBus mode in MAC */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, ctrl_ext);
}
/* Ensure that the appropriate bits are set in LPI_CTRL
@@ -6953,13 +6968,13 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
-static __maybe_unused int e1000e_pm_prepare(struct device *dev)
+static int e1000e_pm_prepare(struct device *dev)
{
return pm_runtime_suspended(dev) &&
pm_suspend_via_firmware();
}
-static __maybe_unused int e1000e_pm_suspend(struct device *dev)
+static int e1000e_pm_suspend(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6982,7 +6997,7 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
return rc;
}
-static __maybe_unused int e1000e_pm_resume(struct device *dev)
+static int e1000e_pm_resume(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -7016,7 +7031,7 @@ static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev)
return -EBUSY;
}
-static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
+static int e1000e_pm_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7035,7 +7050,7 @@ static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
return rc;
}
-static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev)
+static int e1000e_pm_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7922,8 +7937,7 @@ static const struct pci_device_id e1000_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
-static const struct dev_pm_ops e1000_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
+static const struct dev_pm_ops e1000e_pm_ops = {
.prepare = e1000e_pm_prepare,
.suspend = e1000e_pm_suspend,
.resume = e1000e_pm_resume,
@@ -7931,9 +7945,8 @@ static const struct dev_pm_ops e1000_pm_ops = {
.thaw = e1000e_pm_thaw,
.poweroff = e1000e_pm_suspend,
.restore = e1000e_pm_resume,
-#endif
- SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
- e1000e_pm_runtime_idle)
+ RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
+ e1000e_pm_runtime_idle)
};
/* PCI Device API Driver */
@@ -7942,9 +7955,7 @@ static struct pci_driver e1000_driver = {
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
- .driver = {
- .pm = &e1000_pm_ops,
- },
+ .driver.pm = pm_ptr(&e1000e_pm_ops),
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 08c3d477dd6f..f7ae0e0aa4a4 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -107,6 +107,16 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
}
+void e1000e_disable_phy_retry(struct e1000_hw *hw)
+{
+ hw->phy.retry_enabled = false;
+}
+
+void e1000e_enable_phy_retry(struct e1000_hw *hw)
+{
+ hw->phy.retry_enabled = true;
+}
+
/**
* e1000e_read_phy_reg_mdic - Read MDI control register
* @hw: pointer to the HW structure
@@ -118,57 +128,73 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
**/
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
{
+ u32 i, mdic = 0, retry_counter, retry_max;
struct e1000_phy_info *phy = &hw->phy;
- u32 i, mdic = 0;
+ bool success;
if (offset > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM;
}
+ retry_max = phy->retry_enabled ? phy->retry_count : 0;
+
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = ((offset << E1000_MDIC_REG_SHIFT) |
- (phy->addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_READ));
+ for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
+ success = true;
- ew32(MDIC, mdic);
+ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
- /* Poll the ready bit to see if the MDI read completed
- * Increasing the time out as testing showed failures with
- * the lower time out
- */
- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
- return -E1000_ERR_PHY;
- }
- if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
- e_dbg("MDI Read offset error - requested %d, returned %d\n",
- offset,
- (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
- return -E1000_ERR_PHY;
- }
- *data = (u16)mdic;
+ ew32(MDIC, mdic);
- /* Allow some time after each MDIC transaction to avoid
- * reading duplicate data in the next MDIC transaction.
- */
- if (hw->mac.type == e1000_pch2lan)
- udelay(100);
+ /* Poll the ready bit to see if the MDI read completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Read PHY Reg Address %d did not complete\n",
+ offset);
+ success = false;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
+ success = false;
+ }
+ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
+ e_dbg("MDI Read offset error - requested %d, returned %d\n",
+ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
+ success = false;
+ }
- return 0;
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ udelay(100);
+
+ if (success) {
+ *data = (u16)mdic;
+ return 0;
+ }
+
+ if (retry_counter != retry_max) {
+ e_dbg("Perform retry on PHY transaction...\n");
+ mdelay(10);
+ }
+ }
+
+ return -E1000_ERR_PHY;
}
/**
@@ -181,57 +207,72 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
**/
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
{
+ u32 i, mdic = 0, retry_counter, retry_max;
struct e1000_phy_info *phy = &hw->phy;
- u32 i, mdic = 0;
+ bool success;
if (offset > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM;
}
+ retry_max = phy->retry_enabled ? phy->retry_count : 0;
+
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = (((u32)data) |
- (offset << E1000_MDIC_REG_SHIFT) |
- (phy->addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_WRITE));
+ for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
+ success = true;
- ew32(MDIC, mdic);
+ mdic = (((u32)data) |
+ (offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
- /* Poll the ready bit to see if the MDI read completed
- * Increasing the time out as testing showed failures with
- * the lower time out
- */
- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset);
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Write PHY Red Address %d Error\n", offset);
- return -E1000_ERR_PHY;
- }
- if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
- e_dbg("MDI Write offset error - requested %d, returned %d\n",
- offset,
- (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
- return -E1000_ERR_PHY;
- }
+ ew32(MDIC, mdic);
- /* Allow some time after each MDIC transaction to avoid
- * reading duplicate data in the next MDIC transaction.
- */
- if (hw->mac.type == e1000_pch2lan)
- udelay(100);
+	/* Poll the ready bit to see if the MDI write completed.
+	 * The timeout is increased here because testing showed
+	 * failures with a shorter timeout.
+	 */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write PHY Reg Address %d did not complete\n",
+ offset);
+ success = false;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Write PHY Reg Address %d Error\n", offset);
+ success = false;
+ }
+ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
+ e_dbg("MDI Write offset error - requested %d, returned %d\n",
+ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
+ success = false;
+ }
- return 0;
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ udelay(100);
+
+ if (success)
+ return 0;
+
+ if (retry_counter != retry_max) {
+ e_dbg("Perform retry on PHY transaction...\n");
+ mdelay(10);
+ }
+ }
+
+ return -E1000_ERR_PHY;
}
/**
@@ -463,8 +504,8 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
return ret_val;
}
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) |
+ E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -536,8 +577,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
return ret_val;
}
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | data;
+ kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -1793,8 +1833,7 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+ index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
return -E1000_ERR_PHY;
@@ -3234,8 +3273,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
- I82577_DSTATUS_CABLE_LENGTH_SHIFT);
+ length = FIELD_GET(I82577_DSTATUS_CABLE_LENGTH, phy_data);
if (length == E1000_CABLE_LENGTH_UNDEFINED)
return -E1000_ERR_PHY;
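The mask/shift conversions in this hunk (and in the fm10k and i40e hunks below) all follow the same pattern from <linux/bitfield.h>: FIELD_GET() extracts a field using only its mask, deriving the shift at compile time, and FIELD_PREP() is the inverse for building a register value. A minimal sketch with a hypothetical EXAMPLE_MASK, not taken from any of these drivers:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define EXAMPLE_MASK	GENMASK(20, 16)	/* hypothetical 5-bit field */

	static u32 example_field_get(u32 reg)
	{
		/* equivalent to: (reg & EXAMPLE_MASK) >> 16 */
		return FIELD_GET(EXAMPLE_MASK, reg);
	}

	static u32 example_field_set(u32 reg, u32 val)
	{
		/* equivalent to: (reg & ~EXAMPLE_MASK) | ((val << 16) & EXAMPLE_MASK) */
		return (reg & ~EXAMPLE_MASK) | FIELD_PREP(EXAMPLE_MASK, val);
	}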
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index c48777d09523..049bb325b4b1 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -51,6 +51,8 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
void e1000_power_up_phy_copper(struct e1000_hw *hw);
void e1000_power_down_phy_copper(struct e1000_hw *hw);
+void e1000e_disable_phy_retry(struct e1000_hw *hw);
+void e1000e_enable_phy_retry(struct e1000_hw *hw);
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
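The two declarations added above pair with the MDIC retry loops in phy.c; their definitions are not part of this hunk, so the following is only a usage sketch, assuming the helpers do no more than toggle the retry state consulted by e1000e_read_phy_reg_mdic()/e1000e_write_phy_reg_mdic() (error handling omitted):

	/* Hypothetical caller that must not see a retried (possibly
	 * duplicated) PHY transaction while powering the PHY down,
	 * using the usual PHY_CONTROL/MII_CR_POWER_DOWN definitions.
	 */
	static void example_power_down_no_retry(struct e1000_hw *hw)
	{
		u16 ctrl;

		e1000e_disable_phy_retry(hw);
		e1000e_read_phy_reg_mdic(hw, PHY_CONTROL, &ctrl);
		e1000e_write_phy_reg_mdic(hw, PHY_CONTROL, ctrl | MII_CR_POWER_DOWN);
		e1000e_enable_phy_retry(hw);
	}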
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 02d871bc112a..bbcfd529399b 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -280,8 +280,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_pch2lan:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ;
+ break;
case e1000_pch_lpt:
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
+ adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ;
+ else
+ adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
+ break;
case e1000_pch_spt:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
+ break;
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
@@ -289,15 +298,14 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_lnp:
case e1000_pch_ptp:
case e1000_pch_nvp:
- if ((hw->mac.type < e1000_pch_lpt) ||
- (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
- adapter->ptp_clock_info.max_adj = 24000000 - 1;
- break;
- }
- fallthrough;
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
+ adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
+ else
+ adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
+ break;
case e1000_82574:
case e1000_82583:
- adapter->ptp_clock_info.max_adj = 600000000 - 1;
+ adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 13a05604dcc0..1bc5b6c0b897 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1057,16 +1057,16 @@ static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}
-static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int fm10k_get_rssh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
+ u8 *key = rxfh->key;
int i, err;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- err = fm10k_get_reta(netdev, indir);
+ err = fm10k_get_reta(netdev, rxfh->indir);
if (err || !key)
return err;
@@ -1076,23 +1076,25 @@ static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
-static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int fm10k_set_rssh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_hw *hw = &interface->hw;
int i, err;
/* We do not allow change in unsupported parameters */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- err = fm10k_set_reta(netdev, indir);
- if (err || !key)
+ err = fm10k_set_reta(netdev, rxfh->indir);
+ if (err || !rxfh->key)
return err;
- for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) {
- u32 rssrk = le32_to_cpu(*(__le32 *)key);
+ for (i = 0; i < FM10K_RSSRK_SIZE; i++, rxfh->key += 4) {
+ u32 rssrk = le32_to_cpu(*(__le32 *)rxfh->key);
if (interface->rssrk[i] == rssrk)
continue;
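The fm10k conversion above targets the consolidated ethtool RSS interface, where the indirection table, hash key and hash function are carried in a single struct ethtool_rxfh_param instead of separate arguments. A driver-agnostic sketch of a .get_rxfh callback under that interface; struct foo_priv and its fields are hypothetical:

	struct foo_priv {
		u32 reta[128];		/* hypothetical RSS indirection table */
		u8 rss_key[40];		/* hypothetical RSS hash key */
	};

	static int foo_get_rxfh(struct net_device *netdev,
				struct ethtool_rxfh_param *rxfh)
	{
		struct foo_priv *priv = netdev_priv(netdev);

		rxfh->hfunc = ETH_RSS_HASH_TOP;	/* report Toeplitz hashing */
		if (rxfh->indir)
			memcpy(rxfh->indir, priv->reta, sizeof(priv->reta));
		if (rxfh->key)
			memcpy(rxfh->key, priv->rss_key, sizeof(priv->rss_key));
		return 0;
	}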
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index d748b98274e7..92de609b7218 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2342,7 +2342,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
* suspend or hibernation. This function does not need to handle lower PCIe
* device state as the stack takes care of that for us.
**/
-static int __maybe_unused fm10k_resume(struct device *dev)
+static int fm10k_resume(struct device *dev)
{
struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
@@ -2369,7 +2369,7 @@ static int __maybe_unused fm10k_resume(struct device *dev)
* system suspend or hibernation. This function does not need to handle lower
* PCIe device state as the stack takes care of that for us.
**/
-static int __maybe_unused fm10k_suspend(struct device *dev)
+static int fm10k_suspend(struct device *dev)
{
struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
@@ -2502,16 +2502,14 @@ static const struct pci_error_handlers fm10k_err_handler = {
.reset_done = fm10k_io_reset_done,
};
-static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);
static struct pci_driver fm10k_driver = {
.name = fm10k_driver_name,
.id_table = fm10k_pci_tbl,
.probe = fm10k_probe,
.remove = fm10k_remove,
- .driver = {
- .pm = &fm10k_pm_ops,
- },
+ .driver.pm = pm_sleep_ptr(&fm10k_pm_ops),
.sriov_configure = fm10k_iov_configure,
.err_handler = &fm10k_err_handler
};
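The PM conversion above is the now-preferred pattern: DEFINE_SIMPLE_DEV_PM_OPS() defines the ops unconditionally, and pm_sleep_ptr() turns the reference into NULL when CONFIG_PM_SLEEP is disabled, which is why the __maybe_unused annotations on the callbacks can go. A generic sketch of the same pattern; all foo_* names are hypothetical and most pci_driver fields are omitted:

	static int foo_suspend(struct device *dev)
	{
		/* quiesce the device */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* bring the device back up */
		return 0;
	}

	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct pci_driver foo_driver = {
		.name		= "foo",
		.driver.pm	= pm_sleep_ptr(&foo_pm_ops),
		/* .id_table, .probe, .remove omitted for brevity */
	};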
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index af1b0cde3670..98861cc6df7c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */
+#include <linux/bitfield.h>
#include "fm10k_pf.h"
#include "fm10k_vf.h"
@@ -865,8 +866,7 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
* register is RO from the VF, so the PF must do this even in the
* case of notifying the VF of a new VID via the mailbox.
*/
- txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
- FM10K_TXQCTL_VID_MASK;
+ txqctl = FIELD_PREP(FM10K_TXQCTL_VID_MASK, vf_vid);
txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
FM10K_TXQCTL_VF | vf_idx;
@@ -1575,8 +1575,7 @@ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
if (func & FM10K_FAULT_FUNC_PF)
fault->func = 0;
else
- fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
- FM10K_FAULT_FUNC_VF_SHIFT);
+ fault->func = 1 + FIELD_GET(FM10K_FAULT_FUNC_VF_MASK, func);
/* record fault type */
fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index dc8ccd378ec9..7fb1961f2921 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */
+#include <linux/bitfield.h>
#include "fm10k_vf.h"
/**
@@ -126,15 +127,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
hw->mac.max_queues = i;
/* fetch default VLAN and ITR scale */
- hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) &
- FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
+ hw->mac.default_vid = FIELD_GET(FM10K_TXQCTL_VID_MASK,
+ fm10k_read_reg(hw, FM10K_TXQCTL(0)));
/* Read the ITR scale from TDLEN. See the definition of
* FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how TDLEN is
* used here.
*/
- hw->mac.itr_scale = (fm10k_read_reg(hw, FM10K_TDLEN(0)) &
- FM10K_TDLEN_ITR_SCALE_MASK) >>
- FM10K_TDLEN_ITR_SCALE_SHIFT;
+ hw->mac.itr_scale = FIELD_GET(FM10K_TDLEN_ITR_SCALE_MASK,
+ fm10k_read_reg(hw, FM10K_TDLEN(0)));
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1bf424ac3145..bca2084cc54b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -24,6 +24,7 @@
#define I40E_MAX_VEB 16
#define I40E_MAX_NUM_DESCRIPTORS 4096
+#define I40E_MAX_NUM_DESCRIPTORS_XL710 8160
#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
@@ -33,11 +34,11 @@
#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */
/* max 16 qps */
#define i40e_default_queues_per_vmdq(pf) \
- (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
+ (test_bit(I40E_HW_CAP_RSS_AQ, (pf)->hw.caps) ? 4 : 1)
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_MAX_VF_QUEUES 16
#define i40e_pf_get_max_q_per_tc(pf) \
- (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
+ (test_bit(I40E_HW_CAP_128_QP_RSS, (pf)->hw.caps) ? 128 : 64)
#define I40E_FDIR_RING_COUNT 32
#define I40E_MAX_AQ_BUF_SIZE 4096
#define I40E_AQ_LEN 256
@@ -78,7 +79,7 @@
#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */
/* driver state flags */
-enum i40e_state_t {
+enum i40e_state {
__I40E_TESTING,
__I40E_CONFIG_BUSY,
__I40E_CONFIG_DONE,
@@ -126,7 +127,7 @@ enum i40e_state_t {
BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
-enum i40e_vsi_state_t {
+enum i40e_vsi_state {
__I40E_VSI_DOWN,
__I40E_VSI_NEEDS_RESTART,
__I40E_VSI_SYNCING_FILTERS,
@@ -138,6 +139,60 @@ enum i40e_vsi_state_t {
__I40E_VSI_STATE_SIZE__,
};
+enum i40e_pf_flags {
+ I40E_FLAG_MSI_ENA,
+ I40E_FLAG_MSIX_ENA,
+ I40E_FLAG_RSS_ENA,
+ I40E_FLAG_VMDQ_ENA,
+ I40E_FLAG_SRIOV_ENA,
+ I40E_FLAG_DCB_CAPABLE,
+ I40E_FLAG_DCB_ENA,
+ I40E_FLAG_FD_SB_ENA,
+ I40E_FLAG_FD_ATR_ENA,
+ I40E_FLAG_MFP_ENA,
+ I40E_FLAG_HW_ATR_EVICT_ENA,
+ I40E_FLAG_VEB_MODE_ENA,
+ I40E_FLAG_VEB_STATS_ENA,
+ I40E_FLAG_LINK_POLLING_ENA,
+ I40E_FLAG_TRUE_PROMISC_ENA,
+ I40E_FLAG_LEGACY_RX_ENA,
+ I40E_FLAG_PTP_ENA,
+ I40E_FLAG_IWARP_ENA,
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA,
+ I40E_FLAG_SOURCE_PRUNING_DIS,
+ I40E_FLAG_TC_MQPRIO_ENA,
+ I40E_FLAG_FD_SB_INACTIVE,
+ I40E_FLAG_FD_SB_TO_CLOUD_FILTER,
+ I40E_FLAG_FW_LLDP_DIS,
+ I40E_FLAG_RS_FEC,
+ I40E_FLAG_BASE_R_FEC,
+	/* TOTAL_PORT_SHUTDOWN_ENA
+	 * Allows the link on the NIC's port to be physically disabled.
+	 * If enabled, then after a link-down request from the OS
+	 * no link, traffic or LED activity is possible on that port.
+	 *
+	 * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA is set,
+	 * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA is explicitly forced to
+	 * true and cannot be disabled by the system admin at that time.
+	 * The functionalities are exclusive in terms of configuration, but
+	 * they also have similar behavior (both allow the physical link
+	 * of the port to be disabled), with the following differences:
+	 * - LINK_DOWN_ON_CLOSE_ENA is configurable at host OS run-time and
+	 *   is supported by the whole family of 7xx Intel Ethernet Controllers
+	 * - TOTAL_PORT_SHUTDOWN_ENA may be enabled only before the OS loads
+	 *   (in BIOS), and only if the motherboard's BIOS and the NIC's FW
+	 *   support it
+	 * - when LINK_DOWN_ON_CLOSE_ENA is used, the link is brought down
+	 *   by sending phy_type=0 to the NIC's FW
+	 * - when TOTAL_PORT_SHUTDOWN_ENA is used, phy_type is not altered;
+	 *   instead the link is brought down by clearing the
+	 *   I40E_AQ_PHY_ENABLE_LINK bit in the abilities field of the
+	 *   i40e_aq_set_phy_config structure
+	 */
+ I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
+ I40E_FLAG_VF_VLAN_PRUNING_ENA,
+ I40E_PF_FLAGS_NBITS, /* must be last */
+};
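These enumerators are bit numbers into a bitmap (see the DECLARE_BITMAP(flags, I40E_PF_FLAGS_NBITS) member further down in struct i40e_pf), not BIT() masks in a u32, so the open-coded mask arithmetic elsewhere in the driver becomes the standard bitmap helpers. A short before/after sketch:

	static void example_flag_usage(struct i40e_pf *pf)
	{
		/* was: if (pf->flags & I40E_FLAG_MSIX_ENABLED) */
		if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
			; /* MSI-X specific path */

		/* was: pf->flags |= I40E_FLAG_RSS_ENABLED; */
		set_bit(I40E_FLAG_RSS_ENA, pf->flags);

		/* was: pf->flags &= ~I40E_FLAG_DCB_ENABLED; */
		clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
	}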
+
enum i40e_interrupt_policy {
I40E_INTERRUPT_BEST_CASE,
I40E_INTERRUPT_MEDIUM,
@@ -413,9 +468,7 @@ struct i40e_pf {
struct i40e_hw hw;
DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
struct msix_entry *msix_entries;
- bool fc_autoneg_status;
- u16 eeprom_version;
u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */
u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
@@ -431,7 +484,6 @@ struct i40e_pf {
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
u16 num_alloc_vsi; /* num VSIs this driver supports */
- u8 atr_sample_rate;
bool wol_en;
struct hlist_head fdir_filter_list;
@@ -469,89 +521,16 @@ struct i40e_pf {
struct hlist_head cloud_filter_list;
u16 num_cloud_filters;
- enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
u32 msg_enable;
char int_name[I40E_INT_NAME_STR_LEN];
- u16 adminq_work_limit; /* num of admin receive queue desc to process */
unsigned long service_timer_period;
unsigned long service_timer_previous;
struct timer_list service_timer;
struct work_struct service_task;
- u32 hw_features;
-#define I40E_HW_RSS_AQ_CAPABLE BIT(0)
-#define I40E_HW_128_QP_RSS_CAPABLE BIT(1)
-#define I40E_HW_ATR_EVICT_CAPABLE BIT(2)
-#define I40E_HW_WB_ON_ITR_CAPABLE BIT(3)
-#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT(4)
-#define I40E_HW_NO_PCI_LINK_CHECK BIT(5)
-#define I40E_HW_100M_SGMII_CAPABLE BIT(6)
-#define I40E_HW_NO_DCB_SUPPORT BIT(7)
-#define I40E_HW_USE_SET_LLDP_MIB BIT(8)
-#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9)
-#define I40E_HW_PTP_L4_CAPABLE BIT(10)
-#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11)
-#define I40E_HW_HAVE_CRT_RETIMER BIT(13)
-#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14)
-#define I40E_HW_PHY_CONTROLS_LEDS BIT(15)
-#define I40E_HW_STOP_FW_LLDP BIT(16)
-#define I40E_HW_PORT_ID_VALID BIT(17)
-#define I40E_HW_RESTART_AUTONEG BIT(18)
-
- u32 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40E_FLAG_MSI_ENABLED BIT(1)
-#define I40E_FLAG_MSIX_ENABLED BIT(2)
-#define I40E_FLAG_RSS_ENABLED BIT(3)
-#define I40E_FLAG_VMDQ_ENABLED BIT(4)
-#define I40E_FLAG_SRIOV_ENABLED BIT(5)
-#define I40E_FLAG_DCB_CAPABLE BIT(6)
-#define I40E_FLAG_DCB_ENABLED BIT(7)
-#define I40E_FLAG_FD_SB_ENABLED BIT(8)
-#define I40E_FLAG_FD_ATR_ENABLED BIT(9)
-#define I40E_FLAG_MFP_ENABLED BIT(10)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(11)
-#define I40E_FLAG_VEB_MODE_ENABLED BIT(12)
-#define I40E_FLAG_VEB_STATS_ENABLED BIT(13)
-#define I40E_FLAG_LINK_POLLING_ENABLED BIT(14)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(15)
-#define I40E_FLAG_LEGACY_RX BIT(16)
-#define I40E_FLAG_PTP BIT(17)
-#define I40E_FLAG_IWARP_ENABLED BIT(18)
-#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(19)
-#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(20)
-#define I40E_FLAG_TC_MQPRIO BIT(21)
-#define I40E_FLAG_FD_SB_INACTIVE BIT(22)
-#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23)
-#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
-#define I40E_FLAG_RS_FEC BIT(25)
-#define I40E_FLAG_BASE_R_FEC BIT(26)
-/* TOTAL_PORT_SHUTDOWN
- * Allows to physically disable the link on the NIC's port.
- * If enabled, (after link down request from the OS)
- * no link, traffic or led activity is possible on that port.
- *
- * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED is set, the
- * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED must be explicitly forced to true
- * and cannot be disabled by system admin at that time.
- * The functionalities are exclusive in terms of configuration, but they also
- * have similar behavior (allowing to disable physical link of the port),
- * with following differences:
- * - LINK_DOWN_ON_CLOSE_ENABLED is configurable at host OS run-time and is
- * supported by whole family of 7xx Intel Ethernet Controllers
- * - TOTAL_PORT_SHUTDOWN may be enabled only before OS loads (in BIOS)
- * only if motherboard's BIOS and NIC's FW has support of it
- * - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is being brought down
- * by sending phy_type=0 to NIC's FW
- * - when TOTAL_PORT_SHUTDOWN is used, phy_type is not altered, instead
- * the link is being brought down by clearing bit (I40E_AQ_PHY_ENABLE_LINK)
- * in abilities field of i40e_aq_set_phy_config structure
- */
-#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED BIT(27)
-#define I40E_FLAG_VF_VLAN_PRUNING BIT(28)
-
+ DECLARE_BITMAP(flags, I40E_PF_FLAGS_NBITS);
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
@@ -559,7 +538,6 @@ struct i40e_pf {
u32 tx_timeout_count;
u32 tx_timeout_recovery_level;
unsigned long tx_timeout_last_recovery;
- u32 tx_sluggish_count;
u32 hw_csum_rx_error;
u32 led_status;
u16 corer_count; /* Core reset count */
@@ -581,17 +559,13 @@ struct i40e_pf {
struct i40e_lump_tracking *irq_pile;
/* switch config info */
- u16 pf_seid;
u16 main_vsi_seid;
u16 mac_seid;
- struct kobject *switch_kobj;
#ifdef CONFIG_DEBUG_FS
struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
bool cur_promisc;
- u16 instance; /* A unique number per i40e_pf instance in the system */
-
/* sr-iov config info */
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
@@ -685,9 +659,7 @@ struct i40e_pf {
unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
struct timespec64 ptp_prev_hw_time;
- struct work_struct ptp_pps_work;
struct work_struct ptp_extts0_work;
- struct work_struct ptp_extts1_work;
ktime_t ptp_reset_start;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u32 ptp_adj_mult;
@@ -695,10 +667,7 @@ struct i40e_pf {
u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
u32 latch_event_flags;
- u64 ptp_pps_start;
- u32 pps_delay;
spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
- struct ptp_pin_desc ptp_pin[3];
unsigned long latch_events[4];
bool ptp_tx;
bool ptp_rx;
@@ -711,7 +680,6 @@ struct i40e_pf {
u32 fd_inv;
u16 phy_led_val;
- u16 override_q_count;
u16 last_sw_conf_flags;
u16 last_sw_conf_valid_flags;
/* List to keep previous DDP profiles to be rolled back in the future */
@@ -719,6 +687,54 @@ struct i40e_pf {
};
/**
+ * __i40e_pf_next_vsi - get next valid VSI
+ * @pf: pointer to the PF struct
+ * @idx: pointer to start position number
+ *
+ * Finds and returns the next non-NULL VSI pointer in the pf->vsi array
+ * and updates the idx position. Returns NULL if no VSI is found.
+ **/
+static __always_inline struct i40e_vsi *
+__i40e_pf_next_vsi(struct i40e_pf *pf, int *idx)
+{
+ while (*idx < pf->num_alloc_vsi) {
+ if (pf->vsi[*idx])
+ return pf->vsi[*idx];
+ (*idx)++;
+ }
+ return NULL;
+}
+
+#define i40e_pf_for_each_vsi(_pf, _i, _vsi) \
+ for (_i = 0, _vsi = __i40e_pf_next_vsi(_pf, &_i); \
+ _vsi; \
+ _i++, _vsi = __i40e_pf_next_vsi(_pf, &_i))
+
+/**
+ * __i40e_pf_next_veb - get next valid VEB
+ * @pf: pointer to the PF struct
+ * @idx: pointer to start position number
+ *
+ * Finds and returns the next non-NULL VEB pointer in the pf->veb array
+ * and updates the idx position. Returns NULL if no VEB is found.
+ **/
+static __always_inline struct i40e_veb *
+__i40e_pf_next_veb(struct i40e_pf *pf, int *idx)
+{
+ while (*idx < I40E_MAX_VEB) {
+ if (pf->veb[*idx])
+ return pf->veb[*idx];
+ (*idx)++;
+ }
+ return NULL;
+}
+
+#define i40e_pf_for_each_veb(_pf, _i, _veb) \
+ for (_i = 0, _veb = __i40e_pf_next_veb(_pf, &_i); \
+ _veb; \
+ _i++, _veb = __i40e_pf_next_veb(_pf, &_i))
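A usage sketch for the two iterators defined above: they skip NULL slots, so the loop body never needs its own NULL check (both callees below are hypothetical):

	static void example_walk(struct i40e_pf *pf)
	{
		struct i40e_vsi *vsi;
		struct i40e_veb *veb;
		int i;

		i40e_pf_for_each_vsi(pf, i, vsi)
			example_handle_vsi(vsi);	/* hypothetical callee */

		i40e_pf_for_each_veb(pf, i, veb)
			example_handle_veb(veb);	/* hypothetical callee */
	}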
+
+/**
* i40e_mac_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
* @macaddr: the MAC Address as the base key
*
@@ -767,13 +783,11 @@ struct i40e_new_mac_filter {
struct i40e_veb {
struct i40e_pf *pf;
u16 idx;
- u16 veb_idx; /* index of VEB parent */
u16 seid;
u16 uplink_seid;
u16 stats_idx; /* index of VEB parent */
u8 enabled_tc;
u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */
- u16 flags;
u16 bw_limit;
u8 bw_max_quanta;
bool is_abs_credits;
@@ -940,6 +954,7 @@ struct i40e_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
+ bool in_busy_poll;
int irq_num; /* IRQ assigned to this q_vector */
} ____cacheline_internodealigned_in_smp;
@@ -1152,14 +1167,12 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
static inline struct i40e_vsi *
i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- struct i40e_vsi *vsi = pf->vsi[i];
-
- if (vsi && vsi->type == type)
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->type == type)
return vsi;
- }
return NULL;
}
@@ -1199,7 +1212,7 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi);
int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
-struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
void i40e_veb_release(struct i40e_veb *veb);
@@ -1223,8 +1236,8 @@ static inline void i40e_dbg_exit(void) {}
int i40e_lan_add_device(struct i40e_pf *pf);
int i40e_lan_del_device(struct i40e_pf *pf);
void i40e_client_subtask(struct i40e_pf *pf);
-void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
-void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
+void i40e_notify_client_of_l2_param_changes(struct i40e_pf *pf);
+void i40e_notify_client_of_netdev_close(struct i40e_pf *pf, bool reset);
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
void i40e_client_update_msix_info(struct i40e_pf *pf);
@@ -1267,7 +1280,7 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
{
- return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP);
+ return test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
}
#ifdef CONFIG_I40E_DCB
@@ -1301,7 +1314,7 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf);
int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
-void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
+void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags);
static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{
@@ -1321,13 +1334,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
* i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF
* @pf: pointer to a pf.
*
- * Check and return value of flag I40E_FLAG_TC_MQPRIO.
+ * Check and return the state of the I40E_FLAG_TC_MQPRIO_ENA flag.
*
- * Return: I40E_FLAG_TC_MQPRIO set state.
+ * Return: true if I40E_FLAG_TC_MQPRIO_ENA is set, false otherwise
**/
-static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
+static inline bool i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
{
- return pf->flags & I40E_FLAG_TC_MQPRIO;
+ return test_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
}
/**
@@ -1341,4 +1354,62 @@ static inline struct i40e_pf *i40e_hw_to_pf(struct i40e_hw *hw)
struct device *i40e_hw_to_dev(struct i40e_hw *hw);
+/**
+ * i40e_pf_get_vsi_by_seid - find VSI by SEID
+ * @pf: pointer to a PF
+ * @seid: SEID of the VSI
+ **/
+static inline struct i40e_vsi *
+i40e_pf_get_vsi_by_seid(struct i40e_pf *pf, u16 seid)
+{
+ struct i40e_vsi *vsi;
+ int i;
+
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->seid == seid)
+ return vsi;
+
+ return NULL;
+}
+
+/**
+ * i40e_pf_get_main_vsi - get pointer to main VSI
+ * @pf: pointer to a PF
+ *
+ * Return: pointer to main VSI or NULL if it does not exist
+ **/
+static inline struct i40e_vsi *i40e_pf_get_main_vsi(struct i40e_pf *pf)
+{
+ return (pf->lan_vsi != I40E_NO_VSI) ? pf->vsi[pf->lan_vsi] : NULL;
+}
+
+/**
+ * i40e_pf_get_veb_by_seid - find VEB by SEID
+ * @pf: pointer to a PF
+ * @seid: SEID of the VEB
+ **/
+static inline struct i40e_veb *
+i40e_pf_get_veb_by_seid(struct i40e_pf *pf, u16 seid)
+{
+ struct i40e_veb *veb;
+ int i;
+
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->seid == seid)
+ return veb;
+
+ return NULL;
+}
+
+/**
+ * i40e_pf_get_main_veb - get pointer to main VEB
+ * @pf: pointer to a PF
+ *
+ * Return: pointer to main VEB or NULL if it does not exist
+ **/
+static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf)
+{
+ return (pf->lan_veb != I40E_NO_VEB) ? pf->veb[pf->lan_veb] : NULL;
+}
+
#endif /* _I40E_H_ */
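With the accessors above, the common pf->vsi[pf->lan_vsi] dereference gains an explicit check for the case where the main VSI has not been set up. A short sketch of the replacement pattern:

	static void example_use_main_vsi(struct i40e_pf *pf)
	{
		/* was: struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; */
		struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);

		if (!vsi)
			return;		/* main VSI not created yet */

		netif_carrier_off(vsi->netdev);	/* any per-VSI work */
	}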
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9ce6e633cc2f..f73f5930fc58 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -9,40 +9,6 @@
static void i40e_resume_aq(struct i40e_hw *hw);
/**
- * i40e_adminq_init_regs - Initialize AdminQ registers
- * @hw: pointer to the hardware structure
- *
- * This assumes the alloc_asq and alloc_arq functions have already been called
- **/
-static void i40e_adminq_init_regs(struct i40e_hw *hw)
-{
- /* set head and tail registers in our local struct */
- if (i40e_is_vf(hw)) {
- hw->aq.asq.tail = I40E_VF_ATQT1;
- hw->aq.asq.head = I40E_VF_ATQH1;
- hw->aq.asq.len = I40E_VF_ATQLEN1;
- hw->aq.asq.bal = I40E_VF_ATQBAL1;
- hw->aq.asq.bah = I40E_VF_ATQBAH1;
- hw->aq.arq.tail = I40E_VF_ARQT1;
- hw->aq.arq.head = I40E_VF_ARQH1;
- hw->aq.arq.len = I40E_VF_ARQLEN1;
- hw->aq.arq.bal = I40E_VF_ARQBAL1;
- hw->aq.arq.bah = I40E_VF_ARQBAH1;
- } else {
- hw->aq.asq.tail = I40E_PF_ATQT;
- hw->aq.asq.head = I40E_PF_ATQH;
- hw->aq.asq.len = I40E_PF_ATQLEN;
- hw->aq.asq.bal = I40E_PF_ATQBAL;
- hw->aq.asq.bah = I40E_PF_ATQBAH;
- hw->aq.arq.tail = I40E_PF_ARQT;
- hw->aq.arq.head = I40E_PF_ARQH;
- hw->aq.arq.len = I40E_PF_ARQLEN;
- hw->aq.arq.bal = I40E_PF_ARQBAL;
- hw->aq.arq.bah = I40E_PF_ARQBAH;
- }
-}
-
-/**
* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
@@ -267,17 +233,17 @@ static int i40e_config_asq_regs(struct i40e_hw *hw)
u32 reg = 0;
/* Clear Head and Tail */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, I40E_PF_ATQH, 0);
+ wr32(hw, I40E_PF_ATQT, 0);
/* set starting point */
- wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
- wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
- wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.desc_buf.pa));
/* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.asq.bal);
+ reg = rd32(hw, I40E_PF_ATQBAL);
if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
ret_code = -EIO;
@@ -296,20 +262,20 @@ static int i40e_config_arq_regs(struct i40e_hw *hw)
u32 reg = 0;
/* Clear Head and Tail */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, I40E_PF_ARQH, 0);
+ wr32(hw, I40E_PF_ARQT, 0);
/* set starting point */
- wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
- wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
- wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.desc_buf.pa));
/* Update tail in the HW to post pre-allocated buffers */
- wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+ wr32(hw, I40E_PF_ARQT, hw->aq.num_arq_entries - 1);
/* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.arq.bal);
+ reg = rd32(hw, I40E_PF_ARQBAL);
if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
ret_code = -EIO;
@@ -452,11 +418,11 @@ static int i40e_shutdown_asq(struct i40e_hw *hw)
}
/* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
- wr32(hw, hw->aq.asq.len, 0);
- wr32(hw, hw->aq.asq.bal, 0);
- wr32(hw, hw->aq.asq.bah, 0);
+ wr32(hw, I40E_PF_ATQH, 0);
+ wr32(hw, I40E_PF_ATQT, 0);
+ wr32(hw, I40E_PF_ATQLEN, 0);
+ wr32(hw, I40E_PF_ATQBAL, 0);
+ wr32(hw, I40E_PF_ATQBAH, 0);
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
@@ -486,11 +452,11 @@ static int i40e_shutdown_arq(struct i40e_hw *hw)
}
/* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
- wr32(hw, hw->aq.arq.len, 0);
- wr32(hw, hw->aq.arq.bal, 0);
- wr32(hw, hw->aq.arq.bah, 0);
+ wr32(hw, I40E_PF_ARQH, 0);
+ wr32(hw, I40E_PF_ARQT, 0);
+ wr32(hw, I40E_PF_ARQLEN, 0);
+ wr32(hw, I40E_PF_ARQBAL, 0);
+ wr32(hw, I40E_PF_ARQBAH, 0);
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
@@ -503,44 +469,76 @@ shutdown_arq_out:
}
/**
- * i40e_set_hw_flags - set HW flags
+ * i40e_set_hw_caps - set HW flags
* @hw: pointer to the hardware structure
**/
-static void i40e_set_hw_flags(struct i40e_hw *hw)
+static void i40e_set_hw_caps(struct i40e_hw *hw)
{
- struct i40e_adminq_info *aq = &hw->aq;
-
- hw->flags = 0;
+ bitmap_zero(hw->caps, I40E_HW_CAPS_NBITS);
switch (hw->mac.type) {
case I40E_MAC_XL710:
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
- hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ if (i40e_is_aq_api_ver_ge(hw, 1,
+ I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+ set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);
+ set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);
/* The ability to RX (not drop) 802.1ad frames */
- hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+ set_bit(I40E_HW_CAP_802_1AD, hw->caps);
+ }
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5)) {
+ /* Supported in FW API version higher than 1.4 */
+ set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
+ }
+ if (i40e_is_fw_ver_lt(hw, 4, 33)) {
+ set_bit(I40E_HW_CAP_RESTART_AUTONEG, hw->caps);
+ /* No DCB support for FW < v4.33 */
+ set_bit(I40E_HW_CAP_NO_DCB_SUPPORT, hw->caps);
+ }
+ if (i40e_is_fw_ver_lt(hw, 4, 3)) {
+ /* Disable FW LLDP if FW < v4.3 */
+ set_bit(I40E_HW_CAP_STOP_FW_LLDP, hw->caps);
+ }
+ if (i40e_is_fw_ver_ge(hw, 4, 40)) {
+ /* Use the FW Set LLDP MIB API if FW >= v4.40 */
+ set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
+ }
+ if (i40e_is_fw_ver_ge(hw, 6, 0)) {
+			/* Enable PTP L4 if FW >= v6.0 */
+ set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
}
break;
case I40E_MAC_X722:
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
- I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+ set_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps);
+ set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);
+ set_bit(I40E_HW_CAP_RSS_AQ, hw->caps);
+ set_bit(I40E_HW_CAP_128_QP_RSS, hw->caps);
+ set_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
+ set_bit(I40E_HW_CAP_WB_ON_ITR, hw->caps);
+ set_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, hw->caps);
+ set_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, hw->caps);
+ set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
+ set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
+ set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
+ set_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, hw->caps);
+ set_bit(I40E_HW_CAP_OUTER_UDP_CSUM, hw->caps);
+
+ if (rd32(hw, I40E_GLQF_FDEVICTENA(1)) !=
+ I40E_FDEVICT_PCTYPE_DEFAULT) {
+ hw_warn(hw, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
+ clear_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
+ }
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
- hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ if (i40e_is_aq_api_ver_ge(hw, 1,
+ I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+ set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ if (i40e_is_aq_api_ver_ge(hw, 1,
+ I40E_MINOR_VER_GET_LINK_INFO_X722))
+ set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
- hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;
+ if (i40e_is_aq_api_ver_ge(hw, 1,
+ I40E_MINOR_VER_FW_REQUEST_FEC_X722))
+ set_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps);
fallthrough;
default:
@@ -548,22 +546,18 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
}
/* Newer versions of firmware require lock when reading the NVM */
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= 5))
- hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= 8)) {
- hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
- hw->flags |= I40E_HW_FLAG_DROP_MODE;
- }
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5))
+ set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);
+
+ /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
+ if (i40e_is_aq_api_ver_ge(hw, 1, 7))
+ set_bit(I40E_HW_CAP_802_1AD, hw->caps);
+
+ if (i40e_is_aq_api_ver_ge(hw, 1, 8))
+ set_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps);
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= 9))
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
+ if (i40e_is_aq_api_ver_ge(hw, 1, 9))
+ set_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps);
}
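i40e_is_aq_api_ver_ge(), i40e_is_fw_ver_ge() and i40e_is_fw_ver_lt() are introduced elsewhere in this series; their definitions are not part of this hunk. Based only on how they are used above, the AQ API check presumably wraps a comparison along these lines (illustrative sketch, not the actual helper):

	static inline bool example_aq_api_ver_ge(struct i40e_hw *hw, u16 maj, u16 min)
	{
		return hw->aq.api_maj_ver > maj ||
		       (hw->aq.api_maj_ver == maj && hw->aq.api_min_ver >= min);
	}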
/**
@@ -593,9 +587,6 @@ int i40e_init_adminq(struct i40e_hw *hw)
goto init_adminq_exit;
}
- /* Set up register offsets */
- i40e_adminq_init_regs(hw);
-
/* setup ASQ command write back timeout */
hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
@@ -633,7 +624,7 @@ int i40e_init_adminq(struct i40e_hw *hw)
/* Some features were introduced in different FW API version
* for different MAC type.
*/
- i40e_set_hw_flags(hw);
+ i40e_set_hw_caps(hw);
/* get the NVM version info */
i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
@@ -648,25 +639,7 @@ int i40e_init_adminq(struct i40e_hw *hw)
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
- if (hw->mac.type == I40E_MAC_XL710 &&
- hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
- hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
- }
- if (hw->mac.type == I40E_MAC_X722 &&
- hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
- hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
- }
-
- /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
- if (hw->aq.api_maj_ver > 1 ||
- (hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 7))
- hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
-
- if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+ if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR + 1, 0)) {
ret_code = -EIO;
goto init_adminq_free_arq;
}
@@ -723,9 +696,9 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
- while (rd32(hw, hw->aq.asq.head) != ntc) {
+ while (rd32(hw, I40E_PF_ATQH) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
- "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, I40E_PF_ATQH));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
@@ -759,7 +732,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
- return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+ return rd32(hw, I40E_PF_ATQH) == hw->aq.asq.next_to_use;
}
@@ -800,7 +773,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
hw->aq.asq_last_status = I40E_AQ_RC_OK;
- val = rd32(hw, hw->aq.asq.head);
+ val = rd32(hw, I40E_PF_ATQH);
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
@@ -892,7 +865,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
if (!details->postpone)
- wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+ wr32(hw, I40E_PF_ATQT, hw->aq.asq.next_to_use);
/* if cmd_details are not defined or async flag is not set,
* we need to wait for desc write back
@@ -952,7 +925,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+ if (rd32(hw, I40E_PF_ATQLEN) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = -EIO;
@@ -1106,7 +1079,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
- ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+ ntu = rd32(hw, I40E_PF_ARQH) & I40E_PF_ARQH_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = -EALREADY;
@@ -1154,7 +1127,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
- wr32(hw, hw->aq.arq.tail, ntc);
+ wr32(hw, I40E_PF_ARQT, ntc);
/* ntc is updated to tail + 1 */
ntc++;
if (ntc == hw->aq.num_arq_entries)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 80125bea80a2..ee86d2c53079 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -29,13 +29,6 @@ struct i40e_adminq_ring {
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
-
- /* used for queue tracking */
- u32 head;
- u32 tail;
- u32 len;
- u32 bah;
- u32 bal;
};
/* ASQ transaction details */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 18a1c3b6d72c..c8f35d4de271 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -5,6 +5,7 @@
#define _I40E_ADMINQ_CMD_H_
#include <linux/bits.h>
+#include <linux/types.h>
/* This header file defines the i40e Admin Queue commands and is shared between
* i40e Firmware and Software.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 306758428aef..59263551c383 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -101,25 +101,26 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
/**
* i40e_notify_client_of_l2_param_changes - call the client notify callback
- * @vsi: the VSI with l2 param changes
+ * @pf: PF device pointer
*
- * If there is a client to this VSI, call the client
+ * If there is a client, call its callback
**/
-void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
+void i40e_notify_client_of_l2_param_changes(struct i40e_pf *pf)
{
- struct i40e_pf *pf = vsi->back;
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_client_instance *cdev = pf->cinst;
struct i40e_params params;
if (!cdev || !cdev->client)
return;
if (!cdev->client->ops || !cdev->client->ops->l2_param_change) {
- dev_dbg(&vsi->back->pdev->dev,
+ dev_dbg(&pf->pdev->dev,
"Cannot locate client instance l2_param_change routine\n");
return;
}
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
- dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
+ dev_dbg(&pf->pdev->dev,
+ "Client is not open, abort l2 param change\n");
return;
}
memset(&params, 0, sizeof(params));
@@ -148,8 +149,6 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
u32 reg_idx;
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1);
wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
}
@@ -159,20 +158,19 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
/**
* i40e_notify_client_of_netdev_close - call the client close callback
- * @vsi: the VSI with netdev closed
+ * @pf: PF device pointer
* @reset: true when close called due to a reset pending
*
* If there is a client to this netdev, call the client with close
**/
-void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
+void i40e_notify_client_of_netdev_close(struct i40e_pf *pf, bool reset)
{
- struct i40e_pf *pf = vsi->back;
struct i40e_client_instance *cdev = pf->cinst;
if (!cdev || !cdev->client)
return;
if (!cdev->client->ops || !cdev->client->ops->close) {
- dev_dbg(&vsi->back->pdev->dev,
+ dev_dbg(&pf->pdev->dev,
"Cannot locate client instance close routine\n");
return;
}
@@ -335,9 +333,9 @@ static int i40e_register_auxiliary_dev(struct i40e_info *ldev, const char *name)
**/
static void i40e_client_add_instance(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_client_instance *cdev = NULL;
struct netdev_hw_addr *mac = NULL;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
@@ -401,9 +399,9 @@ void i40e_client_del_instance(struct i40e_pf *pf)
**/
void i40e_client_subtask(struct i40e_pf *pf)
{
- struct i40e_client *client;
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_client_instance *cdev;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_client *client;
int ret = 0;
if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state))
@@ -576,8 +574,6 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
for (i = 0; i < qvlist_info->num_vectors; i++) {
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
v_idx = qv_info->v_idx;
/* Validate vector id belongs to this client */
@@ -669,8 +665,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
bool is_vf, u32 vf_id,
u32 flag, u32 valid_flag)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(ldev->pf);
struct i40e_pf *pf = ldev->pf;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
bool update = true;
int err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index d7e24d661724..e8031f1a9b4f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2013 - 2021 Intel Corporation. */
#include <linux/avf/virtchnl.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
@@ -195,11 +196,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
**/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
- if (hw->aq.asq.len)
- return !!(rd32(hw, hw->aq.asq.len) &
- I40E_PF_ATQLEN_ATQENABLE_MASK);
- else
+ /* Check if the queue is initialized */
+ if (!hw->aq.asq.count)
return false;
+
+ return !!(rd32(hw, I40E_PF_ATQLEN) & I40E_PF_ATQLEN_ATQENABLE_MASK);
}
/**
@@ -248,6 +249,7 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
struct i40e_aqc_get_set_rss_lut *cmd_resp =
(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
int status;
+ u16 flags;
if (set)
i40e_fill_default_direct_cmd_desc(&desc,
@@ -260,23 +262,18 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
- I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+ vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
+ FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_VALID, 1);
+ cmd_resp->vsi_id = cpu_to_le16(vsi_id);
if (pf_lut)
- cmd_resp->flags |= cpu_to_le16((u16)
- ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF);
else
- cmd_resp->flags |= cpu_to_le16((u16)
- ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI);
+ cmd_resp->flags = cpu_to_le16(flags);
status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
return status;
@@ -346,11 +343,9 @@ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
- I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+ vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
+ FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_VALID, 1);
+ cmd_resp->vsi_id = cpu_to_le16(vsi_id);
status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
@@ -386,259 +381,6 @@ int i40e_aq_set_rss_key(struct i40e_hw *hw,
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
-/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT i40e_ptype_lookup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum i40e_rx_l2_ptype to decode the packet type
- * ENDIF
- */
-
-/* macro to make the table lines short, use explicit indexing with [PTYPE] */
-#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = { \
- 1, \
- I40E_RX_PTYPE_OUTER_##OUTER_IP, \
- I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- I40E_RX_PTYPE_##OUTER_FRAG, \
- I40E_RX_PTYPE_TUNNEL_##T, \
- I40E_RX_PTYPE_TUNNEL_END_##TE, \
- I40E_RX_PTYPE_##TEF, \
- I40E_RX_PTYPE_INNER_PROT_##I, \
- I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
-#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
-#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
-
-/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
-struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
- /* L2 Packet types */
- I40E_PTT_UNUSED_ENTRY(0),
- I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
- I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT_UNUSED_ENTRY(4),
- I40E_PTT_UNUSED_ENTRY(5),
- I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT_UNUSED_ENTRY(8),
- I40E_PTT_UNUSED_ENTRY(9),
- I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-
- /* Non Tunneled IPv4 */
- I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(25),
- I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(32),
- I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(39),
- I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(47),
- I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(54),
- I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(62),
- I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(69),
- I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
- I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(77),
- I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(84),
- I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(91),
- I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(98),
- I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(105),
- I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(113),
- I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(120),
- I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(128),
- I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(135),
- I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(143),
- I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(150),
- I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* unused entries */
- [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-};
-
/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
@@ -669,11 +411,11 @@ int i40e_init_shared_code(struct i40e_hw *hw)
hw->phy.get_link_info = true;
/* Determine port number and PF number*/
- port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
- >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
+ port = FIELD_GET(I40E_PFGEN_PORTNUM_PORT_NUM_MASK,
+ rd32(hw, I40E_PFGEN_PORTNUM));
hw->port = (u8)port;
- ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
- I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
+ ari = FIELD_GET(I40E_GLPCI_CAPSUP_ARI_EN_MASK,
+ rd32(hw, I40E_GLPCI_CAPSUP));
func_rid = rd32(hw, I40E_PF_FUNC_RID);
if (ari)
hw->pf_id = (u8)(func_rid & 0xff);
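Note on the conversion pattern above: the open-coded "read, AND with *_MASK, shift right by *_SHIFT" sequences become FIELD_GET() from <linux/bitfield.h>, which derives the shift from the mask at compile time; FIELD_PREP() is the write-side inverse used later in this patch. A minimal sketch of both helpers with a made-up register layout (the names and bit positions here are illustrative assumptions, not taken from the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* hypothetical 32-bit register with a 4-bit PORT field in bits 7:4 */
#define DEMO_CTL_PORT_MASK	GENMASK(7, 4)

static u8 demo_get_port(u32 reg)
{
	/* same result as (reg & DEMO_CTL_PORT_MASK) >> 4; the shift is inferred from the mask */
	return FIELD_GET(DEMO_CTL_PORT_MASK, reg);
}

static u32 demo_set_port(u32 reg, u8 port)
{
	reg &= ~DEMO_CTL_PORT_MASK;			/* clear the field */
	reg |= FIELD_PREP(DEMO_CTL_PORT_MASK, port);	/* shift the new value into place, masked to the field width */
	return reg;
}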
@@ -991,9 +733,8 @@ int i40e_pf_reset(struct i40e_hw *hw)
* The grst delay value is in 100ms units, and we'll wait a
* couple counts longer to be sure we don't just miss the end.
*/
- grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
- I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
- I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ grst_del = FIELD_GET(I40E_GLGEN_RSTCTL_GRSTDEL_MASK,
+ rd32(hw, I40E_GLGEN_RSTCTL));
/* It can take upto 15 secs for GRST steady state.
* Bump it to 16 secs max to be safe.
@@ -1085,26 +826,20 @@ void i40e_clear_hw(struct i40e_hw *hw)
/* get number of interrupts, queues, and VFs */
val = rd32(hw, I40E_GLPCI_CNF2);
- num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
- I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
- num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
- I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+ num_pf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_PF_N_MASK, val);
+ num_vf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_VF_N_MASK, val);
val = rd32(hw, I40E_PFLAN_QALLOC);
- base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
- I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
- j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
- I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+ base_queue = FIELD_GET(I40E_PFLAN_QALLOC_FIRSTQ_MASK, val);
+ j = FIELD_GET(I40E_PFLAN_QALLOC_LASTQ_MASK, val);
if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
num_queues = (j - base_queue) + 1;
else
num_queues = 0;
val = rd32(hw, I40E_PF_VT_PFALLOC);
- i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
- I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
- j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
- I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+ i = FIELD_GET(I40E_PF_VT_PFALLOC_FIRSTVF_MASK, val);
+ j = FIELD_GET(I40E_PF_VT_PFALLOC_LASTVF_MASK, val);
if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
num_vfs = (j - i) + 1;
else
@@ -1199,8 +934,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
!hw->func_caps.led[idx])
return 0;
gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
- I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ port = FIELD_GET(I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK, gpio_val);
/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
* if it is not our port then ignore
@@ -1244,8 +978,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
if (!gpio_val)
continue;
- mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
- I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+ mode = FIELD_GET(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK, gpio_val);
break;
}
@@ -1288,14 +1021,14 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
pin_func = I40E_PIN_FUNC_LED;
gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
- gpio_val |= ((pin_func <<
- I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
- I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
+ gpio_val |=
+ FIELD_PREP(I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK,
+ pin_func);
}
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
- gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
- I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+ gpio_val |= FIELD_PREP(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK,
+ mode);
if (blink)
gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
@@ -1374,8 +1107,8 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (report_init) {
if (hw->mac.type == I40E_MAC_XL710 &&
- hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
+ I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
status = i40e_aq_get_link_info(hw, true, NULL, NULL);
} else {
hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
@@ -1645,12 +1378,11 @@ int i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->lse_enable = false;
- if ((hw->mac.type == I40E_MAC_XL710) &&
- (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
- hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+ if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 40) &&
+ hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps) &&
hw->mac.type != I40E_MAC_X722) {
__le32 tmp;
@@ -1750,21 +1482,6 @@ int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
}
/**
- * i40e_is_aq_api_ver_ge
- * @aq: pointer to AdminQ info containing HW API version to compare
- * @maj: API major value
- * @min: API minor value
- *
- * Assert whether current HW API version is greater/equal than provided.
- **/
-static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
- u16 min)
-{
- return (aq->api_maj_ver > maj ||
- (aq->api_maj_ver == maj && aq->api_min_ver >= min));
-}
-
-/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -1890,14 +1607,14 @@ int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5))
flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
}
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
- if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5))
cmd->valid_flags |=
cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
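The i40e_is_aq_api_ver_ge() calls above use the reworked helper that takes the hw struct directly (the old static variant taking &hw->aq is deleted earlier in this hunk series); the patch also relies on i40e_is_aq_api_ver_lt(), i40e_is_fw_ver_lt() and i40e_is_fw_ver_eq() elsewhere. A sketch of the comparisons these helpers stand for, assuming they simply wrap the version fields kept in hw->aq (the real definitions live in i40e_adminq.h and may differ in detail):

/* sketch only: (major, minor) comparison against the AdminQ API / firmware version */
static inline bool demo_is_aq_api_ver_ge(struct i40e_hw *hw, u16 maj, u16 min)
{
	return hw->aq.api_maj_ver > maj ||
	       (hw->aq.api_maj_ver == maj && hw->aq.api_min_ver >= min);
}

static inline bool demo_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
{
	return hw->aq.fw_maj_ver < maj ||
	       (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver < min);
}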
@@ -2000,13 +1717,13 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
if (enable) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5))
flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
}
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
- if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5))
cmd->valid_flags |=
cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
@@ -2253,7 +1970,7 @@ int i40e_aq_set_switch_config(struct i40e_hw *hw,
scfg->flags = cpu_to_le16(flags);
scfg->valid_flags = cpu_to_le16(valid_flags);
scfg->mode = mode;
- if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_802_1AD, hw->caps)) {
scfg->switch_tag = cpu_to_le16(hw->switch_tag);
scfg->first_tag = cpu_to_le16(hw->first_tag);
scfg->second_tag = cpu_to_le16(hw->second_tag);
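Checks of the old hw->flags word (I40E_HW_FLAG_*) are converted to test_bit() on a hw->caps bitmap indexed by I40E_HW_CAP_* bit numbers. A small sketch of that pattern with stand-in names (the demo_* enum and struct are assumptions for illustration; the driver declares the real bitmap in its own headers):

#include <linux/bitmap.h>
#include <linux/bitops.h>

enum demo_hw_caps {
	DEMO_HW_CAP_802_1AD,
	DEMO_HW_CAP_AQ_PHY_ACCESS,
	DEMO_HW_CAPS_NBITS,		/* must stay last: sizes the bitmap */
};

struct demo_hw {
	DECLARE_BITMAP(caps, DEMO_HW_CAPS_NBITS);
};

static bool demo_use_aq_phy_access(struct demo_hw *hw)
{
	/* set once during capability discovery ... */
	set_bit(DEMO_HW_CAP_AQ_PHY_ACCESS, hw->caps);

	/* ... then queried wherever the code used to test hw->flags & FLAG */
	return test_bit(DEMO_HW_CAP_AQ_PHY_ACCESS, hw->caps);
}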
@@ -3530,8 +3247,7 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
- cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->type |= FIELD_PREP(I40E_AQ_LLDP_BRIDGE_TYPE_MASK, bridge_type);
desc.datalen = cpu_to_le16(buff_size);
@@ -3637,7 +3353,7 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
(struct i40e_aqc_lldp_restore *)&desc.params.raw;
int status;
- if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+ if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
i40e_debug(hw, I40E_DEBUG_ALL,
"Restore LLDP not supported by current FW version.\n");
return -ENODEV;
@@ -3680,7 +3396,7 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
if (persist) {
- if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps))
cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
else
i40e_debug(hw, I40E_DEBUG_ALL,
@@ -3713,7 +3429,7 @@ int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
cmd->command = I40E_AQ_LLDP_AGENT_START;
if (persist) {
- if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps))
cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
else
i40e_debug(hw, I40E_DEBUG_ALL,
@@ -3741,7 +3457,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
int status;
- if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps))
return -ENODEV;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -4212,8 +3928,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
val = rd32(hw, I40E_GLHMC_FCOEFMAX);
- fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
- >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+ fcoe_fmax = FIELD_GET(I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK, val);
if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
return -EINVAL;
@@ -4249,30 +3964,25 @@ int i40e_set_filter_control(struct i40e_hw *hw,
/* Program required PE hash buckets for the PF */
val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
- val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
- I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_PEHSIZE_MASK, settings->pe_filt_num);
/* Program required PE contexts for the PF */
val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
- val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
- I40E_PFQF_CTL_0_PEDSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_PEDSIZE_MASK, settings->pe_cntx_num);
/* Program required FCoE hash buckets for the PF */
val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
- val |= ((u32)settings->fcoe_filt_num <<
- I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
- I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCHSIZE_MASK,
+ settings->fcoe_filt_num);
/* Program required FCoE DDP contexts for the PF */
val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
- val |= ((u32)settings->fcoe_cntx_num <<
- I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
- I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCDSIZE_MASK,
+ settings->fcoe_cntx_num);
/* Program Hash LUT size for the PF */
val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
hash_lut_size = 1;
- val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
- I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_HASHLUTSIZE_MASK, hash_lut_size);
/* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
if (settings->enable_fdir)
@@ -4673,8 +4383,7 @@ int i40e_read_phy_register_clause22(struct i40e_hw *hw,
"PHY: Can't write command to external PHY.\n");
} else {
command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
- *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
- I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command);
}
return status;
@@ -4783,8 +4492,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
if (!status) {
command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
- *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
- I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command);
} else {
i40e_debug(hw, I40E_DEBUG_PHY,
"PHY: Can't read register value from external PHY.\n");
@@ -5043,7 +4751,7 @@ static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
u32 i;
*reg_val = 0;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
status =
i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
@@ -5076,7 +4784,7 @@ static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
int status;
u32 i;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
status =
i40e_aq_set_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
@@ -5115,7 +4823,7 @@ int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u8 port_num;
u32 i;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
status =
i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
@@ -5238,14 +4946,14 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
**/
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
- bool use_register;
+ bool use_register = false;
int status = 0;
int retry = 5;
u32 val = 0;
- use_register = (((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver < 5)) ||
- (hw->mac.type == I40E_MAC_X722));
+ if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722)
+ use_register = true;
+
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -5300,13 +5008,13 @@ int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
**/
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
- bool use_register;
+ bool use_register = false;
int status = 0;
int retry = 5;
- use_register = (((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver < 5)) ||
- (hw->mac.type == I40E_MAC_X722));
+ if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722)
+ use_register = true;
+
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
@@ -5334,16 +5042,17 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
u8 mdio_num,
struct i40e_aqc_phy_register_access *cmd)
{
- if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
- cmd->cmd_flags |=
- I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
- ((mdio_num <<
- I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
- I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
- else
- i40e_debug(hw, I40E_DEBUG_PHY,
- "MDIO I/F number selection not supported by current FW version.\n");
+ if (!set_mdio ||
+ cmd->phy_interface != I40E_AQ_PHY_REG_ACCESS_EXTERNAL)
+ return;
+
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps)) {
+ cmd->cmd_flags |=
+ I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
+ FIELD_PREP(I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK,
+ mdio_num);
+ } else {
+ i40e_debug(hw, I40E_DEBUG_PHY, "MDIO I/F number selection not supported by current FW version.\n");
}
}
@@ -5928,9 +5637,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
u16 tnl_type;
u32 ti;
- tnl_type = (le16_to_cpu(filters[i].element.flags) &
- I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
- I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ tnl_type = le16_get_bits(filters[i].element.flags,
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK);
/* Due to hardware eccentricities, the VNI for Geneve is shifted
* one more byte further than normally used for Tenant ID in
@@ -6022,9 +5730,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
u16 tnl_type;
u32 ti;
- tnl_type = (le16_to_cpu(filters[i].element.flags) &
- I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
- I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ tnl_type = le16_get_bits(filters[i].element.flags,
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK);
/* Due to hardware eccentricities, the VNI for Geneve is shifted
* one more byte further than normally used for Tenant ID in
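For fields carried in little-endian descriptors, the le16_to_cpu() plus mask plus shift sequence collapses into le16_get_bits() (also from <linux/bitfield.h>), which byte-swaps and extracts in one step. A short sketch with an assumed field layout (the mask position is illustrative, not the real I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK value):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_CLOUD_FLAGS_TNL_TYPE_MASK	GENMASK(12, 9)	/* example bit positions */

static u16 demo_get_tnl_type(__le16 flags)
{
	/* equivalent to (le16_to_cpu(flags) & MASK) >> SHIFT */
	return le16_get_bits(flags, DEMO_CLOUD_FLAGS_TNL_TYPE_MASK);
}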
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 68602fc375f6..8db1eb0c1768 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */
+#include <linux/bitfield.h>
+#include "i40e_adminq.h"
#include "i40e_alloc.h"
#include "i40e_dcb.h"
#include "i40e_prototype.h"
@@ -20,8 +22,7 @@ int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
return -EINVAL;
reg = rd32(hw, I40E_PRTDCB_GENS);
- *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
- I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+ *status = FIELD_GET(I40E_PRTDCB_GENS_DCBX_STATUS_MASK, reg);
return 0;
}
@@ -50,12 +51,9 @@ static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
* |1bit | 1bit|3 bits|3bits|
*/
etscfg = &dcbcfg->etscfg;
- etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
- I40E_IEEE_ETS_WILLING_SHIFT);
- etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
- I40E_IEEE_ETS_CBS_SHIFT);
- etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
- I40E_IEEE_ETS_MAXTC_SHIFT);
+ etscfg->willing = FIELD_GET(I40E_IEEE_ETS_WILLING_MASK, buf[offset]);
+ etscfg->cbs = FIELD_GET(I40E_IEEE_ETS_CBS_MASK, buf[offset]);
+ etscfg->maxtcs = FIELD_GET(I40E_IEEE_ETS_MAXTC_MASK, buf[offset]);
/* Move offset to Priority Assignment Table */
offset++;
@@ -69,11 +67,9 @@ static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
* -----------------------------------------
*/
for (i = 0; i < 4; i++) {
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
- I40E_IEEE_ETS_PRIO_1_SHIFT);
- etscfg->prioritytable[i * 2] = priority;
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
- I40E_IEEE_ETS_PRIO_0_SHIFT);
+ priority = FIELD_GET(I40E_IEEE_ETS_PRIO_1_MASK, buf[offset]);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = FIELD_GET(I40E_IEEE_ETS_PRIO_0_MASK, buf[offset]);
etscfg->prioritytable[i * 2 + 1] = priority;
offset++;
}
@@ -124,12 +120,10 @@ static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
* -----------------------------------------
*/
for (i = 0; i < 4; i++) {
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
- I40E_IEEE_ETS_PRIO_1_SHIFT);
- dcbcfg->etsrec.prioritytable[i*2] = priority;
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
- I40E_IEEE_ETS_PRIO_0_SHIFT);
- dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ priority = FIELD_GET(I40E_IEEE_ETS_PRIO_1_MASK, buf[offset]);
+ dcbcfg->etsrec.prioritytable[i * 2] = priority;
+ priority = FIELD_GET(I40E_IEEE_ETS_PRIO_0_MASK, buf[offset]);
+ dcbcfg->etsrec.prioritytable[(i * 2) + 1] = priority;
offset++;
}
@@ -170,12 +164,9 @@ static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
* -----------------------------------------
* |1bit | 1bit|2 bits|4bits| 1 octet |
*/
- dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
- I40E_IEEE_PFC_WILLING_SHIFT);
- dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
- I40E_IEEE_PFC_MBC_SHIFT);
- dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
- I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.willing = FIELD_GET(I40E_IEEE_PFC_WILLING_MASK, buf[0]);
+ dcbcfg->pfc.mbc = FIELD_GET(I40E_IEEE_PFC_MBC_MASK, buf[0]);
+ dcbcfg->pfc.pfccap = FIELD_GET(I40E_IEEE_PFC_CAP_MASK, buf[0]);
dcbcfg->pfc.pfcenable = buf[1];
}
@@ -196,8 +187,7 @@ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
u8 *buf;
typelength = ntohs(tlv->typelength);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
buf = tlv->tlvinfo;
/* The App priority table starts 5 octets after TLV header */
@@ -215,12 +205,10 @@ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
* -----------------------------------------
*/
while (offset < length) {
- dcbcfg->app[i].priority = (u8)((buf[offset] &
- I40E_IEEE_APP_PRIO_MASK) >>
- I40E_IEEE_APP_PRIO_SHIFT);
- dcbcfg->app[i].selector = (u8)((buf[offset] &
- I40E_IEEE_APP_SEL_MASK) >>
- I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].priority = FIELD_GET(I40E_IEEE_APP_PRIO_MASK,
+ buf[offset]);
+ dcbcfg->app[i].selector = FIELD_GET(I40E_IEEE_APP_SEL_MASK,
+ buf[offset]);
dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
buf[offset + 2];
/* Move to next app */
@@ -248,8 +236,7 @@ static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
u8 subtype;
ouisubtype = ntohl(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
- I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ subtype = FIELD_GET(I40E_LLDP_TLV_SUBTYPE_MASK, ouisubtype);
switch (subtype) {
case I40E_IEEE_SUBTYPE_ETS_CFG:
i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
@@ -299,11 +286,9 @@ static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
* -----------------------------------------
*/
for (i = 0; i < 4; i++) {
- priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
- etscfg->prioritytable[i * 2] = priority;
- priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
- I40E_CEE_PGID_PRIO_0_SHIFT);
+ priority = FIELD_GET(I40E_CEE_PGID_PRIO_1_MASK, buf[offset]);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = FIELD_GET(I40E_CEE_PGID_PRIO_0_MASK, buf[offset]);
etscfg->prioritytable[i * 2 + 1] = priority;
offset++;
}
@@ -360,8 +345,7 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
u8 i;
typelength = ntohs(tlv->hdr.typelen);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
dcbcfg->numapps = length / sizeof(*app);
@@ -417,15 +401,13 @@ static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
u32 ouisubtype;
ouisubtype = ntohl(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
- I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ subtype = FIELD_GET(I40E_LLDP_TLV_SUBTYPE_MASK, ouisubtype);
/* Return if not CEE DCBX */
if (subtype != I40E_CEE_DCBX_TYPE)
return;
typelength = ntohs(tlv->typelength);
- tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ tlvlen = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
sizeof(struct i40e_cee_ctrl_tlv);
/* Return if no CEE DCBX Feature TLVs */
@@ -435,11 +417,8 @@ static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
typelength = ntohs(sub_tlv->hdr.typelen);
- sublen = (u16)((typelength &
- I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
- subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
- I40E_LLDP_TLV_TYPE_SHIFT);
+ sublen = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
+ subtype = FIELD_GET(I40E_LLDP_TLV_TYPE_MASK, typelength);
switch (subtype) {
case I40E_CEE_SUBTYPE_PG_CFG:
i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
@@ -476,8 +455,7 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
u32 oui;
ouisubtype = ntohl(tlv->ouisubtype);
- oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
- I40E_LLDP_TLV_OUI_SHIFT);
+ oui = FIELD_GET(I40E_LLDP_TLV_OUI_MASK, ouisubtype);
switch (oui) {
case I40E_IEEE_8021QAZ_OUI:
i40e_parse_ieee_tlv(tlv, dcbcfg);
@@ -515,10 +493,8 @@ int i40e_lldp_to_dcb_config(u8 *lldpmib,
tlv = (struct i40e_lldp_org_tlv *)lldpmib;
while (1) {
typelength = ntohs(tlv->typelength);
- type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
- I40E_LLDP_TLV_TYPE_SHIFT);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ type = FIELD_GET(I40E_LLDP_TLV_TYPE_MASK, typelength);
+ length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
offset += sizeof(typelength) + length;
/* END TLV or beyond LLDPDU size */
@@ -592,7 +568,7 @@ static void i40e_cee_to_dcb_v1_config(
{
u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status);
u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
- u8 i, tc, err;
+ u8 i, err;
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
@@ -601,13 +577,13 @@ static void i40e_cee_to_dcb_v1_config(
* from those in the CEE Priority Group sub-TLV.
*/
for (i = 0; i < 4; i++) {
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_0_MASK) >>
- I40E_CEE_PGID_PRIO_0_SHIFT);
- dcbcfg->etscfg.prioritytable[i * 2] = tc;
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
+ u8 tc;
+
+ tc = FIELD_GET(I40E_CEE_PGID_PRIO_0_MASK,
+ cee_cfg->oper_prio_tc[i]);
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = FIELD_GET(I40E_CEE_PGID_PRIO_1_MASK,
+ cee_cfg->oper_prio_tc[i]);
dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
}
@@ -629,8 +605,7 @@ static void i40e_cee_to_dcb_v1_config(
dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
- status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
- I40E_AQC_CEE_APP_STATUS_SHIFT;
+ status = FIELD_GET(I40E_AQC_CEE_APP_STATUS_MASK, tlv_status);
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
/* Add APPs if Error is False */
if (!err) {
@@ -639,22 +614,19 @@ static void i40e_cee_to_dcb_v1_config(
/* FCoE APP */
dcbcfg->app[0].priority =
- (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
- I40E_AQC_CEE_APP_FCOE_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_FCOE_MASK, app_prio);
dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
/* iSCSI APP */
dcbcfg->app[1].priority =
- (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
- I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_ISCSI_MASK, app_prio);
dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
/* FIP APP */
dcbcfg->app[2].priority =
- (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
- I40E_AQC_CEE_APP_FIP_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_FIP_MASK, app_prio);
dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
}
@@ -673,7 +645,7 @@ static void i40e_cee_to_dcb_config(
{
u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
- u8 i, tc, err, sync, oper;
+ u8 i, err, sync, oper;
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
@@ -682,13 +654,13 @@ static void i40e_cee_to_dcb_config(
* from those in the CEE Priority Group sub-TLV.
*/
for (i = 0; i < 4; i++) {
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_0_MASK) >>
- I40E_CEE_PGID_PRIO_0_SHIFT);
- dcbcfg->etscfg.prioritytable[i * 2] = tc;
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
+ u8 tc;
+
+ tc = FIELD_GET(I40E_CEE_PGID_PRIO_0_MASK,
+ cee_cfg->oper_prio_tc[i]);
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = FIELD_GET(I40E_CEE_PGID_PRIO_1_MASK,
+ cee_cfg->oper_prio_tc[i]);
dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
}
@@ -711,8 +683,7 @@ static void i40e_cee_to_dcb_config(
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
i = 0;
- status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
- I40E_AQC_CEE_FCOE_STATUS_SHIFT;
+ status = FIELD_GET(I40E_AQC_CEE_FCOE_STATUS_MASK, tlv_status);
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
@@ -720,15 +691,13 @@ static void i40e_cee_to_dcb_config(
if (!err && sync && oper) {
/* FCoE APP */
dcbcfg->app[i].priority =
- (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
- I40E_AQC_CEE_APP_FCOE_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_FCOE_MASK, app_prio);
dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
i++;
}
- status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
- I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+ status = FIELD_GET(I40E_AQC_CEE_ISCSI_STATUS_MASK, tlv_status);
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
@@ -736,15 +705,13 @@ static void i40e_cee_to_dcb_config(
if (!err && sync && oper) {
/* iSCSI APP */
dcbcfg->app[i].priority =
- (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
- I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_ISCSI_MASK, app_prio);
dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
i++;
}
- status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
- I40E_AQC_CEE_FIP_STATUS_SHIFT;
+ status = FIELD_GET(I40E_AQC_CEE_FIP_STATUS_MASK, tlv_status);
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
@@ -752,8 +719,7 @@ static void i40e_cee_to_dcb_config(
if (!err && sync && oper) {
/* FIP APP */
dcbcfg->app[i].priority =
- (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
- I40E_AQC_CEE_APP_FIP_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_FIP_MASK, app_prio);
dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
i++;
@@ -804,14 +770,11 @@ int i40e_get_dcb_config(struct i40e_hw *hw)
int ret = 0;
/* If Firmware version < v4.33 on X710/XL710, IEEE only */
- if ((hw->mac.type == I40E_MAC_XL710) &&
- (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
- (hw->aq.fw_maj_ver < 4)))
+ if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 33))
return i40e_get_ieee_dcb_config(hw);
/* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
- if ((hw->mac.type == I40E_MAC_XL710) &&
- ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
+ if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_eq(hw, 4, 33)) {
ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
sizeof(cee_v1_cfg), NULL);
if (!ret) {
@@ -877,7 +840,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
return -EOPNOTSUPP;
/* Read LLDP NVM area */
- if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
+ if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
u8 offset = 0;
if (hw->mac.type == I40E_MAC_XL710)
@@ -1189,7 +1152,7 @@ static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
selector = dcbcfg->app[i].selector & 0x7;
buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
- buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
/* Move to next app */
offset += 3;
i++;
@@ -1285,8 +1248,7 @@ int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
do {
i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
typelength = ntohs(tlv->typelength);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
if (length)
offset += length + I40E_IEEE_TLV_HEADER_LENGTH;
/* END TLV or beyond LLDPDU size */
@@ -1321,20 +1283,16 @@ void i40e_dcb_hw_rx_fifo_config(struct i40e_hw *hw,
u32 reg = rd32(hw, I40E_PRTDCB_RETSC);
reg &= ~I40E_PRTDCB_RETSC_ETS_MODE_MASK;
- reg |= ((u32)ets_mode << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) &
- I40E_PRTDCB_RETSC_ETS_MODE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSC_ETS_MODE_MASK, ets_mode);
reg &= ~I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK;
- reg |= ((u32)non_ets_mode << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) &
- I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK, non_ets_mode);
reg &= ~I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK;
- reg |= (max_exponent << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) &
- I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK, max_exponent);
reg &= ~I40E_PRTDCB_RETSC_LLTC_MASK;
- reg |= (lltc_map << I40E_PRTDCB_RETSC_LLTC_SHIFT) &
- I40E_PRTDCB_RETSC_LLTC_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSC_LLTC_MASK, lltc_map);
wr32(hw, I40E_PRTDCB_RETSC, reg);
}
@@ -1389,14 +1347,12 @@ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
*/
reg = rd32(hw, I40E_PRT_SWR_PM_THR);
reg &= ~I40E_PRT_SWR_PM_THR_THRESHOLD_MASK;
- reg |= (threshold << I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT) &
- I40E_PRT_SWR_PM_THR_THRESHOLD_MASK;
+ reg |= FIELD_PREP(I40E_PRT_SWR_PM_THR_THRESHOLD_MASK, threshold);
wr32(hw, I40E_PRT_SWR_PM_THR, reg);
reg = rd32(hw, I40E_PRTDCB_RPPMC);
reg &= ~I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK;
- reg |= (fifo_size << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) &
- I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK, fifo_size);
wr32(hw, I40E_PRTDCB_RPPMC, reg);
}
@@ -1438,19 +1394,17 @@ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
reg &= ~I40E_PRTDCB_MFLCN_RPFCE_MASK;
if (pfc_en) {
- reg |= BIT(I40E_PRTDCB_MFLCN_RPFCM_SHIFT) &
- I40E_PRTDCB_MFLCN_RPFCM_MASK;
- reg |= ((u32)pfc_en << I40E_PRTDCB_MFLCN_RPFCE_SHIFT) &
- I40E_PRTDCB_MFLCN_RPFCE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_MFLCN_RPFCM_MASK, 1);
+ reg |= FIELD_PREP(I40E_PRTDCB_MFLCN_RPFCE_MASK,
+ pfc_en);
}
wr32(hw, I40E_PRTDCB_MFLCN, reg);
reg = rd32(hw, I40E_PRTDCB_FCCFG);
reg &= ~I40E_PRTDCB_FCCFG_TFCE_MASK;
if (pfc_en)
- reg |= (I40E_DCB_PFC_ENABLED <<
- I40E_PRTDCB_FCCFG_TFCE_SHIFT) &
- I40E_PRTDCB_FCCFG_TFCE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_FCCFG_TFCE_MASK,
+ I40E_DCB_PFC_ENABLED);
wr32(hw, I40E_PRTDCB_FCCFG, reg);
/* FCTTV and FCRTV to be set by default */
@@ -1468,25 +1422,22 @@ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE);
reg &= ~I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK;
- reg |= ((u32)pfc_en <<
- I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) &
- I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK;
+ reg |= FIELD_PREP(I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK,
+ pfc_en);
wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE, reg);
reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE);
reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK;
- reg |= ((u32)pfc_en <<
- I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) &
- I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK;
+ reg |= FIELD_PREP(I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK,
+ pfc_en);
wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE, reg);
for (i = 0; i < I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX; i++) {
reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i));
reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK;
if (pfc_en) {
- reg |= ((u32)refresh_time <<
- I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) &
- I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK;
+ reg |= FIELD_PREP(I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK,
+ refresh_time);
}
wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i), reg);
}
@@ -1498,14 +1449,12 @@ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
reg = rd32(hw, I40E_PRTDCB_TC2PFC);
reg &= ~I40E_PRTDCB_TC2PFC_TC2PFC_MASK;
- reg |= ((u32)tc2pfc << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) &
- I40E_PRTDCB_TC2PFC_TC2PFC_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_TC2PFC_TC2PFC_MASK, tc2pfc);
wr32(hw, I40E_PRTDCB_TC2PFC, reg);
reg = rd32(hw, I40E_PRTDCB_RUP);
reg &= ~I40E_PRTDCB_RUP_NOVLANUP_MASK;
- reg |= ((u32)first_pfc_prio << I40E_PRTDCB_RUP_NOVLANUP_SHIFT) &
- I40E_PRTDCB_RUP_NOVLANUP_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RUP_NOVLANUP_MASK, first_pfc_prio);
wr32(hw, I40E_PRTDCB_RUP, reg);
reg = rd32(hw, I40E_PRTDCB_TDPMC);
@@ -1537,8 +1486,7 @@ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc)
u32 reg = rd32(hw, I40E_PRTDCB_GENC);
reg &= ~I40E_PRTDCB_GENC_NUMTC_MASK;
- reg |= ((u32)num_tc << I40E_PRTDCB_GENC_NUMTC_SHIFT) &
- I40E_PRTDCB_GENC_NUMTC_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_GENC_NUMTC_MASK, num_tc);
wr32(hw, I40E_PRTDCB_GENC, reg);
}
@@ -1552,8 +1500,7 @@ u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw)
{
u32 reg = rd32(hw, I40E_PRTDCB_GENC);
- return (u8)((reg & I40E_PRTDCB_GENC_NUMTC_MASK) >>
- I40E_PRTDCB_GENC_NUMTC_SHIFT);
+ return FIELD_GET(I40E_PRTDCB_GENC_NUMTC_MASK, reg);
}
/**
@@ -1576,13 +1523,13 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
reg = rd32(hw, I40E_PRTDCB_RETSTCC(i));
reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
- I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
- reg |= ((u32)bw_share[i] << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
- I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
- reg |= ((u32)mode[i] << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
- I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
- reg |= ((u32)prio_type[i] << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
- I40E_PRTDCB_RETSTCC_ETSTC_MASK;
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK);
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_BWSHARE_MASK,
+ bw_share[i]);
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK,
+ mode[i]);
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_ETSTC_MASK,
+ prio_type[i]);
wr32(hw, I40E_PRTDCB_RETSTCC(i), reg);
}
}
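The hunk above also fixes a subtle bug: the old clear expression OR'ed in I40E_PRTDCB_RETSTCC_ETSTC_SHIFT (a bit position) where the corresponding _MASK was intended, so the ETSTC field itself was not cleared before the new value was OR'ed back in. A tiny sketch of why a shift constant cannot substitute for a mask (the bit position is chosen for illustration only):

#include <linux/bits.h>

#define DEMO_ETSTC_SHIFT	30
#define DEMO_ETSTC_MASK		BIT(DEMO_ETSTC_SHIFT)

static u32 demo_clear_etstc_wrong(u32 reg)
{
	/* ~DEMO_ETSTC_SHIFT == ~30: clears bits 1..4 and leaves bit 30 untouched */
	return reg & ~DEMO_ETSTC_SHIFT;
}

static u32 demo_clear_etstc_right(u32 reg)
{
	/* ~BIT(30): clears exactly the field being reprogrammed */
	return reg & ~DEMO_ETSTC_MASK;
}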
@@ -1722,8 +1669,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_SLW);
reg &= ~I40E_PRTRPB_SLW_SLW_MASK;
- reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) &
- I40E_PRTRPB_SLW_SLW_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SLW_SLW_MASK, new_val);
wr32(hw, I40E_PRTRPB_SLW, reg);
}
@@ -1736,8 +1682,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_SLT(i));
reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) &
- I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SLT_SLT_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_SLT(i), reg);
}
@@ -1746,8 +1692,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_DLW(i));
reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) &
- I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DLW_DLW_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_DLW(i), reg);
}
}
@@ -1758,8 +1704,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_SHW);
reg &= ~I40E_PRTRPB_SHW_SHW_MASK;
- reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) &
- I40E_PRTRPB_SHW_SHW_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SHW_SHW_MASK, new_val);
wr32(hw, I40E_PRTRPB_SHW, reg);
}
@@ -1772,8 +1717,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_SHT(i));
reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) &
- I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SHT_SHT_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_SHT(i), reg);
}
@@ -1782,8 +1727,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_DHW(i));
reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) &
- I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DHW_DHW_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_DHW(i), reg);
}
}
@@ -1793,8 +1738,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
new_val = new_pb_cfg->tc_pool_size[i];
reg = rd32(hw, I40E_PRTRPB_DPS(i));
reg &= ~I40E_PRTRPB_DPS_DPS_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DPS_DPS_TCN_SHIFT) &
- I40E_PRTRPB_DPS_DPS_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DPS_DPS_TCN_MASK, new_val);
wr32(hw, I40E_PRTRPB_DPS(i), reg);
}
@@ -1802,8 +1746,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
new_val = new_pb_cfg->shared_pool_size;
reg = rd32(hw, I40E_PRTRPB_SPS);
reg &= ~I40E_PRTRPB_SPS_SPS_MASK;
- reg |= (new_val << I40E_PRTRPB_SPS_SPS_SHIFT) &
- I40E_PRTRPB_SPS_SPS_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SPS_SPS_MASK, new_val);
wr32(hw, I40E_PRTRPB_SPS, reg);
/* Program the shared pool low water mark per port if increasing */
@@ -1812,8 +1755,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_SLW);
reg &= ~I40E_PRTRPB_SLW_SLW_MASK;
- reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) &
- I40E_PRTRPB_SLW_SLW_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SLW_SLW_MASK, new_val);
wr32(hw, I40E_PRTRPB_SLW, reg);
}
@@ -1826,8 +1768,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_SLT(i));
reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) &
- I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SLT_SLT_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_SLT(i), reg);
}
@@ -1836,8 +1778,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_DLW(i));
reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) &
- I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DLW_DLW_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_DLW(i), reg);
}
}
@@ -1848,8 +1790,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_SHW);
reg &= ~I40E_PRTRPB_SHW_SHW_MASK;
- reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) &
- I40E_PRTRPB_SHW_SHW_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SHW_SHW_MASK, new_val);
wr32(hw, I40E_PRTRPB_SHW, reg);
}
@@ -1862,8 +1803,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_SHT(i));
reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) &
- I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SHT_SHT_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_SHT(i), reg);
}
@@ -1872,8 +1813,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_DHW(i));
reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) &
- I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DHW_DHW_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_DHW(i), reg);
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 6b60dc9b7736..d76497566e40 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -43,7 +43,7 @@
#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
#define I40E_LLDP_TLV_OUI_SHIFT 8
-#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFFU << I40E_LLDP_TLV_OUI_SHIFT)
/* Defines for IEEE ETS TLV */
#define I40E_IEEE_ETS_MAXTC_SHIFT 0
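The added U suffix on the OUI mask is significant: 0xFFFFFF is a signed int, and shifting it left by 8 lands in the sign bit, which is undefined behaviour; with the suffix the constant expression is evaluated as unsigned and also hands the bitfield helpers an unsigned mask. Sketch of the corrected form (demo names only):

#include <linux/bitfield.h>
#include <linux/types.h>

#define DEMO_TLV_OUI_SHIFT	8
#define DEMO_TLV_OUI_MASK	(0xFFFFFFU << DEMO_TLV_OUI_SHIFT)	/* 0xFFFFFF00, computed without signed overflow */

static u32 demo_get_oui(u32 ouisubtype)
{
	return FIELD_GET(DEMO_TLV_OUI_MASK, ouisubtype);
}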
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 077a95dad32c..8aa43aefe84c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -21,8 +21,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
u32 val;
val = rd32(hw, I40E_PRTDCB_GENC);
- *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
- I40E_PRTDCB_GENC_PFCLDA_SHIFT);
+ *delay = FIELD_GET(I40E_PRTDCB_GENC_PFCLDA_MASK, val);
}
/**
@@ -310,8 +309,8 @@ static u8 i40e_dcbnl_getstate(struct net_device *netdev)
struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
dev_dbg(&pf->pdev->dev, "DCB state=%d\n",
- !!(pf->flags & I40E_FLAG_DCB_ENABLED));
- return !!(pf->flags & I40E_FLAG_DCB_ENABLED);
+ test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0);
+ return test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0;
}
/**
@@ -331,19 +330,19 @@ static u8 i40e_dcbnl_setstate(struct net_device *netdev, u8 state)
return ret;
dev_dbg(&pf->pdev->dev, "new state=%d current state=%d\n",
- state, (pf->flags & I40E_FLAG_DCB_ENABLED) ? 1 : 0);
+ state, test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0);
/* Nothing to do */
- if (!state == !(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!state == !test_bit(I40E_FLAG_DCB_ENA, pf->flags))
return ret;
if (i40e_is_sw_dcb(pf)) {
if (state) {
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_ENA, pf->flags);
memcpy(&pf->hw.desired_dcbx_config,
&pf->hw.local_dcbx_config,
sizeof(struct i40e_dcbx_config));
} else {
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
}
} else {
/* Cannot directly manipulate FW LLDP Agent */
@@ -653,7 +652,7 @@ static u8 i40e_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
{
struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
return I40E_DCBNL_STATUS_ERROR;
switch (capid) {
@@ -693,7 +692,7 @@ static int i40e_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
return -EINVAL;
*num = I40E_MAX_TRAFFIC_CLASS;
@@ -827,15 +826,12 @@ static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
u8 *perm_addr)
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
- int i, j;
+ int i;
memset(perm_addr, 0xff, MAX_ADDR_LEN);
for (i = 0; i < dev->addr_len; i++)
perm_addr[i] = pf->hw.mac.perm_addr[i];
-
- for (j = 0; j < dev->addr_len; j++, i++)
- perm_addr[i] = pf->hw.mac.san_addr[j];
}
static const struct dcbnl_rtnl_ops dcbnl_ops = {
@@ -891,11 +887,11 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
return;
/* DCB not enabled */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
return;
/* MFP mode but not an iSCSI PF so return */
- if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(hw->func_caps.iscsi))
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(hw->func_caps.iscsi))
return;
dcbxcfg = &hw->local_dcbx_config;
@@ -951,16 +947,16 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_dcb_app_priority_table *app)
{
+ struct i40e_vsi *vsi;
int v, err;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v] && pf->vsi[v]->netdev) {
- err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ if (vsi->netdev) {
+ err = i40e_dcbnl_vsi_del_app(vsi, app);
dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
- pf->vsi[v]->seid, err, app->selector,
+ vsi->seid, err, app->selector,
app->protocolid, app->priority);
}
- }
}
/**
@@ -1002,7 +998,7 @@ void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
int i;
/* MFP mode but not an iSCSI PF so return */
- if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(pf->hw.func_caps.iscsi))
return;
for (i = 0; i < old_cfg->numapps; i++) {
@@ -1025,7 +1021,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
/* Not DCB capable */
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
return;
dev->dcbnl_ops = &dcbnl_ops;
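Two conversions recur in this file and the ones that follow: pf->flags becomes a bitmap queried with test_bit()/set_bit()/clear_bit() against I40E_FLAG_* bit numbers, and open-coded scans of pf->vsi[] / pf->veb[] are replaced by the i40e_pf_for_each_vsi() / i40e_pf_for_each_veb() iterators that skip unallocated slots. A rough sketch of the loop shape the VSI iterator stands in for, plus the flags check (an approximation of the macro's behaviour, not its exact definition in i40e.h):

static bool demo_dcb_enabled(struct i40e_pf *pf)
{
	/* pf->flags is a DECLARE_BITMAP() now, not a word of OR'ed flag bits */
	return test_bit(I40E_FLAG_DCB_ENA, pf->flags);
}

static void demo_reset_all_vsi_stats(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;
	int i;

	/* roughly what i40e_pf_for_each_vsi(pf, i, vsi) expands to */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		vsi = pf->vsi[i];
		if (!vsi)
			continue;	/* slot not currently allocated */
		i40e_vsi_reset_stats(vsi);
	}
}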
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index cf25bfc5dc3f..daa9f2c42f70 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -81,8 +81,8 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new,
struct i40e_profile_info *old)
{
- unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16);
- unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16);
+ unsigned int group_id_old = FIELD_GET(0x00FF0000, old->track_id);
+ unsigned int group_id_new = FIELD_GET(0x00FF0000, new->track_id);
/* 0x00 group must be only the first */
if (group_id_new == 0)
@@ -407,8 +407,9 @@ static int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
**/
static int i40e_ddp_restore(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+ struct net_device *netdev = vsi->netdev;
struct i40e_ddp_old_profile_list *entry;
- struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
int status = 0;
if (!list_empty(&pf->ddp_old_prof)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debug.h b/drivers/net/ethernet/intel/i40e/i40e_debug.h
index 27ebc72d8bfe..e9871dfb32bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debug.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_debug.h
@@ -37,6 +37,7 @@ struct i40e_hw;
struct device *i40e_hw_to_dev(struct i40e_hw *hw);
#define hw_dbg(hw, S, A...) dev_dbg(i40e_hw_to_dev(hw), S, ##A)
+#define hw_warn(hw, S, A...) dev_warn(i40e_hw_to_dev(hw), S, ##A)
#define i40e_debug(h, m, s, ...) \
do { \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 999c9708def5..abf624d770e6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -24,31 +24,13 @@ enum ring_type {
**/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
{
- int i;
-
- if (seid < 0)
+ if (seid < 0) {
dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
- else
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
- return pf->vsi[i];
-
- return NULL;
-}
-/**
- * i40e_dbg_find_veb - searches for the veb with the given seid
- * @pf: the PF structure to search for the veb
- * @seid: seid of the veb it is searching for
- **/
-static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
-{
- int i;
+ return NULL;
+ }
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == seid)
- return pf->veb[i];
- return NULL;
+ return i40e_pf_get_vsi_by_seid(pf, seid);
}
/**************************************************************
@@ -71,6 +53,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ struct i40e_vsi *main_vsi;
int bytes_not_copied;
int buf_size = 256;
char *buf;
@@ -86,8 +69,8 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
if (!buf)
return -ENOSPC;
- len = snprintf(buf, buf_size, "%s: %s\n",
- pf->vsi[pf->lan_vsi]->netdev->name,
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
i40e_dbg_command_buf);
bytes_not_copied = copy_to_user(buffer, buf, len);
@@ -146,10 +129,9 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" state[%d] = %08lx\n",
i, vsi->state[i]);
- if (vsi == pf->vsi[pf->lan_vsi])
- dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
+ if (vsi->type == I40E_VSI_MAIN)
+ dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
pf->hw.mac.addr,
- pf->hw.mac.san_addr,
pf->hw.mac.port_addr);
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
dev_info(&pf->pdev->dev,
@@ -654,12 +636,11 @@ out:
**/
static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i])
- dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
- i, pf->vsi[i]->seid);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, vsi->seid);
}
/**
@@ -697,15 +678,14 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
{
struct i40e_veb *veb;
- veb = i40e_dbg_find_veb(pf, seid);
+ veb = i40e_pf_get_veb_by_seid(pf, seid);
if (!veb) {
dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
return;
}
dev_info(&pf->pdev->dev,
- "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
- veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
- veb->uplink_seid,
+ "veb idx=%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
+ veb->idx, veb->stats_idx, veb->seid, veb->uplink_seid,
veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
i40e_dbg_dump_eth_stats(pf, &veb->stats);
}
@@ -719,11 +699,8 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
struct i40e_veb *veb;
int i;
- for (i = 0; i < I40E_MAX_VEB; i++) {
- veb = pf->veb[i];
- if (veb)
- i40e_dbg_dump_veb_seid(pf, veb->seid);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ i40e_dbg_dump_veb_seid(pf, veb->seid);
}
/**
@@ -810,7 +787,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
if (cnt == 0) {
/* default to PF VSI */
- vsi_seid = pf->vsi[pf->lan_vsi]->seid;
+ vsi = i40e_pf_get_main_vsi(pf);
+ vsi_seid = vsi->seid;
} else if (vsi_seid < 0) {
dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
vsi_seid);
@@ -820,8 +798,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
/* By default we are in VEPA mode, if this is the first VF/VMDq
* VSI to be added switch to VEB mode.
*/
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
}
@@ -852,10 +830,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "add relay", 9) == 0) {
struct i40e_veb *veb;
- int uplink_seid, i;
+ u8 enabled_tc = 0x1;
+ int uplink_seid;
cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
- if (cnt != 2) {
+ if (cnt == 0) {
+ uplink_seid = 0;
+ vsi_seid = 0;
+ } else if (cnt != 2) {
dev_info(&pf->pdev->dev,
"add relay: bad command string, cnt=%d\n",
cnt);
@@ -867,33 +849,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "add relay: VSI %d not found\n", vsi_seid);
- goto command_write_done;
- }
-
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
- break;
- if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
- uplink_seid != pf->mac_seid) {
+ if (uplink_seid != 0 && uplink_seid != pf->mac_seid) {
dev_info(&pf->pdev->dev,
"add relay: relay uplink %d not found\n",
uplink_seid);
goto command_write_done;
+ } else if (uplink_seid) {
+ vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "add relay: VSI %d not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+ enabled_tc = vsi->tc_config.enabled_tc;
+ } else if (vsi_seid) {
+ dev_info(&pf->pdev->dev,
+ "add relay: VSI must be 0 for floating relay\n");
+ goto command_write_done;
}
- veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
- vsi->tc_config.enabled_tc);
+ veb = i40e_veb_setup(pf, uplink_seid, vsi_seid, enabled_tc);
if (veb)
dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
else
dev_info(&pf->pdev->dev, "add relay failed\n");
} else if (strncmp(cmd_buf, "del relay", 9) == 0) {
+ struct i40e_veb *veb;
int i;
+
cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev,
@@ -907,9 +892,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
/* find the veb */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->seid == veb_seid)
break;
+
if (i >= I40E_MAX_VEB) {
dev_info(&pf->pdev->dev,
"del relay: relay %d not found\n", veb_seid);
@@ -917,7 +903,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
- i40e_veb_release(pf->veb[i]);
+ i40e_veb_release(veb);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
unsigned int v;
int ret;
@@ -1029,9 +1015,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"emp reset count: %d\n", pf->empr_count);
dev_info(&pf->pdev->dev,
"pf reset count: %d\n", pf->pfr_count);
- dev_info(&pf->pdev->dev,
- "pf tx sluggish count: %d\n",
- pf->tx_sluggish_count);
} else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
struct i40e_aqc_query_port_ets_config_resp *bw_data;
struct i40e_dcbx_config *cfg =
@@ -1049,7 +1032,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- vsi = pf->vsi[pf->lan_vsi];
+ vsi = i40e_pf_get_main_vsi(pf);
switch_id =
le16_to_cpu(vsi->info.switch_id) &
I40E_AQ_VSI_SW_ID_MASK;
@@ -1255,8 +1238,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (cnt == 0) {
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- i40e_vsi_reset_stats(pf->vsi[i]);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ i40e_vsi_reset_stats(vsi);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
} else if (cnt == 1) {
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
@@ -1399,6 +1382,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
i40e_get_current_fd_count(pf));
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
+ /* Get main VSI */
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1410,10 +1396,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
- pf->hw.mac.addr,
- ETH_P_LLDP, 0,
- pf->vsi[pf->lan_vsi]->seid,
- 0, true, NULL, NULL);
+ pf->hw.mac.addr, ETH_P_LLDP, 0,
+ main_vsi->seid, 0, true, NULL,
+ NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"%s: Add Control Packet Filter AQ command failed =0x%x\n",
@@ -1428,10 +1413,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
int ret;
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
- pf->hw.mac.addr,
- ETH_P_LLDP, 0,
- pf->vsi[pf->lan_vsi]->seid,
- 0, false, NULL, NULL);
+ pf->hw.mac.addr, ETH_P_LLDP, 0,
+ main_vsi->seid, 0, false, NULL,
+ NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"%s: Remove Control Packet Filter AQ command failed =0x%x\n",
@@ -1658,6 +1642,7 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ struct i40e_vsi *main_vsi;
int bytes_not_copied;
int buf_size = 256;
char *buf;
@@ -1673,8 +1658,8 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
if (!buf)
return -ENOSPC;
- len = snprintf(buf, buf_size, "%s: %s\n",
- pf->vsi[pf->lan_vsi]->netdev->name,
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
i40e_dbg_netdev_ops_buf);
bytes_not_copied = copy_to_user(buffer, buf, len);
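[Illustrative note, not part of the patch: i40e_pf_get_main_vsi(), used above in place of the open-coded pf->vsi[pf->lan_vsi] lookups, is added elsewhere in this series. A plausible minimal form, shown only as a sketch:]

/* assumed helper, for illustration only */
static inline struct i40e_vsi *i40e_pf_get_main_vsi(struct i40e_pf *pf)
{
	return pf->vsi[pf->lan_vsi];	/* main (LAN) VSI */
}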
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.c b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
index 74bc111b4849..cc4e9e2addb7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devlink.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
@@ -231,6 +231,5 @@ int i40e_devlink_create_port(struct i40e_pf *pf)
**/
void i40e_devlink_destroy_port(struct i40e_pf *pf)
{
- devlink_port_type_clear(&pf->devlink_port);
devlink_port_unregister(&pf->devlink_port);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index ece3a6b9a5c6..ab20202a3da3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -4,6 +4,7 @@
#ifndef _I40E_DIAG_H_
#define _I40E_DIAG_H_
+#include <linux/types.h>
#include "i40e_adminq_cmd.h"
/* forward-declare the HW struct for the compiler */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index fd7163128c4d..4e28785c9fb2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -430,35 +430,35 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
struct i40e_priv_flags {
char flag_string[ETH_GSTRING_LEN];
- u64 flag;
+ u8 bitno;
bool read_only;
};
-#define I40E_PRIV_FLAG(_name, _flag, _read_only) { \
+#define I40E_PRIV_FLAG(_name, _bitno, _read_only) { \
.flag_string = _name, \
- .flag = _flag, \
+ .bitno = _bitno, \
.read_only = _read_only, \
}
static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
/* NOTE: MFP setting cannot be changed */
- I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
+ I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENA, 1),
I40E_PRIV_FLAG("total-port-shutdown",
- I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED, 1),
- I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
- I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
- I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
- I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
+ I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, 1),
+ I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENA, 0),
+ I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENA, 0),
+ I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENA, 0),
+ I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENA, 0),
I40E_PRIV_FLAG("link-down-on-close",
- I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED, 0),
- I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, 0),
+ I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX_ENA, 0),
I40E_PRIV_FLAG("disable-source-pruning",
- I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
- I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
+ I40E_FLAG_SOURCE_PRUNING_DIS, 0),
+ I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_FW_LLDP_DIS, 0),
I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
I40E_PRIV_FLAG("vf-vlan-pruning",
- I40E_FLAG_VF_VLAN_PRUNING, 0),
+ I40E_FLAG_VF_VLAN_PRUNING_ENA, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -466,7 +466,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
/* Private flags with a global effect, restricted to PF 0 */
static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("vf-true-promisc-support",
- I40E_FLAG_TRUE_PROMISC_SUPPORT, 0),
+ I40E_FLAG_TRUE_PROMISC_ENA, 0),
};
#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags)
@@ -502,7 +502,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
- if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -601,7 +601,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
10000baseKX4_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR &&
- !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
+ !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseKR_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
@@ -609,7 +609,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
10000baseKR_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX &&
- !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
+ !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseKX_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
@@ -917,7 +917,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
- if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
if (hw_link_info->requested_speeds &
@@ -1241,7 +1241,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
i40e_partition_setting_complaint(pf);
return -EOPNOTSUPP;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
@@ -1488,12 +1488,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
int status = 0;
- u32 flags = 0;
int err = 0;
- flags = READ_ONCE(pf->flags);
- i40e_set_fec_in_flags(fec_cfg, &flags);
-
/* Get the current phy config */
memset(&abilities, 0, sizeof(abilities));
status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
@@ -1525,7 +1521,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
err = -EAGAIN;
goto done;
}
- pf->flags = flags;
+ i40e_set_fec_in_flags(fec_cfg, pf->flags);
status = i40e_update_link_info(hw);
if (status)
/* debug level message only due to relation to the link
@@ -1599,7 +1595,7 @@ static int i40e_set_fec_param(struct net_device *netdev,
return -EPERM;
if (hw->mac.type == I40E_MAC_X722 &&
- !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) {
+ !test_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps)) {
netdev_err(netdev, "Setting FEC encoding not supported by firmware. Please update the NVM image.\n");
return -EOPNOTSUPP;
}
@@ -1714,7 +1710,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
return -EOPNOTSUPP;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
@@ -1915,7 +1911,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
last = true;
}
- offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
+ offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i);
ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
last, NULL);
@@ -1956,9 +1952,8 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
val = X722_EEPROM_SCOPE_LIMIT + 1;
return val;
}
- val = (rd32(hw, I40E_GLPCI_LBARCTRL)
- & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
- >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
+ val = FIELD_GET(I40E_GLPCI_LBARCTRL_FL_SIZE_MASK,
+ rd32(hw, I40E_GLPCI_LBARCTRL));
/* register returns value in power of 2, 64Kbyte chunks. */
val = (64 * 1024) * BIT(val);
return val;
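[Illustrative note, not part of the patch: FIELD_GET() here and in the later hunks is the <linux/bitfield.h> helper; it derives the shift from the mask at compile time, so it is equivalent to the open-coded mask-and-shift it replaces. A small worked example:]

/* FIELD_GET(GENMASK(7, 4), 0xab) == 0xa,
 * i.e. the same as (0xab & GENMASK(7, 4)) >> 4
 */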
@@ -2015,6 +2010,18 @@ static void i40e_get_drvinfo(struct net_device *netdev,
drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
}
+static u32 i40e_get_max_num_descriptors(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ return I40E_MAX_NUM_DESCRIPTORS_XL710;
+ default:
+ return I40E_MAX_NUM_DESCRIPTORS;
+ }
+}
+
static void i40e_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -2022,10 +2029,10 @@ static void i40e_get_ringparam(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
- ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
- ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+ ring->rx_max_pending = i40e_get_max_num_descriptors(pf);
+ ring->tx_max_pending = i40e_get_max_num_descriptors(pf);
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = vsi->rx_rings[0]->count;
@@ -2050,12 +2057,12 @@ static int i40e_set_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
+ u32 new_rx_count, new_tx_count, max_num_descriptors;
struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u32 new_rx_count, new_tx_count;
u16 tx_alloc_queue_pairs;
int timeout = 50;
int i, err = 0;
@@ -2063,14 +2070,15 @@ static int i40e_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ max_num_descriptors = i40e_get_max_num_descriptors(pf);
+ if (ring->tx_pending > max_num_descriptors ||
ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
- ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->rx_pending > max_num_descriptors ||
ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
netdev_info(netdev,
"Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
ring->tx_pending, ring->rx_pending,
- I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
+ I40E_MIN_NUM_DESCRIPTORS, max_num_descriptors);
return -EINVAL;
}
@@ -2284,7 +2292,7 @@ static int i40e_get_stats_count(struct net_device *netdev)
struct i40e_pf *pf = vsi->back;
int stats_len;
- if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
+ if (vsi->type == I40E_VSI_MAIN && pf->hw.partition_id == 1)
stats_len = I40E_PF_STATS_LEN;
else
stats_len = I40E_VSI_STATS_LEN;
@@ -2414,17 +2422,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
}
rcu_read_unlock();
- if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+ if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1)
goto check_data_pointer;
- veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
- (pf->lan_veb < I40E_MAX_VEB) &&
- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
+ veb = i40e_pf_get_main_veb(pf);
+ veb_stats = veb && test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
- if (veb_stats) {
- veb = pf->veb[pf->lan_veb];
+ if (veb_stats)
i40e_update_veb_stats(veb);
- }
/* If veb stats aren't enabled, pass NULL instead of the veb so that
* we initialize stats to zero and update the data pointer
@@ -2487,7 +2492,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
"rx", i);
}
- if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+ if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1)
goto check_data_pointer;
i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
@@ -2514,13 +2519,11 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data)
u8 *p = data;
for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++)
- ethtool_sprintf(&p, "%s",
- i40e_gstrings_priv_flags[i].flag_string);
+ ethtool_puts(&p, i40e_gstrings_priv_flags[i].flag_string);
if (pf->hw.pf_id != 0)
return;
for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++)
- ethtool_sprintf(&p, "%s",
- i40e_gl_gstrings_priv_flags[i].flag_string);
+ ethtool_puts(&p, i40e_gl_gstrings_priv_flags[i].flag_string);
}
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
@@ -2548,7 +2551,7 @@ static int i40e_get_ts_info(struct net_device *dev,
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
/* only report HW timestamping if PTP is enabled */
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -2570,7 +2573,7 @@ static int i40e_get_ts_info(struct net_device *dev,
BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
- if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE)
+ if (test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps))
info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
@@ -2786,7 +2789,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
/* NVM bit on means WoL disabled for the port */
@@ -2819,10 +2822,10 @@ static int i40e_set_phys_id(struct net_device *netdev,
switch (state) {
case ETHTOOL_ID_ACTIVE:
- if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
+ if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) {
pf->led_status = i40e_led_get(hw);
} else {
- if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps))
i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL,
NULL);
ret = i40e_led_get_phy(hw, &temp_status,
@@ -2831,25 +2834,25 @@ static int i40e_set_phys_id(struct net_device *netdev,
}
return blink_freq;
case ETHTOOL_ID_ON:
- if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
+ if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps))
i40e_led_set(hw, 0xf, false);
else
ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
break;
case ETHTOOL_ID_OFF:
- if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
+ if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps))
i40e_led_set(hw, 0x0, false);
else
ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
break;
case ETHTOOL_ID_INACTIVE:
- if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
+ if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) {
i40e_led_set(hw, pf->led_status, false);
} else {
ret = i40e_led_set_phy(hw, false, pf->led_status,
(pf->phy_led_val |
I40E_PHY_LED_MODE_ORIG));
- if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps))
i40e_aq_set_phy_debug(hw, 0, NULL);
}
break;
@@ -2886,7 +2889,6 @@ static int __i40e_get_coalesce(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
ec->tx_max_coalesced_frames_irq = vsi->work_limit;
- ec->rx_max_coalesced_frames_irq = vsi->work_limit;
/* rx and tx usecs has per queue value. If user doesn't specify the
* queue, return queue 0's value to represent.
@@ -3020,7 +3022,7 @@ static int __i40e_set_coalesce(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
int i;
- if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ if (ec->tx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
if (queue < 0) {
@@ -3278,7 +3280,7 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
} else if (valid) {
data->flex_word = value & I40E_USERDEF_FLEX_WORD;
data->flex_offset =
- (value & I40E_USERDEF_FLEX_OFFSET) >> 16;
+ FIELD_GET(I40E_USERDEF_FLEX_OFFSET, value);
data->flex_filter = true;
}
@@ -3365,6 +3367,7 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
struct i40e_rx_flow_userdef userdef = {0};
struct i40e_fdir_filter *rule = NULL;
struct hlist_node *node2;
+ struct i40e_vsi *vsi;
u64 input_set;
u16 index;
@@ -3488,9 +3491,8 @@ no_input_set:
fsp->flow_type |= FLOW_EXT;
}
- if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
- struct i40e_vsi *vsi;
-
+ vsi = i40e_pf_get_main_vsi(pf);
+ if (rule->dest_vsi != vsi->id) {
vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
if (vsi && vsi->type == I40E_VSI_SRIOV) {
/* VFs are zero-indexed by the driver, but ethtool
@@ -3628,7 +3630,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
dev_err(&pf->pdev->dev,
"Change of RSS hash input set is not supported when MFP mode is enabled\n");
return -EOPNOTSUPP;
@@ -3644,19 +3646,22 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ pf->hw.caps))
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case TCP_V6_FLOW:
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ pf->hw.caps))
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case UDP_V4_FLOW:
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ pf->hw.caps)) {
set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
flow_pctypes);
set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
@@ -3666,7 +3671,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
break;
case UDP_V6_FLOW:
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ pf->hw.caps)) {
set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
flow_pctypes);
set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
@@ -4644,7 +4650,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
* main port cannot change them when in MFP mode as this would impact
* any filters on the other ports.
*/
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n");
return -EOPNOTSUPP;
}
@@ -4804,7 +4810,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
return -EINVAL;
pf = vsi->back;
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
return -EOPNOTSUPP;
if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
@@ -5001,7 +5007,7 @@ static void i40e_get_channels(struct net_device *dev,
ch->max_combined = i40e_max_channels(vsi);
/* report info for other vector */
- ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
+ ch->other_count = test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0;
ch->max_other = ch->other_count;
/* Note: This code assumes DCB is disabled for now. */
@@ -5044,7 +5050,7 @@ static int i40e_set_channels(struct net_device *dev,
return -EINVAL;
/* verify other_count has not changed */
- if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
+ if (ch->other_count != (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0))
return -EINVAL;
/* verify the number of channels does not exceed hardware limits */
@@ -5109,15 +5115,13 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
/**
* i40e_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
+ * @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Returns 0 on
* success.
**/
-static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int i40e_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5125,13 +5129,12 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
int ret;
u16 i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- seed = key;
+ seed = rxfh->key;
lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@@ -5139,7 +5142,7 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (ret)
goto out;
for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- indir[i] = (u32)(lut[i]);
+ rxfh->indir[i] = (u32)(lut[i]);
out:
kfree(lut);
@@ -5150,15 +5153,15 @@ out:
/**
* i40e_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function to use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
-static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int i40e_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5166,17 +5169,18 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
u8 *seed = NULL;
u16 i;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (key) {
+ if (rxfh->key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
GFP_KERNEL);
if (!vsi->rss_hkey_user)
return -ENOMEM;
}
- memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE);
+ memcpy(vsi->rss_hkey_user, rxfh->key, I40E_HKEY_ARRAY_SIZE);
seed = vsi->rss_hkey_user;
}
if (!vsi->rss_lut_user) {
@@ -5186,9 +5190,9 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- if (indir)
+ if (rxfh->indir)
for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
+ vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
else
i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE,
vsi->rss_size);
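[Illustrative note, not part of the patch: the get/set_rxfh conversion above follows the newer ethtool core API that bundles the RSS parameters into a single struct. The fields assumed from the usage here, as a sketch only:]

/* assumed shape of the ethtool core parameter struct (illustrative) */
struct ethtool_rxfh_param {
	u8	hfunc;		/* ETH_RSS_HASH_* */
	u32	*indir;		/* indirection table, or NULL */
	u8	*key;		/* hash key, or NULL */
	/* ... */
};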
@@ -5215,11 +5219,11 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
u32 i, j, ret_flags = 0;
for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- const struct i40e_priv_flags *priv_flags;
+ const struct i40e_priv_flags *priv_flag;
- priv_flags = &i40e_gstrings_priv_flags[i];
+ priv_flag = &i40e_gstrings_priv_flags[i];
- if (priv_flags->flag & pf->flags)
+ if (test_bit(priv_flag->bitno, pf->flags))
ret_flags |= BIT(i);
}
@@ -5227,11 +5231,11 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
return ret_flags;
for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
- const struct i40e_priv_flags *priv_flags;
+ const struct i40e_priv_flags *priv_flag;
- priv_flags = &i40e_gl_gstrings_priv_flags[j];
+ priv_flag = &i40e_gl_gstrings_priv_flags[j];
- if (priv_flags->flag & pf->flags)
+ if (test_bit(priv_flag->bitno, pf->flags))
ret_flags |= BIT(i + j);
}
@@ -5245,8 +5249,10 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
**/
static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
{
+ DECLARE_BITMAP(changed_flags, I40E_PF_FLAGS_NBITS);
+ DECLARE_BITMAP(orig_flags, I40E_PF_FLAGS_NBITS);
+ DECLARE_BITMAP(new_flags, I40E_PF_FLAGS_NBITS);
struct i40e_netdev_priv *np = netdev_priv(dev);
- u64 orig_flags, new_flags, changed_flags;
enum i40e_admin_queue_err adq_err;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
@@ -5254,51 +5260,57 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
int status;
u32 i, j;
- orig_flags = READ_ONCE(pf->flags);
- new_flags = orig_flags;
+ bitmap_copy(orig_flags, pf->flags, I40E_PF_FLAGS_NBITS);
+ bitmap_copy(new_flags, pf->flags, I40E_PF_FLAGS_NBITS);
for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- const struct i40e_priv_flags *priv_flags;
-
- priv_flags = &i40e_gstrings_priv_flags[i];
+ const struct i40e_priv_flags *priv_flag;
+ bool new_val;
- if (flags & BIT(i))
- new_flags |= priv_flags->flag;
- else
- new_flags &= ~(priv_flags->flag);
+ priv_flag = &i40e_gstrings_priv_flags[i];
+ new_val = (flags & BIT(i)) ? true : false;
/* If this is a read-only flag, it can't be changed */
- if (priv_flags->read_only &&
- ((orig_flags ^ new_flags) & ~BIT(i)))
+ if (priv_flag->read_only &&
+ test_bit(priv_flag->bitno, orig_flags) != new_val)
return -EOPNOTSUPP;
+
+ if (new_val)
+ set_bit(priv_flag->bitno, new_flags);
+ else
+ clear_bit(priv_flag->bitno, new_flags);
}
if (pf->hw.pf_id != 0)
goto flags_complete;
for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
- const struct i40e_priv_flags *priv_flags;
+ const struct i40e_priv_flags *priv_flag;
+ bool new_val;
- priv_flags = &i40e_gl_gstrings_priv_flags[j];
-
- if (flags & BIT(i + j))
- new_flags |= priv_flags->flag;
- else
- new_flags &= ~(priv_flags->flag);
+ priv_flag = &i40e_gl_gstrings_priv_flags[j];
+ new_val = (flags & BIT(i + j)) ? true : false;
/* If this is a read-only flag, it can't be changed */
- if (priv_flags->read_only &&
- ((orig_flags ^ new_flags) & ~BIT(i)))
+ if (priv_flag->read_only &&
+ test_bit(priv_flag->bitno, orig_flags) != new_val)
return -EOPNOTSUPP;
+
+ if (new_val)
+ set_bit(priv_flag->bitno, new_flags);
+ else
+ clear_bit(priv_flag->bitno, new_flags);
}
flags_complete:
- changed_flags = orig_flags ^ new_flags;
+ bitmap_xor(changed_flags, pf->flags, orig_flags, I40E_PF_FLAGS_NBITS);
- if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags))
reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
- if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
- I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
+
+ if (test_bit(I40E_FLAG_VEB_STATS_ENA, changed_flags) ||
+ test_bit(I40E_FLAG_LEGACY_RX_ENA, changed_flags) ||
+ test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, changed_flags))
reset_needed = BIT(__I40E_PF_RESET_REQUESTED);
/* Before we finalize any flag changes, we need to perform some
@@ -5306,8 +5318,8 @@ flags_complete:
*/
/* ATR eviction is not supported on all devices */
- if ((new_flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) &&
- !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
+ if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, new_flags) &&
+ !test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps))
return -EOPNOTSUPP;
/* If the driver detected FW LLDP was disabled on init, this flag could
@@ -5318,15 +5330,14 @@ flags_complete:
* disable LLDP, however we _must_ not allow the user to enable/disable
* LLDP with this flag on unsupported FW versions.
*/
- if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) {
- dev_warn(&pf->pdev->dev,
- "Device does not support changing FW LLDP\n");
- return -EOPNOTSUPP;
- }
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags) &&
+ !test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps)) {
+ dev_warn(&pf->pdev->dev,
+ "Device does not support changing FW LLDP\n");
+ return -EOPNOTSUPP;
}
- if (changed_flags & I40E_FLAG_RS_FEC &&
+ if (test_bit(I40E_FLAG_RS_FEC, changed_flags) &&
pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
pf->hw.device_id != I40E_DEV_ID_25G_B) {
dev_warn(&pf->pdev->dev,
@@ -5334,7 +5345,7 @@ flags_complete:
return -EOPNOTSUPP;
}
- if (changed_flags & I40E_FLAG_BASE_R_FEC &&
+ if (test_bit(I40E_FLAG_BASE_R_FEC, changed_flags) &&
pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
pf->hw.device_id != I40E_DEV_ID_25G_B &&
pf->hw.device_id != I40E_DEV_ID_KX_X722) {
@@ -5349,17 +5360,17 @@ flags_complete:
*/
/* Flush current ATR settings if ATR was disabled */
- if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
- !(new_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, changed_flags) &&
+ !test_bit(I40E_FLAG_FD_ATR_ENA, new_flags)) {
set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
- if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
+ if (test_bit(I40E_FLAG_TRUE_PROMISC_ENA, changed_flags)) {
u16 sw_flags = 0, valid_flags = 0;
int ret;
- if (!(new_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ if (!test_bit(I40E_FLAG_TRUE_PROMISC_ENA, new_flags))
sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
@@ -5374,17 +5385,17 @@ flags_complete:
}
}
- if ((changed_flags & I40E_FLAG_RS_FEC) ||
- (changed_flags & I40E_FLAG_BASE_R_FEC)) {
+ if (test_bit(I40E_FLAG_RS_FEC, changed_flags) ||
+ test_bit(I40E_FLAG_BASE_R_FEC, changed_flags)) {
u8 fec_cfg = 0;
- if (new_flags & I40E_FLAG_RS_FEC &&
- new_flags & I40E_FLAG_BASE_R_FEC) {
+ if (test_bit(I40E_FLAG_RS_FEC, new_flags) &&
+ test_bit(I40E_FLAG_BASE_R_FEC, new_flags)) {
fec_cfg = I40E_AQ_SET_FEC_AUTO;
- } else if (new_flags & I40E_FLAG_RS_FEC) {
+ } else if (test_bit(I40E_FLAG_RS_FEC, new_flags)) {
fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
I40E_AQ_SET_FEC_ABILITY_RS);
- } else if (new_flags & I40E_FLAG_BASE_R_FEC) {
+ } else if (test_bit(I40E_FLAG_BASE_R_FEC, new_flags)) {
fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
I40E_AQ_SET_FEC_ABILITY_KR);
}
@@ -5392,35 +5403,35 @@ flags_complete:
dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
}
- if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
- (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) {
+ if (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, changed_flags) &&
+ test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, orig_flags)) {
dev_err(&pf->pdev->dev,
"Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n");
return -EOPNOTSUPP;
}
- if ((changed_flags & I40E_FLAG_VF_VLAN_PRUNING) &&
+ if (test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, changed_flags) &&
pf->num_alloc_vfs) {
dev_warn(&pf->pdev->dev,
"Changing vf-vlan-pruning flag while VF(s) are active is not supported\n");
return -EOPNOTSUPP;
}
- if ((changed_flags & I40E_FLAG_LEGACY_RX) &&
+ if (test_bit(I40E_FLAG_LEGACY_RX_ENA, changed_flags) &&
I40E_2K_TOO_SMALL_WITH_PADDING) {
dev_warn(&pf->pdev->dev,
"2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
return -EOPNOTSUPP;
}
- if ((changed_flags & new_flags &
- I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
- (new_flags & I40E_FLAG_MFP_ENABLED))
+ if (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, changed_flags) &&
+ test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, new_flags) &&
+ test_bit(I40E_FLAG_MFP_ENA, new_flags))
dev_warn(&pf->pdev->dev,
"Turning on link-down-on-close flag may affect other partitions\n");
- if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags)) {
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, new_flags)) {
#ifdef CONFIG_I40E_DCB
i40e_dcb_sw_default_config(pf);
#endif /* CONFIG_I40E_DCB */
@@ -5461,7 +5472,7 @@ flags_complete:
* initialization or (b) while holding the RTNL lock, we don't need
* anything fancy here.
*/
- pf->flags = new_flags;
+ bitmap_copy(pf->flags, new_flags, I40E_PF_FLAGS_NBITS);
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
@@ -5491,7 +5502,7 @@ static int i40e_get_module_info(struct net_device *netdev,
int status;
/* Check if firmware supports reading module EEPROM. */
- if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+ if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n");
return -EINVAL;
}
@@ -5571,8 +5582,8 @@ static int i40e_get_module_info(struct net_device *netdev,
modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
break;
default:
- netdev_err(vsi->netdev, "Module type unrecognized\n");
- return -EINVAL;
+ netdev_dbg(vsi->netdev, "SFP module type unrecognized or no SFP connector used.\n");
+ return -EOPNOTSUPP;
}
return 0;
}
@@ -5630,7 +5641,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
-static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp phy_cfg;
@@ -5650,16 +5661,12 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (phy_cfg.eee_capability == 0)
return -EOPNOTSUPP;
- edata->supported = SUPPORTED_Autoneg;
- edata->lp_advertised = edata->supported;
-
/* Get current configuration */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
if (status)
return -EAGAIN;
- edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U;
- edata->eee_enabled = !!edata->advertised;
+ edata->eee_enabled = !!phy_cfg.eee_capability;
edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status;
@@ -5668,7 +5675,7 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
}
static int i40e_is_eee_param_supported(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5677,7 +5684,6 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
u32 value;
const char *name;
} param[] = {
- {edata->advertised & ~SUPPORTED_Autoneg, "advertise"},
{edata->tx_lpi_timer, "tx-timer"},
{edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
};
@@ -5695,7 +5701,7 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
return 0;
}
-static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int i40e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
@@ -5768,7 +5774,7 @@ static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
static const struct ethtool_ops i40e_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH |
ETHTOOL_COALESCE_TX_USECS_HIGH,
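[Illustrative note, not part of the patch: throughout the ethtool changes above and the i40e_main.c changes below, pf->flags is no longer a u64 bitmask but a bitmap manipulated with test_bit()/set_bit()/clear_bit(). A minimal sketch of the assumed declaration side; the real enum and I40E_PF_FLAGS_NBITS live in i40e.h:]

/* sketch of the assumed flag storage, for illustration only */
enum i40e_pf_flags {
	I40E_FLAG_MSIX_ENA,
	I40E_FLAG_VEB_STATS_ENA,
	I40E_FLAG_FW_LLDP_DIS,
	/* ... one bit per former I40E_FLAG_* value ... */
	I40E_PF_FLAGS_NBITS,	/* must be last */
};

struct i40e_pf {
	/* ... */
	DECLARE_BITMAP(flags, I40E_PF_FLAGS_NBITS);
	/* ... */
};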
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 3157d14d9b12..1f188c052828 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -100,6 +100,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
@@ -107,12 +108,18 @@ static struct workqueue_struct *i40e_wq;
static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
struct net_device *netdev, int delta)
{
+ struct netdev_hw_addr_list *ha_list;
struct netdev_hw_addr *ha;
if (!f || !netdev)
return;
- netdev_for_each_mc_addr(ha, netdev) {
+ if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr))
+ ha_list = &netdev->uc;
+ else
+ ha_list = &netdev->mc;
+
+ netdev_hw_addr_list_for_each(ha, ha_list) {
if (ether_addr_equal(ha->addr, f->macaddr)) {
ha->refcount += delta;
if (ha->refcount <= 0)
@@ -304,11 +311,12 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
**/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->id == id))
- return pf->vsi[i];
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->id == id)
+ return vsi;
return NULL;
}
@@ -371,7 +379,7 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
if (tx_ring) {
head = i40e_get_head(tx_ring);
/* Read interrupt register */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
val = rd32(&pf->hw,
I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
tx_ring->vsi->base_vector - 1));
@@ -546,24 +554,19 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
**/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
+ struct i40e_veb *veb;
int i;
memset(&pf->stats, 0, sizeof(pf->stats));
memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
pf->stat_offsets_loaded = false;
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (pf->veb[i]) {
- memset(&pf->veb[i]->stats, 0,
- sizeof(pf->veb[i]->stats));
- memset(&pf->veb[i]->stats_offsets, 0,
- sizeof(pf->veb[i]->stats_offsets));
- memset(&pf->veb[i]->tc_stats, 0,
- sizeof(pf->veb[i]->tc_stats));
- memset(&pf->veb[i]->tc_stats_offsets, 0,
- sizeof(pf->veb[i]->tc_stats_offsets));
- pf->veb[i]->stat_offsets_loaded = false;
- }
+ i40e_pf_for_each_veb(pf, i, veb) {
+ memset(&veb->stats, 0, sizeof(veb->stats));
+ memset(&veb->stats_offsets, 0, sizeof(veb->stats_offsets));
+ memset(&veb->tc_stats, 0, sizeof(veb->tc_stats));
+ memset(&veb->tc_stats_offsets, 0, sizeof(veb->tc_stats_offsets));
+ veb->stat_offsets_loaded = false;
}
pf->hw_csum_rx_error = 0;
}
@@ -987,7 +990,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
ns->tx_dropped = es->tx_discards;
/* pull in a couple PF stats if this is the main vsi */
- if (vsi == pf->vsi[pf->lan_vsi]) {
+ if (vsi->type == I40E_VSI_MAIN) {
ns->rx_crc_errors = pf->stats.crc_errors;
ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
ns->rx_length_errors = pf->stats.rx_length_errors;
@@ -1197,11 +1200,9 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
val = rd32(hw, I40E_PRTPM_EEE_STAT);
nsd->tx_lpi_status =
- (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
- I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+ FIELD_GET(I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK, val);
nsd->rx_lpi_status =
- (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
- I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+ FIELD_GET(I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK, val);
i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
pf->stat_offsets_loaded,
&osd->tx_lpi_count, &nsd->tx_lpi_count);
@@ -1209,13 +1210,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
pf->stat_offsets_loaded,
&osd->rx_lpi_count, &nsd->rx_lpi_count);
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
!test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
nsd->fd_sb_status = true;
else
nsd->fd_sb_status = false;
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
!test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
nsd->fd_atr_status = true;
else
@@ -1234,7 +1235,7 @@ void i40e_update_stats(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- if (vsi == pf->vsi[pf->lan_vsi])
+ if (vsi->type == I40E_VSI_MAIN)
i40e_update_pf_stats(pf);
i40e_update_vsi_stats(vsi);
@@ -1253,8 +1254,11 @@ int i40e_count_filters(struct i40e_vsi *vsi)
int bkt;
int cnt = 0;
- hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
- ++cnt;
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_NEW ||
+ f->state == I40E_FILTER_ACTIVE)
+ ++cnt;
+ }
return cnt;
}
@@ -1485,7 +1489,7 @@ static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
return pvid;
is_any = (trusted ||
- !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING));
+ !test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, pf->flags));
if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
(!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
@@ -1890,7 +1894,7 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
u8 *lut;
int ret;
- if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
return 0;
if (!vsi->rss_size)
vsi->rss_size = min_t(int, pf->alloc_rss_size,
@@ -2045,7 +2049,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
*/
if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs;
- else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ else if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
vsi->num_queue_pairs = pf->num_lan_msix;
else
vsi->num_queue_pairs = 1;
@@ -2058,7 +2062,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
else
num_tc_qps = vsi->alloc_queue_pairs;
- if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (enabled_tc && test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
/* Find numtc from enabled TC bitmap */
for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (enabled_tc & BIT(i)) /* TC is enabled */
@@ -2077,7 +2081,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Do not allow use more TC queue pairs than MSI-X vectors exist */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
/* Setup queue offset/count for all TCs for given VSI */
@@ -2089,8 +2093,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
switch (vsi->type) {
case I40E_VSI_MAIN:
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED)) ||
+ if ((!test_bit(I40E_FLAG_FD_SB_ENA,
+ pf->flags) &&
+ !test_bit(I40E_FLAG_FD_ATR_ENA,
+ pf->flags)) ||
vsi->tc_config.enabled_tc != 1) {
qcount = min_t(int, pf->alloc_rss_size,
num_tc_qps);
@@ -2470,13 +2476,13 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
**/
static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
int aq_ret;
if (vsi->type == I40E_VSI_MAIN &&
- pf->lan_veb != I40E_NO_VEB &&
- !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ i40e_pf_get_main_veb(pf) &&
+ !test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
/* set defport ON for Main VSI instead of true promisc
* this way we will get all unicast/multicast and VLAN
* promisc behavior but will not get VF or VMDq traffic
@@ -2873,6 +2879,7 @@ err_no_memory_locked:
**/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
if (!pf)
@@ -2884,11 +2891,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
}
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v] &&
- (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
- !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
- int ret = i40e_sync_vsi_filters(pf->vsi[v]);
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ if ((vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
+ !test_bit(__I40E_VSI_RELEASING, vsi->state)) {
+ int ret = i40e_sync_vsi_filters(vsi);
if (ret) {
/* come back and try again later */
@@ -2907,7 +2913,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
*/
static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
{
- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags))
return SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048);
return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
@@ -2955,7 +2961,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
@@ -3462,8 +3468,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
ring->xsk_pool = i40e_xsk_pool(ring);
/* some ATR related tx ring init */
- if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
- ring->atr_sample_rate = vsi->back->atr_sample_rate;
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) {
+ ring->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
ring->atr_count = 0;
} else {
ring->atr_sample_rate = 0;
@@ -3478,9 +3484,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.new_context = 1;
tx_ctx.base = (ring->dma / 128);
tx_ctx.qlen = ring->count;
- tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED));
- tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
+ if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags) ||
+ test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags))
+ tx_ctx.fd_ena = 1;
+ if (test_bit(I40E_FLAG_PTP_ENA, vsi->back->flags))
+ tx_ctx.timesync_ena = 1;
/* FDIR VSI tx ring can still use RS bit and writebacks */
if (vsi->type != I40E_VSI_FDIR)
tx_ctx.head_wb_ena = 1;
@@ -3532,21 +3540,19 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
else
return -EINVAL;
- qtx_ctl |= (ring->ch->vsi_number <<
- I40E_QTX_CTL_VFVM_INDX_SHIFT) &
- I40E_QTX_CTL_VFVM_INDX_MASK;
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
+ ring->ch->vsi_number);
} else {
if (vsi->type == I40E_VSI_VMDQ2) {
qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
- qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
- I40E_QTX_CTL_VFVM_INDX_MASK;
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
+ vsi->id);
} else {
qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
}
}
- qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
- I40E_QTX_CTL_PF_INDX_MASK);
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
i40e_flush(hw);
@@ -3582,40 +3588,55 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
struct i40e_hmc_obj_rxq rx_ctx;
int err = 0;
bool ok;
- int ret;
bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
/* clear the context structure first */
memset(&rx_ctx, 0, sizeof(rx_ctx));
- if (ring->vsi->type == I40E_VSI_MAIN)
- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ ring->rx_buf_len = vsi->rx_buf_len;
+
+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
+ if (ring->vsi->type != I40E_VSI_MAIN)
+ goto skip;
+
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ }
ring->xsk_pool = i40e_xsk_pool(ring);
if (ring->xsk_pool) {
- ring->rx_buf_len =
- xsk_pool_get_rx_frame_size(ring->xsk_pool);
- ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
+ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
- if (ret)
- return ret;
+ if (err)
+ return err;
dev_info(&vsi->back->pdev->dev,
"Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->queue_index);
} else {
- ring->rx_buf_len = vsi->rx_buf_len;
- if (ring->vsi->type == I40E_VSI_MAIN) {
- ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED,
- NULL);
- if (ret)
- return ret;
- }
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ return err;
}
+skip:
xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
@@ -3663,7 +3684,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
}
/* configure Rx buffer alignment */
- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+ if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) {
if (I40E_2K_TOO_SMALL_WITH_PADDING) {
dev_info(&vsi->back->pdev->dev,
"2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
@@ -3761,7 +3782,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
u16 qoffset, qcount;
int i, n;
- if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (!test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
/* Reset the TC information */
for (i = 0; i < vsi->num_queue_pairs; i++) {
rx_ring = vsi->rx_rings[i];
@@ -3828,7 +3849,7 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
return;
/* Reset FDir counters as we're replaying all existing filters */
@@ -3894,6 +3915,12 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
+ /* Set ITR for software interrupts triggered after exiting
+ * busy-loop polling.
+ */
+ wr32(hw, I40E_PFINT_ITRN(I40E_SW_ITR, vector - 1),
+ I40E_ITR_20K);
+
wr32(hw, I40E_PFINT_RATEN(vector - 1),
i40e_intrl_usec_to_reg(vsi->int_rate_limit));
@@ -3966,10 +3993,10 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
I40E_PFINT_ICR0_ENA_VFLR_MASK |
I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
- if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags))
val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
- if (pf->flags & I40E_FLAG_PTP)
+ if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, val);
@@ -4205,7 +4232,7 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
}
/* disable each interrupt */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
for (i = vsi->base_vector;
i < (vsi->num_q_vectors + vsi->base_vector); i++)
wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
@@ -4231,7 +4258,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
int i;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
for (i = 0; i < vsi->num_q_vectors; i++)
i40e_irq_dynamic_enable(vsi, i);
} else {
@@ -4252,7 +4279,7 @@ static void i40e_free_misc_vector(struct i40e_pf *pf)
wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
i40e_flush(&pf->hw);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
free_irq(pf->msix_entries[0].vector, pf);
clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
}
@@ -4287,7 +4314,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
(icr0 & I40E_PFINT_ICR0_SWINT_MASK))
pf->sw_int_count++;
- if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) &&
(icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
@@ -4296,7 +4323,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
/* We do not have a way to disarm Queue causes while leaving
@@ -4338,8 +4365,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
val = rd32(hw, I40E_GLGEN_RSTAT);
- val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
- >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+ val = FIELD_GET(I40E_GLGEN_RSTAT_RESET_TYPE_MASK, val);
if (val == I40E_RESET_CORER) {
pf->corer_count++;
} else if (val == I40E_RESET_GLOBR) {
@@ -4480,7 +4506,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
i += tx_ring->count;
tx_ring->next_to_clean = i;
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags))
i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
return budget > 0;
@@ -4593,9 +4619,9 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
struct i40e_pf *pf = vsi->back;
int err;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
err = i40e_vsi_request_irq_msix(vsi, basename);
- else if (pf->flags & I40E_FLAG_MSI_ENABLED)
+ else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags))
err = request_irq(pf->pdev->irq, i40e_intr, 0,
pf->int_name, pf);
else
@@ -4627,7 +4653,7 @@ static void i40e_netpoll(struct net_device *netdev)
if (test_bit(__I40E_VSI_DOWN, vsi->state))
return;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
for (i = 0; i < vsi->num_q_vectors; i++)
i40e_msix_clean_rings(0, vsi->q_vectors[i]);
} else {
@@ -4906,27 +4932,23 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int pf_q, err, q_end;
+ u32 pf_q, tx_q_end, rx_q_end;
/* When port TX is suspended, don't wait */
if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
return i40e_vsi_stop_rings_no_wait(vsi);
- q_end = vsi->base_queue + vsi->num_queue_pairs;
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
- i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+ tx_q_end = vsi->base_queue +
+ vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+ for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false);
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
- err = i40e_control_wait_rx_q(pf, pf_q, false);
- if (err)
- dev_info(&pf->pdev->dev,
- "VSI seid %d Rx ring %d disable timeout\n",
- vsi->seid, pf_q);
- }
+ rx_q_end = vsi->base_queue + vsi->num_queue_pairs;
+ for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++)
+ i40e_control_rx_q(pf, pf_q, false);
msleep(I40E_DISABLE_TX_GAP_MSEC);
- pf_q = vsi->base_queue;
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
i40e_vsi_wait_queues_disabled(vsi);
@@ -4967,7 +4989,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
u32 val, qp;
int i;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
if (!vsi->q_vectors)
return;
@@ -5001,8 +5023,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
* next_q field of the registers.
*/
val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
- qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
- >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK,
+ val);
val |= I40E_QUEUE_END_OF_LIST
<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
@@ -5024,8 +5046,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
val = rd32(hw, I40E_QINT_TQCTL(qp));
- next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
- >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
+ next = FIELD_GET(I40E_QINT_TQCTL_NEXTQ_INDX_MASK,
+ val);
val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
I40E_QINT_TQCTL_MSIX0_INDX_MASK |
@@ -5043,8 +5065,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
free_irq(pf->pdev->irq, pf);
val = rd32(hw, I40E_PFINT_LNKLST0);
- qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
- >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK, val);
val |= I40E_QUEUE_END_OF_LIST
<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
wr32(hw, I40E_PFINT_LNKLST0, val);
@@ -5129,16 +5150,17 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
pci_disable_msix(pf->pdev);
kfree(pf->msix_entries);
pf->msix_entries = NULL;
kfree(pf->irq_pile);
pf->irq_pile = NULL;
- } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
+ } else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) {
pci_disable_msi(pf->pdev);
}
- pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+ clear_bit(I40E_FLAG_MSI_ENA, pf->flags);
+ clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
}
/**
@@ -5150,6 +5172,7 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
**/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int i;
if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
@@ -5159,9 +5182,10 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
I40E_IWARP_IRQ_PILE_ID);
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i])
- i40e_vsi_free_q_vectors(pf->vsi[i]);
+
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ i40e_vsi_free_q_vectors(vsi);
+
i40e_reset_interrupt_capability(pf);
}
@@ -5258,12 +5282,11 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
**/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
- i40e_quiesce_vsi(pf->vsi[v]);
- }
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ i40e_quiesce_vsi(vsi);
}
/**
@@ -5272,12 +5295,11 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
**/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
- i40e_unquiesce_vsi(pf->vsi[v]);
- }
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ i40e_unquiesce_vsi(vsi);
}
/**
@@ -5338,14 +5360,13 @@ wait_rx:
**/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v, ret = 0;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
- if (pf->vsi[v]) {
- ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
- if (ret)
- break;
- }
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ ret = i40e_vsi_wait_queues_disabled(vsi);
+ if (ret)
+ break;
}
return ret;
@@ -5452,7 +5473,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
**/
static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
u8 enabled_tc = 1, i;
@@ -5469,21 +5490,22 @@ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
**/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
u8 i, enabled_tc = 1;
u8 num_tc = 0;
- struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
- if (i40e_is_tc_mqprio_enabled(pf))
- return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
+ if (i40e_is_tc_mqprio_enabled(pf)) {
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+
+ return vsi->mqprio_qopt.qopt.num_tc;
+ }
/* If neither MQPRIO nor DCB is enabled, then always use single TC */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
return 1;
/* SFP mode will be enabled for all TCs on port */
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
- return i40e_dcb_get_num_tc(dcbcfg);
+ if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
+ return i40e_dcb_get_num_tc(&pf->hw.local_dcbx_config);
/* MFP mode return count of enabled TCs for this PF */
if (pf->hw.func_caps.iscsi)
@@ -5512,11 +5534,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
/* If neither MQPRIO nor DCB is enabled for this PF then just return
* default TC
*/
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
return I40E_DEFAULT_TRAFFIC_CLASS;
/* SFP mode we want PF to be enabled for all TCs */
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
/* MFP enabled and iSCSI PF type */
@@ -5605,7 +5627,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
/* There is no need to reset BW when mqprio mode is on. */
if (i40e_is_tc_mqprio_enabled(pf))
return 0;
- if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (!vsi->mqprio_qopt.qopt.hw && !test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
if (ret)
dev_info(&pf->pdev->dev,
@@ -5858,7 +5880,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
}
vsi->reconfig_rss = false;
}
- if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
@@ -5896,6 +5918,28 @@ out:
}
/**
+ * i40e_vsi_reconfig_tc - Reconfigure VSI Tx Scheduler for stored TC map
+ * @vsi: VSI to be reconfigured
+ *
+ * This reconfigures a particular VSI according to the TC bitmap previously
+ * stored for that VSI.
+ *
+ * Context: It is expected that the VSI queues have been quiesced before
+ * calling this function.
+ *
+ * Return: 0 on success, negative value on failure
+ **/
+static int i40e_vsi_reconfig_tc(struct i40e_vsi *vsi)
+{
+ u8 enabled_tc;
+
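+ /* Clear the stored TC map first so i40e_vsi_config_tc() treats the request as a change and performs a full reconfiguration */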
+ enabled_tc = vsi->tc_config.enabled_tc;
+ vsi->tc_config.enabled_tc = 0;
+
+ return i40e_vsi_config_tc(vsi, enabled_tc);
+}
+
+/**
* i40e_get_link_speed - Returns link speed for the interface
* @vsi: VSI to be configured
*
@@ -6271,7 +6315,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
if (ch->type == I40E_VSI_VMDQ2)
ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
- if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id =
@@ -6457,6 +6501,7 @@ static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
struct i40e_channel *ch)
{
+ struct i40e_vsi *main_vsi;
u8 vsi_type;
u16 seid;
int ret;
@@ -6470,7 +6515,8 @@ static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
}
/* underlying switching element */
- seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ seid = main_vsi->uplink_seid;
/* create channel (VSI), configure TX rings */
ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
@@ -6576,8 +6622,8 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
* VSI to be added switch to VEB mode.
*/
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
if (vsi->type == I40E_VSI_MAIN) {
if (i40e_is_tc_mqprio_enabled(pf))
@@ -6762,51 +6808,48 @@ out:
**/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
u8 tc_map = 0;
int ret;
- u8 v;
+ int v;
/* Enable the TCs available on PF to all VEBs */
tc_map = i40e_pf_get_tc_map(pf);
if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
return;
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (!pf->veb[v])
- continue;
- ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+ i40e_pf_for_each_veb(pf, v, veb) {
+ ret = i40e_veb_config_tc(veb, tc_map);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed configuring TC for VEB seid=%d\n",
- pf->veb[v]->seid);
+ veb->seid);
/* Will try to configure as many components as possible */
}
}
/* Update each VSI */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (!pf->vsi[v])
- continue;
-
+ i40e_pf_for_each_vsi(pf, v, vsi) {
/* - Enable all TCs for the LAN VSI
* - For all others keep them at TC0 for now
*/
- if (v == pf->lan_vsi)
+ if (vsi->type == I40E_VSI_MAIN)
tc_map = i40e_pf_get_tc_map(pf);
else
tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
- ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+ ret = i40e_vsi_config_tc(vsi, tc_map);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed configuring TC for VSI seid=%d\n",
- pf->vsi[v]->seid);
+ vsi->seid);
/* Will try to configure as many components as possible */
} else {
/* Re-configure VSI vectors based on updated TC map */
- i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
- if (pf->vsi[v]->netdev)
- i40e_dcbnl_set_all(pf->vsi[v]);
+ i40e_vsi_map_rings_to_vectors(vsi);
+ if (vsi->netdev)
+ i40e_dcbnl_set_all(vsi);
}
}
}
@@ -6988,9 +7031,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
if (need_reconfig) {
/* Enable DCB tagging only when more than one TC */
if (new_numtc > 1)
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_ENA, pf->flags);
else
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed, quiesce all VSIs */
@@ -7030,7 +7073,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
/* Configure Rx Packet Buffers in HW */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+
+ mfs_tc[i] = main_vsi->netdev->mtu;
mfs_tc[i] += I40E_PACKET_HDR_PAD;
}
@@ -7080,7 +7125,7 @@ out:
set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
}
/* registers are set, lets apply */
- if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
+ if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps))
ret = i40e_hw_set_dcb_config(pf, new_cfg);
}
@@ -7101,7 +7146,7 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err;
- if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
+ if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) {
/* Update the local cached instance with TC0 ETS */
memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
@@ -7162,12 +7207,12 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
/* Do not enable DCB for SW1 and SW2 images even if the FW is capable
* Also do not enable DCBx if FW LLDP agent is disabled
*/
- if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
+ if (test_bit(I40E_HW_CAP_NO_DCB_SUPPORT, pf->hw.caps)) {
dev_info(&pf->pdev->dev, "DCB is not supported.\n");
err = -EOPNOTSUPP;
goto out;
}
- if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) {
dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
err = i40e_dcb_sw_default_config(pf);
if (err) {
@@ -7178,8 +7223,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
pf->dcbx_cap = DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
/* at init capable but disabled */
- pf->flags |= I40E_FLAG_DCB_CAPABLE;
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
goto out;
}
err = i40e_init_dcb(hw, true);
@@ -7194,20 +7239,20 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
DCB_CAP_DCBX_VER_IEEE;
- pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
/* Enable DCB tagging only when more than one TC
* or explicitly disable if only one TC
*/
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_ENA, pf->flags);
else
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
- pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
+ set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
} else {
dev_info(&pf->pdev->dev,
"Query for DCB configuration failed, err %pe aq_err %s\n",
@@ -7367,7 +7412,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
int err;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
i40e_vsi_configure_msix(vsi);
else
i40e_configure_msi_and_legacy(vsi);
@@ -7471,7 +7516,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
* and its speed values are OK, no need for a flap
* if non_zero_phy_type was set, still need to force up
*/
- if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
+ if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags))
non_zero_phy_type = true;
else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
return 0;
@@ -7487,7 +7532,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
/* Copy the old settings, except of phy_type */
config.abilities = abilities.abilities;
- if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
+ if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) {
if (is_up)
config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
else
@@ -7537,8 +7582,8 @@ int i40e_up(struct i40e_vsi *vsi)
int err;
if (vsi->type == I40E_VSI_MAIN &&
- (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
- vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
+ (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) ||
+ test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags)))
i40e_force_link_state(vsi->back, true);
err = i40e_vsi_configure(vsi);
@@ -7566,8 +7611,8 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_vsi_disable_irq(vsi);
i40e_vsi_stop_rings(vsi);
if (vsi->type == I40E_VSI_MAIN &&
- (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
- vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
+ (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) ||
+ test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags)))
i40e_force_link_state(vsi->back, false);
i40e_napi_disable_all(vsi);
@@ -7973,7 +8018,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
struct i40e_fwd_adapter *fwd;
int avail_macvlan, ret;
- if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
return ERR_PTR(-EINVAL);
}
@@ -8168,23 +8213,23 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
hw = mqprio_qopt->qopt.hw;
mode = mqprio_qopt->mode;
if (!hw) {
- pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+ clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
goto config_tc;
}
/* Check if MFP enabled */
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
netdev_info(netdev,
"Configuring TC not supported in MFP mode\n");
return ret;
}
switch (mode) {
case TC_MQPRIO_MODE_DCB:
- pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+ clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
/* Check if DCB enabled to continue */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
netdev_info(netdev,
"DCB is not enabled for adapter\n");
return ret;
@@ -8198,20 +8243,20 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
}
break;
case TC_MQPRIO_MODE_CHANNEL:
- if (pf->flags & I40E_FLAG_DCB_ENABLED) {
+ if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
netdev_info(netdev,
"Full offload of TC Mqprio options is not supported when DCB is enabled\n");
return ret;
}
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
return ret;
ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
if (ret)
return ret;
memcpy(&vsi->mqprio_qopt, mqprio_qopt,
sizeof(*mqprio_qopt));
- pf->flags |= I40E_FLAG_TC_MQPRIO;
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
break;
default:
return -EINVAL;
@@ -8626,6 +8671,10 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
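+ /* Matching on control flags (e.g. IP fragmentation) cannot be offloaded; reject such rules */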
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -8795,11 +8844,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
return -EINVAL;
}
- if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags)) {
dev_err(&vsi->back->pdev->dev,
"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
- vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
+ clear_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags);
+ set_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, vsi->back->flags);
}
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
@@ -8895,11 +8944,11 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
pf->num_cloud_filters--;
if (!pf->num_cloud_filters)
- if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
- !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
- pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
+ !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
+ set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
}
return 0;
}
@@ -9096,7 +9145,7 @@ err_setup_rx:
i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
i40e_vsi_free_tx_resources(vsi);
- if (vsi == pf->vsi[pf->lan_vsi])
+ if (vsi->type == I40E_VSI_MAIN)
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
return err;
@@ -9200,11 +9249,11 @@ static void i40e_cloud_filter_exit(struct i40e_pf *pf)
}
pf->num_cloud_filters = 0;
- if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
- !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
- pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
+ !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
+ set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
}
}
@@ -9241,7 +9290,9 @@ int i40e_close(struct net_device *netdev)
**/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
+ struct i40e_vsi *vsi;
u32 val;
+ int i;
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -9292,34 +9343,25 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
i40e_prep_for_reset(pf);
i40e_reset_and_rebuild(pf, true, lock_acquired);
dev_info(&pf->pdev->dev,
- pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+ test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ?
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
- int v;
-
/* Find the VSI(s) that requested a re-init */
- dev_info(&pf->pdev->dev,
- "VSI reinit requested\n");
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
+ dev_info(&pf->pdev->dev, "VSI reinit requested\n");
- if (vsi != NULL &&
- test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
vsi->state))
- i40e_vsi_reinit_locked(pf->vsi[v]);
+ i40e_vsi_reinit_locked(vsi);
}
} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
- int v;
-
/* Find the VSI(s) that needs to be brought down */
dev_info(&pf->pdev->dev, "VSI down requested\n");
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
- if (vsi != NULL &&
- test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
vsi->state)) {
set_bit(__I40E_VSI_DOWN, vsi->state);
i40e_down(vsi);
@@ -9407,12 +9449,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
(hw->phy.link_info.link_speed &
~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
- !(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
/* let firmware decide if the DCB should be disabled */
- pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
/* Not DCB capable or capability disabled */
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
return ret;
/* Ignore if event is not for Nearest Bridge */
@@ -9448,7 +9490,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
dev_warn(&pf->pdev->dev,
"DCB is not supported for X710-T*L 2.5/5G speeds\n");
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
} else {
dev_info(&pf->pdev->dev,
"Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
@@ -9476,9 +9518,9 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Enable DCB tagging only when more than one TC */
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_ENA, pf->flags);
else
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed, quiesce all VSIs */
@@ -9546,18 +9588,18 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
queue, qtx_ctl);
+ if (FIELD_GET(I40E_QTX_CTL_PFVF_Q_MASK, qtx_ctl) !=
+ I40E_QTX_CTL_VF_QUEUE)
+ return;
+
/* Queue belongs to VF, find the VF and issue VF reset */
- if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
- >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
- vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
- >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
- vf_id -= hw->func_caps.vf_base_id;
- vf = &pf->vf[vf_id];
- i40e_vc_notify_vf_reset(vf);
- /* Allow VF to process pending reset notification */
- msleep(20);
- i40e_reset_vf(vf, false);
- }
+ vf_id = FIELD_GET(I40E_QTX_CTL_VFVM_INDX_MASK, qtx_ctl);
+ vf_id -= hw->func_caps.vf_base_id;
+ vf = &pf->vf[vf_id];
+ i40e_vc_notify_vf_reset(vf);
+ /* Allow VF to process pending reset notification */
+ msleep(20);
+ i40e_reset_vf(vf, false);
}
/**
@@ -9583,8 +9625,7 @@ u32 i40e_get_current_fd_count(struct i40e_pf *pf)
val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
- ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
- I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ FIELD_GET(I40E_PFQF_FDSTAT_BEST_CNT_MASK, val);
return fcnt_prog;
}
@@ -9598,8 +9639,7 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf)
val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
- ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
- I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+ FIELD_GET(I40E_GLQF_FDCNT_0_BESTCNT_MASK, val);
return fcnt_prog;
}
@@ -9610,7 +9650,7 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf)
static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
{
if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
@@ -9631,7 +9671,7 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
@@ -9796,7 +9836,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
} else {
/* replay sideband filters */
- i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+ i40e_fdir_filter_restore(i40e_pf_get_main_vsi(pf));
if (!disable_atr && !pf->fd_tcp4_filter_cnt)
clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
@@ -9874,6 +9914,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
**/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
+ struct i40e_vsi *vsi;
struct i40e_pf *pf;
int i;
@@ -9881,15 +9922,10 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
return;
pf = veb->pf;
- /* depth first... */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
- i40e_veb_link_event(pf->veb[i], link_up);
-
- /* ... now the local VSIs */
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
- i40e_vsi_link_event(pf->vsi[i], link_up);
+ /* Send link event to contained VSIs */
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == veb->seid)
+ i40e_vsi_link_event(vsi, link_up);
}
/**
@@ -9898,7 +9934,8 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
**/
static void i40e_link_event(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+ struct i40e_veb *veb = i40e_pf_get_main_veb(pf);
u8 new_link_speed, old_link_speed;
bool new_link, old_link;
int status;
@@ -9938,15 +9975,15 @@ static void i40e_link_event(struct i40e_pf *pf)
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
*/
- if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
- i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
+ if (veb)
+ i40e_veb_link_event(veb, new_link);
else
i40e_vsi_link_event(vsi, new_link);
if (pf->vf)
i40e_vc_notify_link_state(pf);
- if (pf->flags & I40E_FLAG_PTP)
+ if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
i40e_ptp_set_increment(pf);
#ifdef CONFIG_I40E_DCB
if (new_link == old_link)
@@ -9963,13 +10000,13 @@ static void i40e_link_event(struct i40e_pf *pf)
memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
err = i40e_dcb_sw_default_config(pf);
if (err) {
- pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
- I40E_FLAG_DCB_ENABLED);
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
} else {
pf->dcbx_cap = DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
- pf->flags |= I40E_FLAG_DCB_CAPABLE;
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
}
}
#endif /* CONFIG_I40E_DCB */
@@ -9981,6 +10018,8 @@ static void i40e_link_event(struct i40e_pf *pf)
**/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int i;
/* if interface is down do nothing */
@@ -9994,22 +10033,21 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
return;
pf->service_timer_previous = jiffies;
- if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
+ if (test_bit(I40E_FLAG_LINK_POLLING_ENA, pf->flags) ||
test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
i40e_link_event(pf);
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && pf->vsi[i]->netdev)
- i40e_update_stats(pf->vsi[i]);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->netdev)
+ i40e_update_stats(vsi);
- if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
+ if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) {
/* Update the stats for the active switching components */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i])
- i40e_update_veb_stats(pf->veb[i]);
+ i40e_pf_for_each_veb(pf, i, veb)
+ i40e_update_veb_stats(veb);
}
i40e_ptp_rx_hang(pf);
@@ -10094,7 +10132,7 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
(!(status->link_info & I40E_AQ_LINK_UP)) &&
- (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
+ (!test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))) {
dev_err(&pf->pdev->dev,
"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
dev_err(&pf->pdev->dev,
@@ -10122,7 +10160,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
return;
/* check for error indications */
- val = rd32(&pf->hw, pf->hw.aq.arq.len);
+ val = rd32(&pf->hw, I40E_PF_ARQLEN);
oldval = val;
if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
if (hw->debug_mask & I40E_DEBUG_AQ)
@@ -10141,9 +10179,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
}
if (oldval != val)
- wr32(&pf->hw, pf->hw.aq.arq.len, val);
+ wr32(&pf->hw, I40E_PF_ARQLEN, val);
- val = rd32(&pf->hw, pf->hw.aq.asq.len);
+ val = rd32(&pf->hw, I40E_PF_ATQLEN);
oldval = val;
if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
if (pf->hw.debug_mask & I40E_DEBUG_AQ)
@@ -10161,7 +10199,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
}
if (oldval != val)
- wr32(&pf->hw, pf->hw.aq.asq.len, val);
+ wr32(&pf->hw, I40E_PF_ATQLEN, val);
event.buf_len = I40E_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
@@ -10221,9 +10259,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
opcode);
break;
}
- } while (i++ < pf->adminq_work_limit);
+ } while (i++ < I40E_AQ_WORK_LIMIT);
- if (i < pf->adminq_work_limit)
+ if (i < I40E_AQ_WORK_LIMIT)
clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
/* re-enable Admin queue interrupt cause */
@@ -10268,7 +10306,7 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
**/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_vsi_context ctxt;
int ret;
@@ -10304,7 +10342,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
**/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_vsi_context ctxt;
int ret;
@@ -10354,89 +10392,84 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb)
}
/**
- * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
+ * i40e_reconstitute_veb - rebuild the VEB and VSIs connected to it
* @veb: pointer to the VEB instance
*
- * This is a recursive function that first builds the attached VSIs then
- * recurses in to build the next layer of VEB. We track the connections
- * through our own index numbers because the seid's from the HW could
- * change across the reset.
+ * This function rebuilds the VSIs attached to the VEB. We track the
+ * connections through our own index numbers because the SEIDs from the HW
+ * could change across a reset.
**/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
struct i40e_vsi *ctl_vsi = NULL;
struct i40e_pf *pf = veb->pf;
- int v, veb_idx;
- int ret;
+ struct i40e_vsi *vsi;
+ int v, ret;
- /* build VSI that owns this VEB, temporarily attached to base VEB */
- for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
- if (pf->vsi[v] &&
- pf->vsi[v]->veb_idx == veb->idx &&
- pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
- ctl_vsi = pf->vsi[v];
- break;
- }
- }
- if (!ctl_vsi) {
- dev_info(&pf->pdev->dev,
- "missing owner VSI for veb_idx %d\n", veb->idx);
- ret = -ENOENT;
- goto end_reconstitute;
+ /* Since we do not maintain a PV (port virtualizer) switch element, there
+ * can be only one non-floating VEB with an uplink to the MAC SEID, and its
+ * control VSI is the main one.
+ */
+ if (WARN_ON(veb->uplink_seid && veb->uplink_seid != pf->mac_seid)) {
+ dev_err(&pf->pdev->dev,
+ "Invalid uplink SEID for VEB %d\n", veb->idx);
+ return -ENOENT;
}
- if (ctl_vsi != pf->vsi[pf->lan_vsi])
- ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
- ret = i40e_add_vsi(ctl_vsi);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "rebuild of veb_idx %d owner VSI failed: %d\n",
- veb->idx, ret);
- goto end_reconstitute;
+
+ if (veb->uplink_seid == pf->mac_seid) {
+ /* Check that the LAN VSI has VEB owning flag set */
+ ctl_vsi = i40e_pf_get_main_vsi(pf);
+
+ if (WARN_ON(ctl_vsi->veb_idx != veb->idx ||
+ !(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) {
+ dev_err(&pf->pdev->dev,
+ "Invalid control VSI for VEB %d\n", veb->idx);
+ return -ENOENT;
+ }
+
+ /* Add the control VSI to switch */
+ ret = i40e_add_vsi(ctl_vsi);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Rebuild of owner VSI for VEB %d failed: %d\n",
+ veb->idx, ret);
+ return ret;
+ }
+
+ i40e_vsi_reset_stats(ctl_vsi);
}
- i40e_vsi_reset_stats(ctl_vsi);
/* create the VEB in the switch and move the VSI onto the VEB */
ret = i40e_add_veb(veb, ctl_vsi);
if (ret)
- goto end_reconstitute;
+ return ret;
- if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
- veb->bridge_mode = BRIDGE_MODE_VEB;
- else
- veb->bridge_mode = BRIDGE_MODE_VEPA;
- i40e_config_bridge_mode(veb);
+ if (veb->uplink_seid) {
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
+ veb->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ veb->bridge_mode = BRIDGE_MODE_VEPA;
+ i40e_config_bridge_mode(veb);
+ }
/* create the remaining VSIs attached to this VEB */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ if (vsi == ctl_vsi)
continue;
- if (pf->vsi[v]->veb_idx == veb->idx) {
- struct i40e_vsi *vsi = pf->vsi[v];
-
+ if (vsi->veb_idx == veb->idx) {
vsi->uplink_seid = veb->seid;
ret = i40e_add_vsi(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
"rebuild of vsi_idx %d failed: %d\n",
v, ret);
- goto end_reconstitute;
+ return ret;
}
i40e_vsi_reset_stats(vsi);
}
}
- /* create any VEBs attached to this VEB - RECURSION */
- for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
- if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
- pf->veb[veb_idx]->uplink_seid = veb->seid;
- ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
- if (ret)
- break;
- }
- }
-
-end_reconstitute:
return ret;
}
@@ -10528,7 +10561,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
**/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi;
+ struct i40e_vsi *main_vsi, *vsi;
/* quick workaround for an NVM issue that leaves a critical register
* uninitialized
@@ -10545,7 +10578,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
}
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
return;
/* find existing VSI and see if it needs configuring */
@@ -10553,12 +10586,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
/* create a new VSI if none exists */
if (!vsi) {
- vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
- pf->vsi[pf->lan_vsi]->seid, 0);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, main_vsi->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
return;
}
}
@@ -10704,6 +10737,7 @@ static void i40e_clean_xps_state(struct i40e_vsi *vsi)
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
int ret = 0;
u32 v;
@@ -10718,11 +10752,9 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v]) {
- i40e_clean_xps_state(pf->vsi[v]);
- pf->vsi[v]->seid = 0;
- }
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ i40e_clean_xps_state(vsi);
+ vsi->seid = 0;
}
i40e_shutdown_adminq(&pf->hw);
@@ -10834,15 +10866,16 @@ static int i40e_reset(struct i40e_pf *pf)
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
+ struct i40e_veb *veb;
int ret;
u32 val;
int v;
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
is_recovery_mode_reported)
- i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
+ i40e_set_ethtool_ops(vsi->netdev);
if (test_bit(__I40E_DOWN, pf->state) &&
!test_bit(__I40E_RECOVERY_MODE, pf->state))
@@ -10936,14 +10969,14 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
i40e_aq_set_dcb_parameters(hw, false, NULL);
dev_warn(&pf->pdev->dev,
"DCB is not supported for X710-T*L 2.5/5G speeds\n");
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
} else {
i40e_aq_set_dcb_parameters(hw, true, NULL);
ret = i40e_init_pf_dcb(pf);
if (ret) {
dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
ret);
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
/* Continue without DCB enabled */
}
}
@@ -10977,35 +11010,29 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
*/
if (vsi->uplink_seid != pf->mac_seid) {
dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
- /* find the one VEB connected to the MAC, and find orphans */
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (!pf->veb[v])
- continue;
-
- if (pf->veb[v]->uplink_seid == pf->mac_seid ||
- pf->veb[v]->uplink_seid == 0) {
- ret = i40e_reconstitute_veb(pf->veb[v]);
- if (!ret)
- continue;
+ /* Rebuild VEBs */
+ i40e_pf_for_each_veb(pf, v, veb) {
+ ret = i40e_reconstitute_veb(veb);
+ if (!ret)
+ continue;
- /* If Main VEB failed, we're in deep doodoo,
- * so give up rebuilding the switch and set up
- * for minimal rebuild of PF VSI.
- * If orphan failed, we'll report the error
- * but try to keep going.
- */
- if (pf->veb[v]->uplink_seid == pf->mac_seid) {
- dev_info(&pf->pdev->dev,
- "rebuild of switch failed: %d, will try to set up simple PF connection\n",
- ret);
- vsi->uplink_seid = pf->mac_seid;
- break;
- } else if (pf->veb[v]->uplink_seid == 0) {
- dev_info(&pf->pdev->dev,
- "rebuild of orphan VEB failed: %d\n",
- ret);
- }
+ /* If Main VEB failed, we're in deep doodoo,
+ * so give up rebuilding the switch and set up
+ * for minimal rebuild of PF VSI.
+ * If orphan failed, we'll report the error
+ * but try to keep going.
+ */
+ if (veb->uplink_seid == pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of switch failed: %d, will try to set up simple PF connection\n",
+ ret);
+ vsi->uplink_seid = pf->mac_seid;
+ break;
+ } else if (veb->uplink_seid == 0) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of orphan VEB failed: %d\n",
+ ret);
}
}
}
@@ -11064,7 +11091,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
wr32(hw, I40E_REG_MSS, val);
}
- if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
+ if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) {
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
@@ -11074,7 +11101,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
ret = i40e_setup_misc_vector(pf);
if (ret)
goto end_unlock;
@@ -11181,14 +11208,10 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
/* find what triggered the MDD event */
reg = rd32(hw, I40E_GL_MDET_TX);
if (reg & I40E_GL_MDET_TX_VALID_MASK) {
- u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
- I40E_GL_MDET_TX_PF_NUM_SHIFT;
- u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
- I40E_GL_MDET_TX_VF_NUM_SHIFT;
- u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
- I40E_GL_MDET_TX_EVENT_SHIFT;
- u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
- I40E_GL_MDET_TX_QUEUE_SHIFT) -
+ u8 pf_num = FIELD_GET(I40E_GL_MDET_TX_PF_NUM_MASK, reg);
+ u16 vf_num = FIELD_GET(I40E_GL_MDET_TX_VF_NUM_MASK, reg);
+ u8 event = FIELD_GET(I40E_GL_MDET_TX_EVENT_MASK, reg);
+ u16 queue = FIELD_GET(I40E_GL_MDET_TX_QUEUE_MASK, reg) -
pf->hw.func_caps.base_queue;
if (netif_msg_tx_err(pf))
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
@@ -11198,12 +11221,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
}
reg = rd32(hw, I40E_GL_MDET_RX);
if (reg & I40E_GL_MDET_RX_VALID_MASK) {
- u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
- I40E_GL_MDET_RX_FUNCTION_SHIFT;
- u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
- I40E_GL_MDET_RX_EVENT_SHIFT;
- u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
- I40E_GL_MDET_RX_QUEUE_SHIFT) -
+ u8 func = FIELD_GET(I40E_GL_MDET_RX_FUNCTION_MASK, reg);
+ u8 event = FIELD_GET(I40E_GL_MDET_RX_EVENT_MASK, reg);
+ u16 queue = FIELD_GET(I40E_GL_MDET_RX_QUEUE_MASK, reg) -
pf->hw.func_caps.base_queue;
if (netif_msg_rx_err(pf))
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
@@ -11279,7 +11299,7 @@ static void i40e_service_task(struct work_struct *work)
return;
if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
- i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
+ i40e_detect_recover_hung(pf);
i40e_sync_filters_subtask(pf);
i40e_reset_subtask(pf);
i40e_handle_mdd_event(pf);
@@ -11288,14 +11308,12 @@ static void i40e_service_task(struct work_struct *work)
i40e_fdir_reinit_subtask(pf);
if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
/* Client subtask will reopen next time through. */
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
- true);
+ i40e_notify_client_of_netdev_close(pf, true);
} else {
i40e_client_subtask(pf);
if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
pf->state))
- i40e_notify_client_of_l2_param_changes(
- pf->vsi[pf->lan_vsi]);
+ i40e_notify_client_of_l2_param_changes(pf);
}
i40e_sync_filters_subtask(pf);
} else {
@@ -11349,7 +11367,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
if (!vsi->num_rx_desc)
vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
I40E_REQ_DESCRIPTOR_MULTIPLE);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
vsi->num_q_vectors = pf->num_lan_msix;
else
vsi->num_q_vectors = 1;
@@ -11667,7 +11685,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->count = vsi->num_tx_desc;
ring->size = 0;
ring->dcb_tc = 0;
- if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
+ if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps))
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
ring->itr_setting = pf->tx_itr_default;
WRITE_ONCE(vsi->tx_rings[i], ring++);
@@ -11684,7 +11702,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->count = vsi->num_tx_desc;
ring->size = 0;
ring->dcb_tc = 0;
- if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
+ if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps))
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
set_ring_xdp(ring);
ring->itr_setting = pf->tx_itr_default;
@@ -11748,7 +11766,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
int v_actual;
int iwarp_requested = 0;
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
return -ENODEV;
/* The number of vectors we'll request will be comprised of:
@@ -11787,7 +11805,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
vectors_left -= pf->num_lan_msix;
/* reserve one vector for sideband flow director */
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
if (vectors_left) {
pf->num_fdsb_msix = 1;
v_budget++;
@@ -11798,7 +11816,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
/* can we reserve enough for iWARP? */
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
iwarp_requested = pf->num_iwarp_msix;
if (!vectors_left)
@@ -11810,7 +11828,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
/* any vectors left over go for VMDq support */
- if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
+ if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags)) {
if (!vectors_left) {
pf->num_vmdq_msix = 0;
pf->num_vmdq_qps = 0;
@@ -11867,7 +11885,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
v_actual = i40e_reserve_msix_vectors(pf, v_budget);
if (v_actual < I40E_MIN_MSIX) {
- pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
+ clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
kfree(pf->msix_entries);
pf->msix_entries = NULL;
pci_disable_msix(pf->pdev);
@@ -11905,7 +11923,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_lan_msix = 1;
break;
case 3:
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
pf->num_lan_msix = 1;
pf->num_iwarp_msix = 1;
} else {
@@ -11913,7 +11931,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
break;
default:
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
pf->num_iwarp_msix = min_t(int, (vec / 3),
iwarp_requested);
pf->num_vmdq_vsis = min_t(int, (vec / 3),
@@ -11922,7 +11940,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_vsis = min_t(int, (vec / 2),
I40E_DEFAULT_NUM_VMDQ_VSI);
}
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
pf->num_fdsb_msix = 1;
vec--;
}
@@ -11934,22 +11952,20 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- (pf->num_fdsb_msix == 0)) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && pf->num_fdsb_msix == 0) {
dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
}
- if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
- (pf->num_vmdq_msix == 0)) {
+ if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_msix == 0) {
dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+ clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
}
- if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
- (pf->num_iwarp_msix == 0)) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) &&
+ pf->num_iwarp_msix == 0) {
dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
}
i40e_debug(&pf->hw, I40E_DEBUG_INIT,
"MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
@@ -12003,9 +12019,9 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
int err, v_idx, num_q_vectors;
/* if not MSIX, give the one vector only to the LAN VSI */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
num_q_vectors = vsi->num_q_vectors;
- else if (vsi == pf->vsi[pf->lan_vsi])
+ else if (vsi->type == I40E_VSI_MAIN)
num_q_vectors = 1;
else
return -EINVAL;
@@ -12034,38 +12050,39 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
int vectors = 0;
ssize_t size;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
vectors = i40e_init_msix(pf);
if (vectors < 0) {
- pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_IWARP_ENABLED |
- I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_DCB_CAPABLE |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
+ clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
+ clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
/* rework the queue expectations without MSIX */
i40e_determine_queue_usage(pf);
}
}
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+ if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
+ test_bit(I40E_FLAG_MSI_ENA, pf->flags)) {
dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
vectors = pci_enable_msi(pf->pdev);
if (vectors < 0) {
dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
vectors);
- pf->flags &= ~I40E_FLAG_MSI_ENABLED;
+ clear_bit(I40E_FLAG_MSI_ENA, pf->flags);
}
vectors = 1; /* one MSI or Legacy vector */
}
- if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+ if (!test_bit(I40E_FLAG_MSI_ENA, pf->flags) &&
+ !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
/* set up vector assignment tracking */
@@ -12092,13 +12109,15 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
*/
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int err, i;
/* We cleared the MSI and MSI-X flags when disabling the old interrupt
* scheme. We need to re-enabled them here in order to attempt to
* re-acquire the MSI or MSI-X vectors
*/
- pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+ set_bit(I40E_FLAG_MSI_ENA, pf->flags);
+ set_bit(I40E_FLAG_MSIX_ENA, pf->flags);
err = i40e_init_interrupt_scheme(pf);
if (err)
@@ -12107,20 +12126,19 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
/* Now that we've re-acquired IRQs, we need to remap the vectors and
* rings together again.
*/
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i]) {
- err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
- if (err)
- goto err_unwind;
- i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ err = i40e_vsi_alloc_q_vectors(vsi);
+ if (err)
+ goto err_unwind;
+
+ i40e_vsi_map_rings_to_vectors(vsi);
}
err = i40e_setup_misc_vector(pf);
if (err)
goto err_unwind;
- if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags))
i40e_client_update_msix_info(pf);
return 0;
@@ -12148,7 +12166,7 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
{
int err;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
err = i40e_setup_misc_vector(pf);
if (err) {
@@ -12158,7 +12176,7 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
return err;
}
} else {
- u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
+ u32 flags = test_bit(I40E_FLAG_MSI_ENA, pf->flags) ? 0 : IRQF_SHARED;
err = request_irq(pf->pdev->irq, i40e_intr, flags,
pf->int_name, pf);
@@ -12362,7 +12380,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
struct i40e_pf *pf = vsi->back;
- if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
+ if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
return i40e_config_rss_aq(vsi, seed, lut, lut_size);
else
return i40e_config_rss_reg(vsi, seed, lut, lut_size);
@@ -12381,7 +12399,7 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
struct i40e_pf *pf = vsi->back;
- if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
+ if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
return i40e_get_rss_aq(vsi, seed, lut, lut_size);
else
return i40e_get_rss_reg(vsi, seed, lut, lut_size);
@@ -12409,7 +12427,7 @@ void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
**/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
u8 seed[I40E_HKEY_ARRAY_SIZE];
u8 *lut;
struct i40e_hw *hw = &pf->hw;
@@ -12481,10 +12499,10 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
**/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
int new_rss_size;
- if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+ if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags))
return 0;
queue_count = min_t(int, queue_count, num_online_cpus());
@@ -12717,9 +12735,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
u16 pow;
/* Set default capability flags */
- pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
- I40E_FLAG_MSI_ENABLED |
- I40E_FLAG_MSIX_ENABLED;
+ bitmap_zero(pf->flags, I40E_PF_FLAGS_NBITS);
+ set_bit(I40E_FLAG_MSI_ENA, pf->flags);
+ set_bit(I40E_FLAG_MSIX_ENA, pf->flags);
/* Set default ITR */
pf->rx_itr_default = I40E_ITR_RX_DEF;
@@ -12739,14 +12757,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
if (pf->hw.func_caps.rss) {
- pf->flags |= I40E_FLAG_RSS_ENABLED;
+ set_bit(I40E_FLAG_RSS_ENA, pf->flags);
pf->alloc_rss_size = min_t(int, pf->rss_size_max,
num_online_cpus());
}
/* MFP mode enabled */
if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
- pf->flags |= I40E_FLAG_MFP_ENABLED;
+ set_bit(I40E_FLAG_MFP_ENA, pf->flags);
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
if (i40e_get_partition_bw_setting(pf)) {
dev_warn(&pf->pdev->dev,
@@ -12763,84 +12781,31 @@ static int i40e_sw_init(struct i40e_pf *pf)
if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- if (pf->flags & I40E_FLAG_MFP_ENABLED &&
+ set_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) &&
pf->hw.num_partitions > 1)
dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
else
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
pf->fdir_pf_filter_count =
pf->hw.func_caps.fd_filters_guaranteed;
pf->hw.fdir_shared_filter_count =
pf->hw.func_caps.fd_filters_best_effort;
}
- if (pf->hw.mac.type == I40E_MAC_X722) {
- pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
- I40E_HW_128_QP_RSS_CAPABLE |
- I40E_HW_ATR_EVICT_CAPABLE |
- I40E_HW_WB_ON_ITR_CAPABLE |
- I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
- I40E_HW_NO_PCI_LINK_CHECK |
- I40E_HW_USE_SET_LLDP_MIB |
- I40E_HW_GENEVE_OFFLOAD_CAPABLE |
- I40E_HW_PTP_L4_CAPABLE |
- I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
- I40E_HW_OUTER_UDP_CSUM_CAPABLE);
-
-#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
- if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
- I40E_FDEVICT_PCTYPE_DEFAULT) {
- dev_warn(&pf->pdev->dev,
- "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
- pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
- }
- } else if ((pf->hw.aq.api_maj_ver > 1) ||
- ((pf->hw.aq.api_maj_ver == 1) &&
- (pf->hw.aq.api_min_ver > 4))) {
- /* Supported in FW API version higher than 1.4 */
- pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
- }
-
/* Enable HW ATR eviction if possible */
- if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
- pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
-
- if ((pf->hw.mac.type == I40E_MAC_XL710) &&
- (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
- (pf->hw.aq.fw_maj_ver < 4))) {
- pf->hw_features |= I40E_HW_RESTART_AUTONEG;
- /* No DCB support for FW < v4.33 */
- pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
- }
-
- /* Disable FW LLDP if FW < v4.3 */
- if ((pf->hw.mac.type == I40E_MAC_XL710) &&
- (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
- (pf->hw.aq.fw_maj_ver < 4)))
- pf->hw_features |= I40E_HW_STOP_FW_LLDP;
-
- /* Use the FW Set LLDP MIB API if FW > v4.40 */
- if ((pf->hw.mac.type == I40E_MAC_XL710) &&
- (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
- (pf->hw.aq.fw_maj_ver >= 5)))
- pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
-
- /* Enable PTP L4 if FW > v6.0 */
- if (pf->hw.mac.type == I40E_MAC_XL710 &&
- pf->hw.aq.fw_maj_ver >= 6)
- pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
+ if (test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps))
+ set_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags);
if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
- pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+ set_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
- pf->flags |= I40E_FLAG_IWARP_ENABLED;
+ set_bit(I40E_FLAG_IWARP_ENA, pf->flags);
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
@@ -12850,25 +12815,23 @@ static int i40e_sw_init(struct i40e_pf *pf)
* if NPAR is functioning so unset this hw flag in this case.
*/
if (pf->hw.mac.type == I40E_MAC_XL710 &&
- pf->hw.func_caps.npar_enable &&
- (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
- pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ pf->hw.func_caps.npar_enable)
+ clear_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps);
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
- pf->flags |= I40E_FLAG_SRIOV_ENABLED;
+ set_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
pf->num_req_vfs = min_t(int,
pf->hw.func_caps.num_vfs,
I40E_MAX_VF_COUNT);
}
#endif /* CONFIG_PCI_IOV */
- pf->eeprom_version = 0xDEAD;
pf->lan_veb = I40E_NO_VEB;
pf->lan_vsi = I40E_NO_VSI;
/* By default FW has this off for performance reasons */
- pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+ clear_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
/* set up queue assignment tracking */
size = sizeof(struct i40e_lump_tracking)
@@ -12887,8 +12850,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Link down on close must be on when total port shutdown
* is enabled for a given port
*/
- pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
- I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
+ set_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
+ set_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
dev_info(&pf->pdev->dev,
"total-port-shutdown was enabled, link-down-on-close is forced on\n");
}
@@ -12914,31 +12877,31 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
*/
if (features & NETIF_F_NTUPLE) {
/* Enable filters and mark for reset */
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
need_reset = true;
/* enable FD_SB only if there is MSI-X vector and no cloud
* filters exist
*/
if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
}
} else {
/* turn off filters, mark for reset and clear SW filter list */
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
need_reset = true;
i40e_fdir_filter_exit(pf);
}
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
/* reset fd counters */
pf->fd_add_err = 0;
pf->fd_atr_cnt = 0;
/* if ATR was auto disabled it can be re-enabled. */
if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
}
@@ -13087,7 +13050,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
- if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
+ if (!test_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps))
return -EOPNOTSUPP;
ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
@@ -13116,7 +13079,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct i40e_pf *pf = np->vsi->back;
int err = 0;
- if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+ if (!test_bit(I40E_FLAG_SRIOV_ENA, pf->flags))
return -EOPNOTSUPP;
if (vid) {
@@ -13170,38 +13133,31 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = NULL;
struct nlattr *attr, *br_spec;
- int i, rem;
+ struct i40e_veb *veb;
+ int rem;
/* Only for PF VSI for now */
- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
/* Find the HW bridge for PF VSI */
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- __u16 mode;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
+ __u16 mode = nla_get_u16(attr);
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
- mode = nla_get_u16(attr);
if ((mode != BRIDGE_MODE_VEPA) &&
(mode != BRIDGE_MODE_VEB))
return -EINVAL;
/* Insert a new HW bridge */
if (!veb) {
- veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid,
vsi->tc_config.enabled_tc);
if (veb) {
veb->bridge_mode = mode;
@@ -13216,9 +13172,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
veb->bridge_mode = mode;
/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
if (mode == BRIDGE_MODE_VEB)
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
else
- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
break;
}
@@ -13247,19 +13203,14 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = NULL;
- int i;
+ struct i40e_veb *veb;
/* Only for PF VSI for now */
- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
/* Find the HW bridge for the PF VSI */
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
-
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
if (!veb)
return 0;
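Both ndo_bridge callbacks above now call i40e_pf_get_veb_by_seid() instead of open-coding the pf->veb[] scan. A hedged sketch of that helper, consistent with how these hunks use it (the real definition is expected in i40e.h):

static inline struct i40e_veb *
i40e_pf_get_veb_by_seid(struct i40e_pf *pf, u16 seid)
{
	struct i40e_veb *veb;
	int i;

	/* Walk the VEB array, skipping empty slots, and match on SEID. */
	i40e_pf_for_each_veb(pf, i, veb)
		if (veb->seid == seid)
			return veb;

	return NULL;
}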
@@ -13293,12 +13244,12 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
/* MACLEN can support at most 63 words */
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len & ~(63 * 2))
goto out_err;
/* IPLEN and EIPLEN can support at most 127 dwords */
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len & ~(127 * 4))
goto out_err;
@@ -13552,7 +13503,7 @@ static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
struct i40e_hw *hw = &pf->hw;
/* All rings in a qp belong to the same qvector. */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
else
i40e_irq_dynamic_enable_icr0(pf);
@@ -13577,7 +13528,7 @@ static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
*
* All rings in a qp belong to the same qvector.
*/
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
@@ -13608,9 +13559,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
return err;
i40e_queue_pair_disable_irq(vsi, queue_pair);
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
- i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);
@@ -13762,7 +13713,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_RXCSUM |
0;
- if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps))
netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
@@ -13798,7 +13749,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
netdev->hw_features |= hw_features | NETIF_F_LOOPBACK;
@@ -13837,9 +13788,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
* the end, which is 4 bytes long, so force truncation of the
* original name by IFNAMSIZ - 4
*/
- snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
- IFNAMSIZ - 4,
- pf->vsi[pf->lan_vsi]->netdev->name);
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+
+ snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4,
+ main_vsi->netdev->name);
eth_random_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
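i40e_config_netdev() now resolves the MAIN VSI through i40e_pf_get_main_vsi() rather than dereferencing pf->vsi[pf->lan_vsi] inline. A sketch of the accessor, under the assumption that it simply wraps the pf->lan_vsi index and returns NULL when no MAIN VSI has been set up:

static inline struct i40e_vsi *i40e_pf_get_main_vsi(struct i40e_pf *pf)
{
	/* pf->lan_vsi holds the index of the MAIN (LAN) VSI, or I40E_NO_VSI. */
	return pf->lan_vsi != I40E_NO_VSI ? pf->vsi[pf->lan_vsi] : NULL;
}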
@@ -13989,7 +13941,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
* negative logic - if it's set, we need to fiddle with
* the VSI to disable source pruning.
*/
- if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
+ if (test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, pf->flags)) {
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
@@ -14011,7 +13963,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
}
/* MFP mode setup queue map and update VSI */
- if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) &&
!(pf->hw.func_caps.iscsi)) { /* NIC type PF */
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = pf->main_vsi_seid;
@@ -14059,7 +14011,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.uplink_seid = vsi->uplink_seid;
ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
- if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags) &&
(i40e_is_vsi_uplink_mode_veb(vsi))) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
@@ -14107,7 +14059,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
}
- if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |=
@@ -14193,7 +14145,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
- struct i40e_veb *veb = NULL;
+ struct i40e_veb *veb;
struct i40e_pf *pf;
u16 uplink_seid;
int i, n, bkt;
@@ -14206,15 +14158,13 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
vsi->seid, vsi->uplink_seid);
return -ENODEV;
}
- if (vsi == pf->vsi[pf->lan_vsi] &&
- !test_bit(__I40E_DOWN, pf->state)) {
+ if (vsi->type == I40E_VSI_MAIN && !test_bit(__I40E_DOWN, pf->state)) {
dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
return -ENODEV;
}
set_bit(__I40E_VSI_RELEASING, vsi->state);
uplink_seid = vsi->uplink_seid;
- if (vsi->type == I40E_VSI_MAIN)
- i40e_devlink_destroy_port(pf);
+
if (vsi->type != I40E_VSI_SRIOV) {
if (vsi->netdev_registered) {
vsi->netdev_registered = false;
@@ -14228,6 +14178,9 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
i40e_vsi_disable_irq(vsi);
}
+ if (vsi->type == I40E_VSI_MAIN)
+ i40e_devlink_destroy_port(pf);
+
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* clear the sync flag on all filters */
@@ -14255,29 +14208,28 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
/* If this was the last thing on the VEB, except for the
* controlling VSI, remove the VEB, which puts the controlling
- * VSI onto the next level down in the switch.
+ * VSI onto the uplink port.
*
* Well, okay, there's one more exception here: don't remove
- * the orphan VEBs yet. We'll wait for an explicit remove request
+ * the floating VEBs yet. We'll wait for an explicit remove request
* from up the network stack.
*/
- for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] &&
- pf->vsi[i]->uplink_seid == uplink_seid &&
- (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
- n++; /* count the VSIs */
- }
- }
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
- if (pf->veb[i]->uplink_seid == uplink_seid)
- n++; /* count the VEBs */
- if (pf->veb[i]->seid == uplink_seid)
- veb = pf->veb[i];
+ veb = i40e_pf_get_veb_by_seid(pf, uplink_seid);
+ if (veb && veb->uplink_seid) {
+ n = 0;
+
+ /* Count non-controlling VSIs present on the VEB */
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == uplink_seid &&
+ (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ n++;
+
+ /* If there is no VSI except the control one then release
+ * the VEB and put the control VSI onto VEB uplink.
+ */
+ if (!n)
+ i40e_veb_release(veb);
}
- if (n == 0 && veb && veb->uplink_seid != 0)
- i40e_veb_release(veb);
return 0;
}
@@ -14321,7 +14273,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
/* In Legacy mode, we do not have to get any other vector since we
* piggyback on the misc/ICR0 for queue interrupts.
*/
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
return ret;
if (vsi->num_q_vectors)
vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
@@ -14350,9 +14302,9 @@ vector_setup_out:
**/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
+ struct i40e_vsi *main_vsi;
u16 alloc_queue_pairs;
struct i40e_pf *pf;
- u8 enabled_tc;
int ret;
if (!vsi)
@@ -14384,10 +14336,10 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
/* Update the FW view of the VSI. Force a reset of TC and queue
* layout configurations.
*/
- enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
- pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
- pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
- i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ main_vsi->seid = pf->main_vsi_seid;
+ i40e_vsi_reconfig_tc(main_vsi);
+
if (vsi->type == I40E_VSI_MAIN)
i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
@@ -14402,14 +14354,14 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
err_rings:
i40e_vsi_free_q_vectors(vsi);
- if (vsi->type == I40E_VSI_MAIN)
- i40e_devlink_destroy_port(pf);
if (vsi->netdev_registered) {
vsi->netdev_registered = false;
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
+ if (vsi->type == I40E_VSI_MAIN)
+ i40e_devlink_destroy_port(pf);
i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
i40e_vsi_clear(vsi);
@@ -14435,8 +14387,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
struct i40e_vsi *vsi = NULL;
struct i40e_veb *veb = NULL;
u16 alloc_queue_pairs;
- int ret, i;
int v_idx;
+ int ret;
/* The requested uplink_seid must be either
* - the PF's port seid
@@ -14451,21 +14403,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
*
* Find which uplink_seid we were given and create a new VEB if needed
*/
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
- veb = pf->veb[i];
- break;
- }
- }
-
+ veb = i40e_pf_get_veb_by_seid(pf, uplink_seid);
if (!veb && uplink_seid != pf->mac_seid) {
-
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
- vsi = pf->vsi[i];
- break;
- }
- }
+ vsi = i40e_pf_get_vsi_by_seid(pf, uplink_seid);
if (!vsi) {
dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
uplink_seid);
@@ -14473,13 +14413,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
}
if (vsi->uplink_seid == pf->mac_seid)
- veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
+ veb = i40e_veb_setup(pf, pf->mac_seid, vsi->seid,
vsi->tc_config.enabled_tc);
else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
- veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid,
vsi->tc_config.enabled_tc);
if (veb) {
- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
+ if (vsi->type != I40E_VSI_MAIN) {
dev_info(&vsi->back->pdev->dev,
"New VSI creation error, uplink seid of LAN VSI expected.\n");
return NULL;
@@ -14488,16 +14428,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
* already enabled, in which case we can't force VEPA
* mode.
*/
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
veb->bridge_mode = BRIDGE_MODE_VEPA;
- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
}
i40e_config_bridge_mode(veb);
}
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
if (!veb) {
dev_info(&pf->pdev->dev, "couldn't add VEB\n");
return NULL;
@@ -14586,12 +14523,16 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
break;
}
- if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
- (vsi->type == I40E_VSI_VMDQ2)) {
+ if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) &&
+ vsi->type == I40E_VSI_VMDQ2) {
ret = i40e_vsi_config_rss(vsi);
+ if (ret)
+ goto err_config;
}
return vsi;
+err_config:
+ i40e_vsi_clear_rings(vsi);
err_rings:
i40e_vsi_free_q_vectors(vsi);
err_msix:
@@ -14723,29 +14664,24 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
struct i40e_pf *pf = branch->pf;
u16 branch_seid = branch->seid;
u16 veb_idx = branch->idx;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int i;
/* release any VEBs on this VEB - RECURSION */
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
- if (pf->veb[i]->uplink_seid == branch->seid)
- i40e_switch_branch_release(pf->veb[i]);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->uplink_seid == branch->seid)
+ i40e_switch_branch_release(veb);
/* Release the VSIs on this VEB, but not the owner VSI.
*
* NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
* the VEB itself, so don't use (*branch) after this loop.
*/
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (!pf->vsi[i])
- continue;
- if (pf->vsi[i]->uplink_seid == branch_seid &&
- (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
- i40e_vsi_release(pf->vsi[i]);
- }
- }
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == branch_seid &&
+ (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ i40e_vsi_release(vsi);
/* There's one corner case where the VEB might not have been
* removed, so double check it here and remove it if needed.
@@ -14783,38 +14719,35 @@ static void i40e_veb_clear(struct i40e_veb *veb)
**/
void i40e_veb_release(struct i40e_veb *veb)
{
- struct i40e_vsi *vsi = NULL;
+ struct i40e_vsi *vsi, *vsi_it;
struct i40e_pf *pf;
int i, n = 0;
pf = veb->pf;
/* find the remaining VSI and check for extras */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
+ i40e_pf_for_each_vsi(pf, i, vsi_it)
+ if (vsi_it->uplink_seid == veb->seid) {
+ if (vsi_it->flags & I40E_VSI_FLAG_VEB_OWNER)
+ vsi = vsi_it;
n++;
- vsi = pf->vsi[i];
}
- }
- if (n != 1) {
+
+ /* Floating VEB has to be empty and regular one must have
+ * single owner VSI.
+ */
+ if ((veb->uplink_seid && n != 1) || (!veb->uplink_seid && n != 0)) {
dev_info(&pf->pdev->dev,
"can't remove VEB %d with %d VSIs left\n",
veb->seid, n);
return;
}
- /* move the remaining VSI to uplink veb */
- vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
+ /* For regular VEB move the owner VSI to uplink port */
if (veb->uplink_seid) {
+ vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
vsi->uplink_seid = veb->uplink_seid;
- if (veb->uplink_seid == pf->mac_seid)
- vsi->veb_idx = I40E_NO_VEB;
- else
- vsi->veb_idx = veb->veb_idx;
- } else {
- /* floating VEB */
- vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
- vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
+ vsi->veb_idx = I40E_NO_VEB;
}
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
@@ -14829,11 +14762,11 @@ void i40e_veb_release(struct i40e_veb *veb)
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
struct i40e_pf *pf = veb->pf;
- bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
+ bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
int ret;
- ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, false,
+ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi ? vsi->seid : 0,
+ veb->enabled_tc, vsi ? false : true,
&veb->seid, enable_stats, NULL);
/* get a VEB from the hardware */
@@ -14865,9 +14798,11 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
return -ENOENT;
}
- vsi->uplink_seid = veb->seid;
- vsi->veb_idx = veb->idx;
- vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ if (vsi) {
+ vsi->uplink_seid = veb->seid;
+ vsi->veb_idx = veb->idx;
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ }
return 0;
}
@@ -14875,7 +14810,6 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
/**
* i40e_veb_setup - Set up a VEB
* @pf: board private structure
- * @flags: VEB setup flags
* @uplink_seid: the switch element to link to
* @vsi_seid: the initial VSI seid
* @enabled_tc: Enabled TC bit-map
@@ -14888,12 +14822,12 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
* Returns pointer to the successfully allocated VEB sw struct on
* success, otherwise returns NULL on failure.
**/
-struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
- u16 uplink_seid, u16 vsi_seid,
- u8 enabled_tc)
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid,
+ u16 vsi_seid, u8 enabled_tc)
{
- struct i40e_veb *veb, *uplink_veb = NULL;
- int vsi_idx, veb_idx;
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_veb *veb;
+ int veb_idx;
int ret;
/* if one seid is 0, the other must be 0 to create a floating relay */
@@ -14906,26 +14840,11 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
}
/* make sure there is such a vsi and uplink */
- for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
- if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
- break;
- if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
- dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
- vsi_seid);
- return NULL;
- }
-
- if (uplink_seid && uplink_seid != pf->mac_seid) {
- for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
- if (pf->veb[veb_idx] &&
- pf->veb[veb_idx]->seid == uplink_seid) {
- uplink_veb = pf->veb[veb_idx];
- break;
- }
- }
- if (!uplink_veb) {
- dev_info(&pf->pdev->dev,
- "uplink seid %d not found\n", uplink_seid);
+ if (vsi_seid) {
+ vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
+ if (!vsi) {
+ dev_err(&pf->pdev->dev, "vsi seid %d not found\n",
+ vsi_seid);
return NULL;
}
}
@@ -14935,16 +14854,15 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
if (veb_idx < 0)
goto err_alloc;
veb = pf->veb[veb_idx];
- veb->flags = flags;
veb->uplink_seid = uplink_seid;
- veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
/* create the VEB in the switch */
- ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
+ ret = i40e_add_veb(veb, vsi);
if (ret)
goto err_veb;
- if (vsi_idx == pf->lan_vsi)
+
+ if (vsi && vsi->idx == pf->lan_vsi)
pf->lan_veb = veb->idx;
return veb;
@@ -14972,6 +14890,7 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
u8 element_type = ele->element_type;
u16 seid = le16_to_cpu(ele->seid);
+ struct i40e_veb *veb;
if (printconfig)
dev_info(&pf->pdev->dev,
@@ -14986,30 +14905,30 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
/* Main VEB? */
if (uplink_seid != pf->mac_seid)
break;
- if (pf->lan_veb >= I40E_MAX_VEB) {
+ veb = i40e_pf_get_main_veb(pf);
+ if (!veb) {
int v;
/* find existing or else empty VEB */
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
- pf->lan_veb = v;
- break;
- }
- }
- if (pf->lan_veb >= I40E_MAX_VEB) {
+ veb = i40e_pf_get_veb_by_seid(pf, seid);
+ if (veb) {
+ pf->lan_veb = veb->idx;
+ } else {
v = i40e_veb_mem_alloc(pf);
if (v < 0)
break;
pf->lan_veb = v;
}
}
- if (pf->lan_veb >= I40E_MAX_VEB)
+
+ /* Try to get again main VEB as pf->lan_veb may have changed */
+ veb = i40e_pf_get_main_veb(pf);
+ if (!veb)
break;
- pf->veb[pf->lan_veb]->seid = seid;
- pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
- pf->veb[pf->lan_veb]->pf = pf;
- pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
+ veb->seid = seid;
+ veb->uplink_seid = pf->mac_seid;
+ veb->pf = pf;
break;
case I40E_SWITCH_ELEMENT_TYPE_VSI:
if (num_reported != 1)
@@ -15018,12 +14937,11 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
* the PF's VSI
*/
pf->mac_seid = uplink_seid;
- pf->pf_seid = downlink_seid;
pf->main_vsi_seid = seid;
if (printconfig)
dev_info(&pf->pdev->dev,
"pf_seid=%d main_vsi_seid=%d\n",
- pf->pf_seid, pf->main_vsi_seid);
+ downlink_seid, pf->main_vsi_seid);
break;
case I40E_SWITCH_ELEMENT_TYPE_PF:
case I40E_SWITCH_ELEMENT_TYPE_VF:
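The VEB branch above leans on i40e_pf_get_main_veb(), the VEB counterpart of i40e_pf_get_main_vsi(). A sketch under the same assumption (real definition expected in i40e.h):

static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf)
{
	/* pf->lan_veb holds the index of the main VEB, or I40E_NO_VEB. */
	return pf->lan_veb != I40E_NO_VEB ? pf->veb[pf->lan_veb] : NULL;
}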
@@ -15108,6 +15026,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
**/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
+ struct i40e_vsi *main_vsi;
u16 flags = 0;
int ret;
@@ -15129,7 +15048,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
*/
if ((pf->hw.pf_id == 0) &&
- !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+ !test_bit(I40E_FLAG_TRUE_PROMISC_ENA, pf->flags)) {
flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
pf->last_sw_conf_flags = flags;
}
@@ -15152,22 +15071,25 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
}
/* first time setup */
- if (pf->lan_vsi == I40E_NO_VSI || reinit) {
- struct i40e_vsi *vsi = NULL;
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ if (!main_vsi || reinit) {
+ struct i40e_veb *veb;
u16 uplink_seid;
/* Set up the PF VSI associated with the PF's main VSI
* that is already in the HW switch
*/
- if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
- uplink_seid = pf->veb[pf->lan_veb]->seid;
+ veb = i40e_pf_get_main_veb(pf);
+ if (veb)
+ uplink_seid = veb->seid;
else
uplink_seid = pf->mac_seid;
- if (pf->lan_vsi == I40E_NO_VSI)
- vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (!main_vsi)
+ main_vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN,
+ uplink_seid, 0);
else if (reinit)
- vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
- if (!vsi) {
+ main_vsi = i40e_vsi_reinit_setup(main_vsi);
+ if (!main_vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
i40e_cloud_filter_exit(pf);
i40e_fdir_teardown(pf);
@@ -15175,13 +15097,10 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
}
} else {
/* force a reset of TC and queue layout configurations */
- u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
-
- pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
- pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
- i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ main_vsi->seid = pf->main_vsi_seid;
+ i40e_vsi_reconfig_tc(main_vsi);
}
- i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+ i40e_vlan_stripping_disable(main_vsi);
i40e_fdir_sb_setup(pf);
@@ -15196,23 +15115,19 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
/* enable RSS in the HW, even for only one queue, as the stack can use
* the hash
*/
- if ((pf->flags & I40E_FLAG_RSS_ENABLED))
+ if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
i40e_pf_config_rss(pf);
/* fill in link information and enable LSE reporting */
i40e_link_event(pf);
- /* Initialize user-specific link properties */
- pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
- I40E_AQ_AN_COMPLETED) ? true : false);
-
i40e_ptp_init(pf);
if (!lock_acquired)
rtnl_lock();
/* repopulate tunnel port filters */
- udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
+ udp_tunnel_nic_reset_ntf(main_vsi->netdev);
if (!lock_acquired)
rtnl_unlock();
@@ -15238,42 +15153,42 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left = pf->hw.func_caps.num_tx_qp;
if ((queues_left == 1) ||
- !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
pf->alloc_rss_size = pf->num_lan_qps = 1;
/* make sure all the fancies are disabled */
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_IWARP_ENABLED |
- I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_DCB_CAPABLE |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
- } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_DCB_CAPABLE))) {
+ clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
+ clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
+ } else if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags) &&
+ !test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
+ !test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
+ !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) {
/* one qp for PF */
pf->alloc_rss_size = pf->num_lan_qps = 1;
queues_left -= pf->num_lan_qps;
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_IWARP_ENABLED |
- I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
} else {
/* Not enough queues for all TCs */
- if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
- (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
- pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
- I40E_FLAG_DCB_ENABLED);
+ if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags) &&
+ queues_left < I40E_MAX_TRAFFIC_CLASS) {
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
@@ -15286,24 +15201,24 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= pf->num_lan_qps;
}
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
if (queues_left > 1) {
queues_left -= 1; /* save 1 queue for FD */
} else {
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
}
}
- if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
pf->num_vf_qps && pf->num_req_vfs && queues_left) {
pf->num_req_vfs = min_t(int, pf->num_req_vfs,
(queues_left / pf->num_vf_qps));
queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
}
- if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+ if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) &&
pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
(queues_left / pf->num_vmdq_qps));
@@ -15314,7 +15229,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev,
"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
pf->hw.func_caps.num_tx_qp,
- !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
+ !!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags),
pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
queues_left);
@@ -15338,7 +15253,8 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
/* Flow Director is enabled */
- if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ||
+ test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
settings->enable_fdir = true;
/* Ethtype and MACVLAN filters enabled for PF */
@@ -15355,6 +15271,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
char *buf;
int i;
@@ -15368,23 +15285,22 @@ static void i40e_print_features(struct i40e_pf *pf)
i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
- pf->hw.func_caps.num_vsis,
- pf->vsi[pf->lan_vsi]->num_queue_pairs);
- if (pf->flags & I40E_FLAG_RSS_ENABLED)
+ pf->hw.func_caps.num_vsis, main_vsi->num_queue_pairs);
+ if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " RSS");
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
}
- if (pf->flags & I40E_FLAG_DCB_CAPABLE)
+ if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " DCB");
i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
i += scnprintf(&buf[i], REMAIN(i), " Geneve");
- if (pf->flags & I40E_FLAG_PTP)
+ if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " PTP");
- if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " VEB");
else
i += scnprintf(&buf[i], REMAIN(i), " VEPA");
@@ -15415,22 +15331,26 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
* @fec_cfg: FEC option to set in flags
* @flags: ptr to flags in which we set FEC option
**/
-void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
+void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags)
{
- if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
- *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO) {
+ set_bit(I40E_FLAG_RS_FEC, flags);
+ set_bit(I40E_FLAG_BASE_R_FEC, flags);
+ }
if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
(fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
- *flags |= I40E_FLAG_RS_FEC;
- *flags &= ~I40E_FLAG_BASE_R_FEC;
+ set_bit(I40E_FLAG_RS_FEC, flags);
+ clear_bit(I40E_FLAG_BASE_R_FEC, flags);
}
if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
(fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
- *flags |= I40E_FLAG_BASE_R_FEC;
- *flags &= ~I40E_FLAG_RS_FEC;
+ set_bit(I40E_FLAG_BASE_R_FEC, flags);
+ clear_bit(I40E_FLAG_RS_FEC, flags);
+ }
+ if (fec_cfg == 0) {
+ clear_bit(I40E_FLAG_RS_FEC, flags);
+ clear_bit(I40E_FLAG_BASE_R_FEC, flags);
}
- if (fec_cfg == 0)
- *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
}
/**
@@ -15672,9 +15592,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_I40E_DCB
enum i40e_get_fw_lldp_status_resp lldp_status;
#endif /* CONFIG_I40E_DCB */
+ struct i40e_vsi *vsi;
struct i40e_pf *pf;
struct i40e_hw *hw;
- static u16 pfs_found;
u16 wol_nvm_bits;
char nvm_ver[32];
u16 link_status;
@@ -15683,7 +15603,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif /* CONFIG_I40E_DCB */
int err;
u32 val;
- u32 i;
err = pci_enable_device_mem(pdev);
if (err)
@@ -15752,7 +15671,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
hw->bus.bus_id = pdev->bus->number;
- pf->instance = pfs_found;
/* Select something other than the 802.1ad ethertype for the
* switch to use internally and drop on ingress.
@@ -15814,7 +15732,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
- pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
snprintf(pf->int_name, sizeof(pf->int_name) - 1,
"%s-%s:misc",
@@ -15856,15 +15773,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->vendor_id, hw->device_id, hw->subsystem_vendor_id,
hw->subsystem_device_id);
- if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
+ if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw) + 1))
dev_dbg(&pdev->dev,
"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
hw->aq.api_maj_ver,
hw->aq.api_min_ver,
I40E_FW_API_VERSION_MAJOR,
I40E_FW_MINOR_VERSION(hw));
- else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
+ else if (i40e_is_aq_api_ver_lt(hw, 1, 4))
dev_info(&pdev->dev,
"The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
hw->aq.api_maj_ver,
@@ -15911,7 +15828,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* Ignore error return codes because if it was already disabled via
* hardware settings this will fail
*/
- if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
+ if (test_bit(I40E_HW_CAP_STOP_FW_LLDP, pf->hw.caps)) {
dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
i40e_aq_stop_lldp(hw, true, false, NULL);
}
@@ -15928,7 +15845,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
if (is_valid_ether_addr(hw->mac.port_addr))
- pf->hw_features |= I40E_HW_PORT_ID_VALID;
+ set_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps);
i40e_ptp_alloc_pins(pf);
pci_set_drvdata(pdev, pf);
@@ -15938,10 +15855,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
(!status &&
lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ?
- (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) :
- (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP);
+ (clear_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) :
+ (set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags));
dev_info(&pdev->dev,
- (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
+ test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ?
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
@@ -15951,7 +15868,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
- pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
@@ -16019,11 +15937,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_PCI_IOV
/* prep for VF support */
- if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
- (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
+ test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
!test_bit(__I40E_BAD_EEPROM, pf->state)) {
if (pci_num_vf(pdev))
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
}
#endif
err = i40e_setup_pf_switch(pf, false, false);
@@ -16031,15 +15949,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
- INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
+
+ vsi = i40e_pf_get_main_vsi(pf);
+ INIT_LIST_HEAD(&vsi->ch_list);
/* if FDIR VSI was set up, start it now */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
- i40e_vsi_open(pf->vsi[i]);
- break;
- }
- }
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+ if (vsi)
+ i40e_vsi_open(vsi);
/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
@@ -16064,7 +15981,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
wr32(hw, I40E_REG_MSS, val);
}
- if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
+ if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) {
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
@@ -16084,7 +16001,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* the misc functionality and queue processing is combined in
* the same vector and that gets setup at open.
*/
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
err = i40e_setup_misc_vector(pf);
if (err) {
dev_info(&pdev->dev,
@@ -16097,8 +16014,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_PCI_IOV
/* prep for VF support */
- if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
- (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
+ test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
!test_bit(__I40E_BAD_EEPROM, pf->state)) {
/* disable link interrupts for VFs */
val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
@@ -16118,7 +16035,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif /* CONFIG_PCI_IOV */
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
pf->num_iwarp_msix,
I40E_IWARP_IRQ_PILE_ID);
@@ -16126,7 +16043,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"failed to get tracking for %d vectors for IWARP err=%d\n",
pf->num_iwarp_msix, pf->iwarp_base_vector);
- pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
}
}
@@ -16140,7 +16057,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
round_jiffies(jiffies + pf->service_timer_period));
/* add this PF to client device list and launch a client service task */
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
err = i40e_lan_add_device(pf);
if (err)
dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
@@ -16153,7 +16070,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* and will report PCI Gen 1 x 1 by default so don't bother
* checking them.
*/
- if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
+ if (!test_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, pf->hw.caps)) {
char speed[PCI_SPEED_SIZE] = "Unknown";
char width[PCI_WIDTH_SIZE] = "Unknown";
@@ -16207,7 +16124,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
/* set the FEC config due to the board capabilities */
- i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
+ i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, pf->flags);
/* get the supported phy types from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
@@ -16218,11 +16135,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
- val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
- I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
+ val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
+ rd32(&pf->hw, I40E_PRTGL_SAH));
if (val < MAX_FRAME_SIZE_DEFAULT)
- dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
- i, val);
+ dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
+ pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -16234,10 +16151,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->main_vsi_seid);
if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
- (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
- pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
+ (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
+ set_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps);
if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
- pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
+ set_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps);
/* print a string summarizing features */
i40e_print_features(pf);
@@ -16285,6 +16202,8 @@ static void i40e_remove(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int ret_code;
int i;
@@ -16306,10 +16225,10 @@ static void i40e_remove(struct pci_dev *pdev)
usleep_range(1000, 2000);
set_bit(__I40E_IN_REMOVE, pf->state);
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
}
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, pf->state);
@@ -16335,36 +16254,31 @@ static void i40e_remove(struct pci_dev *pdev)
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+ i40e_notify_client_of_netdev_close(pf, false);
i40e_fdir_teardown(pf);
/* If there is a switch structure or any orphans, remove them.
* This will leave only the PF's VSI remaining.
*/
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
-
- if (pf->veb[i]->uplink_seid == pf->mac_seid ||
- pf->veb[i]->uplink_seid == 0)
- i40e_switch_branch_release(pf->veb[i]);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->uplink_seid == pf->mac_seid ||
+ veb->uplink_seid == 0)
+ i40e_switch_branch_release(veb);
/* Now we can shutdown the PF's VSIs, just before we kill
* adminq and hmc.
*/
- for (i = pf->num_alloc_vsi; i--;)
- if (pf->vsi[i]) {
- i40e_vsi_close(pf->vsi[i]);
- i40e_vsi_release(pf->vsi[i]);
- pf->vsi[i] = NULL;
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ i40e_vsi_close(vsi);
+ i40e_vsi_release(vsi);
+ pf->vsi[i] = NULL;
+ }
i40e_cloud_filter_exit(pf);
/* remove attached clients */
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
ret_code = i40e_lan_del_device(pf);
if (ret_code)
dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
@@ -16383,7 +16297,7 @@ static void i40e_remove(struct pci_dev *pdev)
unmap:
/* Free MSI/legacy interrupt 0 when in recovery mode. */
if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
- !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
free_irq(pf->pdev->irq, pf);
/* shutdown the adminq */
@@ -16396,18 +16310,17 @@ unmap:
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
rtnl_lock();
i40e_clear_interrupt_scheme(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i]) {
- if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
- i40e_vsi_clear_rings(pf->vsi[i]);
- i40e_vsi_clear(pf->vsi[i]);
- pf->vsi[i] = NULL;
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_clear(vsi);
+ pf->vsi[i] = NULL;
}
rtnl_unlock();
- for (i = 0; i < I40E_MAX_VEB; i++) {
- kfree(pf->veb[i]);
+ i40e_pf_for_each_veb(pf, i, veb) {
+ kfree(veb);
pf->veb[i] = NULL;
}
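i40e_remove() now walks the VSI and VEB arrays with the i40e_pf_for_each_vsi()/i40e_pf_for_each_veb() iterators instead of indexed loops with explicit NULL checks. A hedged sketch of the iteration pattern such a macro is assumed to implement (the VEB variant would mirror it over pf->veb[]):

/* Illustrative only: iterate the allocation array, skip NULL slots, and
 * bind each live entry to _vsi for the statement or block that follows.
 */
#define i40e_pf_for_each_vsi(_pf, _i, _vsi)				\
	for ((_i) = 0; (_i) < (_pf)->num_alloc_vsi; (_i)++)		\
		if (!((_vsi) = (_pf)->vsi[(_i)]))			\
			continue;					\
		else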
@@ -16510,6 +16423,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
return;
i40e_reset_and_rebuild(pf, false, false);
+#ifdef CONFIG_PCI_IOV
+ i40e_restore_all_vfs_msi_state(pdev);
+#endif /* CONFIG_PCI_IOV */
}
/**
@@ -16537,15 +16453,15 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
**/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
u8 mac_addr[6];
u16 flags = 0;
int ret;
/* Get current MAC address in case it's an LAA */
- if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
- ether_addr_copy(mac_addr,
- pf->vsi[pf->lan_vsi]->netdev->dev_addr);
+ if (main_vsi && main_vsi->netdev) {
+ ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr);
} else {
dev_err(&pf->pdev->dev,
"Failed to retrieve MAC address; using default\n");
@@ -16597,9 +16513,10 @@ static void i40e_shutdown(struct pci_dev *pdev)
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+ i40e_notify_client_of_netdev_close(pf, false);
- if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
+ if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+ pf->wol_en)
i40e_enable_mc_magic_wake(pf);
i40e_prep_for_reset(pf);
@@ -16611,7 +16528,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
/* Free MSI/legacy interrupt 0 when in recovery mode. */
if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
- !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
free_irq(pf->pdev->irq, pf);
/* Since we're going to destroy queues during the
@@ -16632,7 +16549,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
* i40e_suspend - PM callback for moving to D3
* @dev: generic device information structure
**/
-static int __maybe_unused i40e_suspend(struct device *dev)
+static int i40e_suspend(struct device *dev)
{
struct i40e_pf *pf = dev_get_drvdata(dev);
struct i40e_hw *hw = &pf->hw;
@@ -16650,9 +16567,10 @@ static int __maybe_unused i40e_suspend(struct device *dev)
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+ i40e_notify_client_of_netdev_close(pf, false);
- if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
+ if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+ pf->wol_en)
i40e_enable_mc_magic_wake(pf);
/* Since we're going to destroy queues during the
@@ -16682,7 +16600,7 @@ static int __maybe_unused i40e_suspend(struct device *dev)
* i40e_resume - PM callback for waking up from D3
* @dev: generic device information structure
**/
-static int __maybe_unused i40e_resume(struct device *dev)
+static int i40e_resume(struct device *dev)
{
struct i40e_pf *pf = dev_get_drvdata(dev);
int err;
@@ -16728,16 +16646,14 @@ static const struct pci_error_handlers i40e_err_handler = {
.resume = i40e_pci_error_resume,
};
-static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
static struct pci_driver i40e_driver = {
.name = i40e_driver_name,
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
- .driver = {
- .pm = &i40e_pm_ops,
- },
+ .driver.pm = pm_sleep_ptr(&i40e_pm_ops),
.shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
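The driver registration now uses DEFINE_SIMPLE_DEV_PM_OPS() with pm_sleep_ptr(), so the suspend/resume callbacks are always referenced at compile time and the __maybe_unused annotations can be dropped; when CONFIG_PM_SLEEP is off, pm_sleep_ptr() evaluates to NULL and the ops are discarded. A minimal sketch of the pattern, with hypothetical example_* names:

#include <linux/pci.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* quiesce the device */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* bring the device back up */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_driver = {
	.name		= "example",
	.driver.pm	= pm_sleep_ptr(&example_pm_ops),
};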
@@ -16763,7 +16679,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
- i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 77cdbfc19d47..7f0936f4e05e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include "i40e_alloc.h"
#include "i40e_prototype.h"
@@ -26,8 +27,7 @@ int i40e_init_nvm(struct i40e_hw *hw)
* as the blank mode may be used in the factory line.
*/
gens = rd32(hw, I40E_GLNVM_GENS);
- sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
- I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+ sr_size = FIELD_GET(I40E_GLNVM_GENS_SR_SIZE_MASK, gens);
/* Switching to words (sr_size contains power of 2KB) */
nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
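i40e_nvm.c now extracts register fields with FIELD_GET() from <linux/bitfield.h>, which derives the shift from the mask, instead of pairing *_MASK with *_SHIFT by hand. A small illustrative helper (not part of the patch) showing the equivalent before/after forms:

#include <linux/bitfield.h>

static u32 example_get_sr_size(u32 gens)
{
	/* before: (gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
	 *         I40E_GLNVM_GENS_SR_SIZE_SHIFT
	 */
	return FIELD_GET(I40E_GLNVM_GENS_SR_SIZE_MASK, gens);
}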
@@ -193,9 +193,8 @@ static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
- *data = (u16)((sr_reg &
- I40E_GLNVM_SRDATA_RDDATA_MASK)
- >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ *data = FIELD_GET(I40E_GLNVM_SRDATA_RDDATA_MASK,
+ sr_reg);
}
}
if (ret_code)
@@ -291,7 +290,7 @@ static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
static int __i40e_read_nvm_word(struct i40e_hw *hw,
u16 offset, u16 *data)
{
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps))
return i40e_read_nvm_word_aq(hw, offset, data);
return i40e_read_nvm_word_srctl(hw, offset, data);
@@ -310,14 +309,14 @@ int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
int ret_code = 0;
- if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ if (test_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps))
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
return ret_code;
ret_code = __i40e_read_nvm_word(hw, offset, data);
- if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ if (test_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps))
i40e_release_nvm(hw);
return ret_code;
@@ -499,7 +498,7 @@ static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
u16 offset, u16 *words,
u16 *data)
{
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps))
return i40e_read_nvm_buffer_aq(hw, offset, words, data);
return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
@@ -521,7 +520,7 @@ int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
int ret_code = 0;
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps)) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
@@ -735,49 +734,18 @@ int i40e_validate_nvm_checksum(struct i40e_hw *hw,
return ret_code;
}
-static int i40e_nvmupd_state_init(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
-static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno);
-static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno);
-static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static inline u8 i40e_nvmupd_get_module(u32 val)
+static u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
- return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
+ return FIELD_GET(I40E_NVM_TRANS_MASK, val);
}
static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
{
- return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
- I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+ return FIELD_GET(I40E_NVM_PRESERVATION_FLAGS_MASK, val);
}
static const char * const i40e_nvm_update_state_str[] = {
@@ -801,121 +769,408 @@ static const char * const i40e_nvm_update_state_str[] = {
};
/**
- * i40e_nvmupd_command - Process an NVM update command
+ * i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command
- * @bytes: pointer to the data buffer
+ * @cmd: pointer to nvm update command buffer
* @perrno: pointer to return error code
*
- * Dispatches command depending on what update state is current
+ * Return one of the valid command types or I40E_NVMUPD_INVALID
**/
-int i40e_nvmupd_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static enum i40e_nvmupd_cmd
+i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd,
+ int *perrno)
{
enum i40e_nvmupd_cmd upd_cmd;
- int status;
-
- /* assume success */
- *perrno = 0;
+ u8 module, transaction;
- /* early check for status command and debug msgs */
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+ /* anything that doesn't match a recognized case is an error */
+ upd_cmd = I40E_NVMUPD_INVALID;
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
- i40e_nvm_update_state_str[upd_cmd],
- hw->nvmupd_state,
- hw->nvm_release_on_done, hw->nvm_wait_opcode,
- cmd->command, cmd->config, cmd->offset, cmd->data_size);
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
- if (upd_cmd == I40E_NVMUPD_INVALID) {
- *perrno = -EFAULT;
+ /* limits on data size */
+ if (cmd->data_size < 1 || cmd->data_size > I40E_NVMUPD_MAX_DATA) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_validate_command returns %d errno %d\n",
- upd_cmd, *perrno);
+ "%s data_size %d\n", __func__, cmd->data_size);
+ *perrno = -EFAULT;
+ return I40E_NVMUPD_INVALID;
}
- /* a status request returns immediately rather than
- * going into the state machine
- */
- if (upd_cmd == I40E_NVMUPD_STATUS) {
- if (!cmd->data_size) {
- *perrno = -EFAULT;
- return -EINVAL;
+ switch (cmd->command) {
+ case I40E_NVM_READ:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_READ_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_READ_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_READ_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_READ_SA;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0xf)
+ upd_cmd = I40E_NVMUPD_STATUS;
+ else if (module == 0)
+ upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+ break;
+ case I40E_NVM_AQE:
+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+ break;
}
+ break;
- bytes[0] = hw->nvmupd_state;
-
- if (cmd->data_size >= 4) {
- bytes[1] = 0;
- *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ case I40E_NVM_WRITE:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_WRITE_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_WRITE_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_WRITE_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_WRITE_SA;
+ break;
+ case I40E_NVM_ERA:
+ upd_cmd = I40E_NVMUPD_WRITE_ERA;
+ break;
+ case I40E_NVM_CSUM:
+ upd_cmd = I40E_NVMUPD_CSUM_CON;
+ break;
+ case (I40E_NVM_CSUM | I40E_NVM_SA):
+ upd_cmd = I40E_NVMUPD_CSUM_SA;
+ break;
+ case (I40E_NVM_CSUM | I40E_NVM_LCB):
+ upd_cmd = I40E_NVMUPD_CSUM_LCB;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0)
+ upd_cmd = I40E_NVMUPD_EXEC_AQ;
+ break;
}
+ break;
+ }
- /* Clear error status on read */
- if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ return upd_cmd;
+}
- return 0;
+/**
+ * i40e_nvmupd_nvm_erase - Erase an NVM module
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ int status = 0;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s mod 0x%x off 0x%x len 0x%x\n",
+ __func__, module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s status %d aq %d\n",
+ __func__, status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
- /* Clear status even it is not read and log */
- if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_write - Write NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ u8 preservation_flags;
+ int status = 0;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_update_nvm(hw, module, cmd->offset,
+ (u16)cmd->data_size, bytes, last,
+ preservation_flags, &cmd_details);
+ if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ "%s mod 0x%x off 0x%x len 0x%x\n",
+ __func__, module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s status %d aq %d\n",
+ __func__, status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
- /* Acquire lock to prevent race condition where adminq_task
- * can execute after i40e_nvmupd_nvm_read/write but before state
- * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
- *
- * During NVMUpdate, it is observed that lock could be held for
- * ~5ms for most commands. However lock is held for ~60ms for
- * NVMUPD_CSUM_LCB command.
- */
- mutex_lock(&hw->aq.arq_mutex);
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT:
- status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
- break;
+ return status;
+}
- case I40E_NVMUPD_STATE_READING:
- status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
- break;
+/**
+ * i40e_nvmupd_nvm_read - Read NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ int status;
+ bool last;
- case I40E_NVMUPD_STATE_WRITING:
- status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
- break;
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
- case I40E_NVMUPD_STATE_INIT_WAIT:
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- /* if we need to stop waiting for an event, clear
- * the wait info and return before doing anything else
- */
- if (cmd->offset == 0xffff) {
- i40e_nvmupd_clear_wait_state(hw);
- status = 0;
- break;
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ bytes, last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s mod 0x%x off 0x%x len 0x%x\n",
+ __func__, module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s status %d aq %d\n",
+ __func__, status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ struct i40e_aq_desc *aq_desc;
+ u32 buff_size = 0;
+ u8 *buff = NULL;
+ u32 aq_desc_len;
+ u32 aq_data_len;
+ int status;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ if (cmd->offset == 0xffff)
+ return 0;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+ /* get the aq descriptor */
+ if (cmd->data_size < aq_desc_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+ cmd->data_size, aq_desc_len);
+ *perrno = -EINVAL;
+ return -EINVAL;
+ }
+ aq_desc = (struct i40e_aq_desc *)bytes;
+
+ /* if data buffer needed, make sure it's ready */
+ aq_data_len = cmd->data_size - aq_desc_len;
+ buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
+ if (buff_size) {
+ if (!hw->nvm_buff.va) {
+ status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+ hw->aq.asq_buf_size);
+ if (status)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+ status);
}
- status = -EBUSY;
- *perrno = -EBUSY;
- break;
+ if (hw->nvm_buff.va) {
+ buff = hw->nvm_buff.va;
+ memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ }
+ }
- default:
- /* invalid state, should never happen */
+ if (cmd->offset)
+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
+ /* and away we go! */
+ status = i40e_asq_send_command(hw, aq_desc, buff,
+ buff_size, &cmd_details);
+ if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: no such state %d\n", hw->nvmupd_state);
- status = -EOPNOTSUPP;
- *perrno = -ESRCH;
- break;
+ "%s err %pe aq_err %s\n",
+ __func__, ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
+ }
+
+ /* should we wait for a followup event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
- mutex_unlock(&hw->aq.arq_mutex);
return status;
}
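i40e_nvmupd_exec_aq() treats the caller's buffer as an AQ descriptor followed by optional indirect data, and rejects buffers shorter than the descriptor. Here is a small sketch of that split; the 32-byte descriptor size is assumed purely for illustration:

/* Layout sketch: descriptor header first, indirect data after it.
 * The 32-byte descriptor size is an assumption, not the driver's value.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EX_AQ_DESC_LEN 32u

int main(void)
{
        uint8_t bytes[64];              /* buffer handed in by user space */
        uint32_t data_size = sizeof(bytes);
        uint32_t aq_data_len;

        memset(bytes, 0, sizeof(bytes));
        if (data_size < EX_AQ_DESC_LEN) {
                fprintf(stderr, "not enough bytes for the AQ descriptor\n");
                return 1;
        }

        aq_data_len = data_size - EX_AQ_DESC_LEN;
        printf("descriptor bytes 0..%u, data bytes %u..%u (%u bytes)\n",
               EX_AQ_DESC_LEN - 1, EX_AQ_DESC_LEN, data_size - 1, aq_data_len);
        return 0;
}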
/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+ int remainder;
+ u8 *buff;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+
+ /* check offset range */
+ if (cmd->offset > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+ __func__, cmd->offset, aq_total_len);
+ *perrno = -EINVAL;
+ return -EINVAL;
+ }
+
+ /* check copylength range */
+ if (cmd->data_size > (aq_total_len - cmd->offset)) {
+ int new_len = aq_total_len - cmd->offset;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, new_len);
+ cmd->data_size = new_len;
+ }
+
+ remainder = cmd->data_size;
+ if (cmd->offset < aq_desc_len) {
+ u32 len = aq_desc_len - cmd->offset;
+
+ len = min(len, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+ __func__, cmd->offset, cmd->offset + len);
+
+ buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+ memcpy(bytes, buff, len);
+
+ bytes += len;
+ remainder -= len;
+ buff = hw->nvm_buff.va;
+ } else {
+ buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ }
+
+ if (remainder > 0) {
+ int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+ __func__, start_byte, start_byte + remainder);
+ memcpy(bytes, buff, remainder);
+ }
+
+ return 0;
+}
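i40e_nvmupd_get_aq_result() serves the requested window partly from the write-back descriptor and partly from the data buffer, trimming the copy length to what is available. A standalone walk-through of the same arithmetic with assumed sizes:

/* Offset/length split sketch: bytes before the descriptor length come
 * from the write-back descriptor, the rest from the data buffer.
 * All sizes below are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t aq_desc_len = 32, datalen = 100;
        uint32_t aq_total_len = aq_desc_len + datalen;
        uint32_t offset = 20, data_size = 50, remainder;

        if (offset > aq_total_len)
                return 1;
        if (data_size > aq_total_len - offset)
                data_size = aq_total_len - offset;      /* trim the copy */

        remainder = data_size;
        if (offset < aq_desc_len) {
                uint32_t len = aq_desc_len - offset;

                if (len > data_size)
                        len = data_size;
                printf("aq_desc bytes %u to %u\n", offset, offset + len);
                remainder -= len;
        }
        if (remainder > 0)
                printf("databuf bytes: %u\n", remainder);
        return 0;
}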
+
+/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+
+ /* check copylength range */
+ if (cmd->data_size > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, aq_total_len);
+ cmd->data_size = aq_total_len;
+ }
+
+ memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
+
+ return 0;
+}
+
+/**
* i40e_nvmupd_state_init - Handle NVM update state Init
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -939,7 +1194,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
i40e_release_nvm(hw);
@@ -950,7 +1205,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
if (status)
@@ -964,7 +1219,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
if (status) {
@@ -981,7 +1236,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
if (status) {
@@ -998,7 +1253,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
if (status) {
@@ -1014,7 +1269,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_update_nvm_checksum(hw);
if (status) {
@@ -1187,7 +1442,7 @@ retry:
* so here we try to reacquire the semaphore then retry the write.
* We only do one retry, then give up.
*/
- if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ if (status && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY &&
!retry_attempt) {
u32 old_asq_status = hw->aq.asq_last_status;
int old_status = status;
@@ -1217,457 +1472,168 @@ retry:
}
/**
- * i40e_nvmupd_clear_wait_state - clear wait state on hw
- * @hw: pointer to the hardware structure
- **/
-void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
-{
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: clearing wait on opcode 0x%04x\n",
- hw->nvm_wait_opcode);
-
- if (hw->nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->nvm_release_on_done = false;
- }
- hw->nvm_wait_opcode = 0;
-
- if (hw->aq.arq_last_status) {
- hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
- return;
- }
-
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
-
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
-
- default:
- break;
- }
-}
-
-/**
- * i40e_nvmupd_check_wait_event - handle NVM update operation events
- * @hw: pointer to the hardware structure
- * @opcode: the event that just happened
- * @desc: AdminQ descriptor
- **/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
- struct i40e_aq_desc *desc)
-{
- u32 aq_desc_len = sizeof(struct i40e_aq_desc);
-
- if (opcode == hw->nvm_wait_opcode) {
- memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
- i40e_nvmupd_clear_wait_state(hw);
- }
-}
-
-/**
- * i40e_nvmupd_validate_command - Validate given command
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @perrno: pointer to return error code
- *
- * Return one of the valid command types or I40E_NVMUPD_INVALID
- **/
-static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno)
-{
- enum i40e_nvmupd_cmd upd_cmd;
- u8 module, transaction;
-
- /* anything that doesn't match a recognized case is an error */
- upd_cmd = I40E_NVMUPD_INVALID;
-
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
-
- /* limits on data size */
- if ((cmd->data_size < 1) ||
- (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_validate_command data_size %d\n",
- cmd->data_size);
- *perrno = -EFAULT;
- return I40E_NVMUPD_INVALID;
- }
-
- switch (cmd->command) {
- case I40E_NVM_READ:
- switch (transaction) {
- case I40E_NVM_CON:
- upd_cmd = I40E_NVMUPD_READ_CON;
- break;
- case I40E_NVM_SNT:
- upd_cmd = I40E_NVMUPD_READ_SNT;
- break;
- case I40E_NVM_LCB:
- upd_cmd = I40E_NVMUPD_READ_LCB;
- break;
- case I40E_NVM_SA:
- upd_cmd = I40E_NVMUPD_READ_SA;
- break;
- case I40E_NVM_EXEC:
- if (module == 0xf)
- upd_cmd = I40E_NVMUPD_STATUS;
- else if (module == 0)
- upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
- break;
- case I40E_NVM_AQE:
- upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
- break;
- }
- break;
-
- case I40E_NVM_WRITE:
- switch (transaction) {
- case I40E_NVM_CON:
- upd_cmd = I40E_NVMUPD_WRITE_CON;
- break;
- case I40E_NVM_SNT:
- upd_cmd = I40E_NVMUPD_WRITE_SNT;
- break;
- case I40E_NVM_LCB:
- upd_cmd = I40E_NVMUPD_WRITE_LCB;
- break;
- case I40E_NVM_SA:
- upd_cmd = I40E_NVMUPD_WRITE_SA;
- break;
- case I40E_NVM_ERA:
- upd_cmd = I40E_NVMUPD_WRITE_ERA;
- break;
- case I40E_NVM_CSUM:
- upd_cmd = I40E_NVMUPD_CSUM_CON;
- break;
- case (I40E_NVM_CSUM|I40E_NVM_SA):
- upd_cmd = I40E_NVMUPD_CSUM_SA;
- break;
- case (I40E_NVM_CSUM|I40E_NVM_LCB):
- upd_cmd = I40E_NVMUPD_CSUM_LCB;
- break;
- case I40E_NVM_EXEC:
- if (module == 0)
- upd_cmd = I40E_NVMUPD_EXEC_AQ;
- break;
- }
- break;
- }
-
- return upd_cmd;
-}
-
-/**
- * i40e_nvmupd_exec_aq - Run an AQ command
+ * i40e_nvmupd_command - Process an NVM update command
* @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
+ * @cmd: pointer to nvm update command
* @bytes: pointer to the data buffer
* @perrno: pointer to return error code
*
- * cmd structure contains identifiers and data buffer
+ * Dispatches command depending on what update state is current
**/
-static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- struct i40e_asq_cmd_details cmd_details;
- struct i40e_aq_desc *aq_desc;
- u32 buff_size = 0;
- u8 *buff = NULL;
- u32 aq_desc_len;
- u32 aq_data_len;
+ enum i40e_nvmupd_cmd upd_cmd;
int status;
- i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
- if (cmd->offset == 0xffff)
- return 0;
+ /* assume success */
+ *perrno = 0;
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ /* early check for status command and debug msgs */
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
- aq_desc_len = sizeof(struct i40e_aq_desc);
- memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_nvm_update_state_str[upd_cmd],
+ hw->nvmupd_state,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
+ cmd->command, cmd->config, cmd->offset, cmd->data_size);
- /* get the aq descriptor */
- if (cmd->data_size < aq_desc_len) {
+ if (upd_cmd == I40E_NVMUPD_INVALID) {
+ *perrno = -EFAULT;
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
- cmd->data_size, aq_desc_len);
- *perrno = -EINVAL;
- return -EINVAL;
+ "i40e_nvmupd_validate_command returns %d errno %d\n",
+ upd_cmd, *perrno);
}
- aq_desc = (struct i40e_aq_desc *)bytes;
- /* if data buffer needed, make sure it's ready */
- aq_data_len = cmd->data_size - aq_desc_len;
- buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
- if (buff_size) {
- if (!hw->nvm_buff.va) {
- status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
- hw->aq.asq_buf_size);
- if (status)
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
- status);
- }
-
- if (hw->nvm_buff.va) {
- buff = hw->nvm_buff.va;
- memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ /* a status request returns immediately rather than
+ * going into the state machine
+ */
+ if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return -EINVAL;
}
- }
-
- if (cmd->offset)
- memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
-
- /* and away we go! */
- status = i40e_asq_send_command(hw, aq_desc, buff,
- buff_size, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "%s err %pe aq_err %s\n",
- __func__, ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
- return status;
- }
-
- /* should we wait for a followup event? */
- if (cmd->offset) {
- hw->nvm_wait_opcode = cmd->offset;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
- }
-
- return status;
-}
-/**
- * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
- **/
-static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
-{
- u32 aq_total_len;
- u32 aq_desc_len;
- int remainder;
- u8 *buff;
-
- i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
-
- aq_desc_len = sizeof(struct i40e_aq_desc);
- aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+ bytes[0] = hw->nvmupd_state;
- /* check offset range */
- if (cmd->offset > aq_total_len) {
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
- __func__, cmd->offset, aq_total_len);
- *perrno = -EINVAL;
- return -EINVAL;
- }
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
- /* check copylength range */
- if (cmd->data_size > (aq_total_len - cmd->offset)) {
- int new_len = aq_total_len - cmd->offset;
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
- __func__, cmd->data_size, new_len);
- cmd->data_size = new_len;
+ return 0;
}
- remainder = cmd->data_size;
- if (cmd->offset < aq_desc_len) {
- u32 len = aq_desc_len - cmd->offset;
-
- len = min(len, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
- __func__, cmd->offset, cmd->offset + len);
-
- buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
- memcpy(bytes, buff, len);
-
- bytes += len;
- remainder -= len;
- buff = hw->nvm_buff.va;
- } else {
- buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ /* Clear status even if it has not been read, and log it */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
- if (remainder > 0) {
- int start_byte = buff - (u8 *)hw->nvm_buff.va;
-
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
- __func__, start_byte, start_byte + remainder);
- memcpy(bytes, buff, remainder);
- }
+ /* Acquire lock to prevent race condition where adminq_task
+ * can execute after i40e_nvmupd_nvm_read/write but before state
+ * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
+ *
+ * During NVMUpdate, it is observed that lock could be held for
+ * ~5ms for most commands. However lock is held for ~60ms for
+ * NVMUPD_CSUM_LCB command.
+ */
+ mutex_lock(&hw->aq.arq_mutex);
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT:
+ status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
+ break;
- return 0;
-}
+ case I40E_NVMUPD_STATE_READING:
+ status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
+ break;
-/**
- * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
- **/
-static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
-{
- u32 aq_total_len;
- u32 aq_desc_len;
+ case I40E_NVMUPD_STATE_WRITING:
+ status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+ break;
- i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_clear_wait_state(hw);
+ status = 0;
+ break;
+ }
- aq_desc_len = sizeof(struct i40e_aq_desc);
- aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+ status = -EBUSY;
+ *perrno = -EBUSY;
+ break;
- /* check copylength range */
- if (cmd->data_size > aq_total_len) {
+ default:
+ /* invalid state, should never happen */
i40e_debug(hw, I40E_DEBUG_NVM,
- "%s: copy length %d too big, trimming to %d\n",
- __func__, cmd->data_size, aq_total_len);
- cmd->data_size = aq_total_len;
+ "NVMUPD: no such state %d\n", hw->nvmupd_state);
+ status = -EOPNOTSUPP;
+ *perrno = -ESRCH;
+ break;
}
- memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
-
- return 0;
+ mutex_unlock(&hw->aq.arq_mutex);
+ return status;
}
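The I40E_NVMUPD_STATUS fast path above answers without entering the state machine: byte 0 of the reply is the current nvmupd state and, when at least four bytes were supplied, bytes 2-3 carry the opcode being waited on. A sketch of that reply layout; the state and opcode values are illustrative:

/* Status reply sketch: state in byte 0, byte 1 reserved, wait opcode in
 * bytes 2-3.  The state and opcode values are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint8_t bytes[4] = { 0 };
        uint8_t state = 1;              /* e.g. a "reading" state */
        uint16_t wait_opcode = 0x0703;  /* assumed AQ opcode */
        uint16_t readback;

        bytes[0] = state;
        bytes[1] = 0;
        memcpy(&bytes[2], &wait_opcode, sizeof(wait_opcode));

        memcpy(&readback, &bytes[2], sizeof(readback));
        printf("state %d wait opcode 0x%04x\n", bytes[0], (unsigned int)readback);
        return 0;
}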
/**
- * i40e_nvmupd_nvm_read - Read NVM
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
+ * @hw: pointer to the hardware structure
**/
-static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
- struct i40e_asq_cmd_details cmd_details;
- u8 module, transaction;
- int status;
- bool last;
-
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
- last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
-
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n",
+ hw->nvm_wait_opcode);
- status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- bytes, last, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_read status %d aq %d\n",
- status, hw->aq.asq_last_status);
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
}
+ hw->nvm_wait_opcode = 0;
- return status;
-}
-
-/**
- * i40e_nvmupd_nvm_erase - Erase an NVM module
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @perrno: pointer to return error code
- *
- * module, offset, data_size and data are in cmd structure
- **/
-static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno)
-{
- struct i40e_asq_cmd_details cmd_details;
- u8 module, transaction;
- int status = 0;
- bool last;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
- last = (transaction & I40E_NVM_LCB);
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
- status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- last, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_erase status %d aq %d\n",
- status, hw->aq.asq_last_status);
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ default:
+ break;
}
-
- return status;
}
/**
- * i40e_nvmupd_nvm_write - Write NVM
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * module, offset, data_size and data are in cmd structure
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ * @desc: AdminQ descriptor
**/
-static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc)
{
- struct i40e_asq_cmd_details cmd_details;
- u8 module, transaction;
- u8 preservation_flags;
- int status = 0;
- bool last;
-
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
- last = (transaction & I40E_NVM_LCB);
- preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
-
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ u32 aq_desc_len = sizeof(struct i40e_aq_desc);
- status = i40e_aq_update_nvm(hw, module, cmd->offset,
- (u16)cmd->data_size, bytes, last,
- preservation_flags, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_write status %d aq %d\n",
- status, hw->aq.asq_last_status);
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ if (opcode == hw->nvm_wait_opcode) {
+ memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
+ i40e_nvmupd_clear_wait_state(hw);
}
-
- return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 001162042050..5a0699ca7ce5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -371,13 +371,6 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
int i40e_set_mac_type(struct i40e_hw *hw);
-extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
-
-static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
-{
- return i40e_ptype_lookup[ptype];
-}
-
/**
* i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
* @link_speed: the speed to convert
@@ -501,4 +494,73 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
/* i40e_ddp */
int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
+/* Firmware and AdminQ version check helpers */
+
+/**
+ * i40e_is_aq_api_ver_ge
+ * @hw: pointer to i40e_hw structure
+ * @maj: API major value to compare
+ * @min: API minor value to compare
+ *
+ * Assert whether the current HW API version is greater than or equal to the provided one.
+ **/
+static inline bool i40e_is_aq_api_ver_ge(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return (hw->aq.api_maj_ver > maj ||
+ (hw->aq.api_maj_ver == maj && hw->aq.api_min_ver >= min));
+}
+
+/**
+ * i40e_is_aq_api_ver_lt
+ * @hw: pointer to i40e_hw structure
+ * @maj: API major value to compare
+ * @min: API minor value to compare
+ *
+ * Assert whether the current HW API version is less than the provided one.
+ **/
+static inline bool i40e_is_aq_api_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return !i40e_is_aq_api_ver_ge(hw, maj, min);
+}
+
+/**
+ * i40e_is_fw_ver_ge
+ * @hw: pointer to i40e_hw structure
+ * @maj: firmware major value to compare
+ * @min: firmware minor value to compare
+ *
+ * Assert whether the current firmware version is greater than or equal to the provided one.
+ **/
+static inline bool i40e_is_fw_ver_ge(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return (hw->aq.fw_maj_ver > maj ||
+ (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver >= min));
+}
+
+/**
+ * i40e_is_fw_ver_lt
+ * @hw: pointer to i40e_hw structure
+ * @maj: firmware major value to compare
+ * @min: firmware minor value to compare
+ *
+ * Assert whether the current firmware version is less than the provided one.
+ **/
+static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return !i40e_is_fw_ver_ge(hw, maj, min);
+}
+
+/**
+ * i40e_is_fw_ver_eq
+ * @hw: pointer to i40e_hw structure
+ * @maj: firmware major value to compare
+ * @min: firmware minor value to compare
+ *
+ * Assert whether the current firmware version is equal to the provided one.
+ **/
+static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min);
+}
+
#endif /* _I40E_PROTOTYPE_H_ */
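The inline helpers added above centralize the usual (major, minor) version comparisons. A standalone sketch of the same ordering with illustrative version numbers:

/* (major, minor) comparison sketch mirroring the helpers above; the
 * firmware version below is illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ver_ge(uint16_t maj, uint16_t min, uint16_t ref_maj, uint16_t ref_min)
{
        return maj > ref_maj || (maj == ref_maj && min >= ref_min);
}

int main(void)
{
        uint16_t fw_maj = 1, fw_min = 6;        /* assumed firmware version */

        printf("ge 1.5: %d\n", ver_ge(fw_maj, fw_min, 1, 5));   /* 1 */
        printf("ge 2.0: %d\n", ver_ge(fw_maj, fw_min, 2, 0));   /* 0 */
        printf("lt 2.0: %d\n", !ver_ge(fw_maj, fw_min, 2, 0));  /* 1 */
        return 0;
}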
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 20b77398f060..b72a4b5d76b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -35,7 +35,7 @@ enum i40e_ptp_pin {
GPIO_4
};
-enum i40e_can_set_pins_t {
+enum i40e_can_set_pins {
CANT_DO_PINS = -1,
CAN_SET_PINS,
CAN_DO_PINS
@@ -193,7 +193,7 @@ static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw)
* return CAN_DO_PINS if pins can be manipulated within a NIC or
* return CANT_DO_PINS otherwise.
**/
-static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf)
+static enum i40e_can_set_pins i40e_can_set_pins(struct i40e_pf *pf)
{
if (!i40e_is_ptp_pin_dev(&pf->hw)) {
dev_warn(&pf->pdev->dev,
@@ -680,7 +680,7 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf)
* configured. We don't want to spuriously warn about Rx timestamp
* hangs if we don't care about the timestamps.
*/
- if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_rx)
return;
spin_lock_bh(&pf->ptp_rx_lock);
@@ -733,7 +733,7 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf)
{
struct sk_buff *skb;
- if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_tx)
return;
/* Nothing to do if we're not already waiting for a timestamp */
@@ -771,7 +771,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
u32 hi, lo;
u64 ns;
- if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_tx)
return;
/* don't attempt to timestamp if we don't have an skb */
@@ -818,7 +818,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
/* Since we cannot turn off the Rx timestamp logic if the device is
* doing Tx timestamping, check if Rx timestamping is configured.
*/
- if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_rx)
return;
hw = &pf->hw;
@@ -924,7 +924,7 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
{
struct hwtstamp_config *config = &pf->tstamp_config;
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
@@ -1071,7 +1071,7 @@ static void i40e_ptp_set_pins_hw(struct i40e_pf *pf)
static int i40e_ptp_set_pins(struct i40e_pf *pf,
struct i40e_ptp_pins_settings *pins)
{
- enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf);
+ enum i40e_can_set_pins pin_caps = i40e_can_set_pins(pf);
int i = 0;
if (pin_caps == CANT_DO_PINS)
@@ -1211,7 +1211,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps))
return -ERANGE;
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
@@ -1225,7 +1225,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps))
return -ERANGE;
fallthrough;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -1234,7 +1234,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
I40E_PRTTSYN_CTL1_TSYNTYPE_V2;
- if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) {
tsyntype |= I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
} else {
@@ -1308,7 +1308,7 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -1426,7 +1426,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
void i40e_ptp_save_hw_time(struct i40e_pf *pf)
{
/* don't try to access the PTP clock if it's not enabled */
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return;
i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL);
@@ -1472,7 +1472,8 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf)
**/
void i40e_ptp_init(struct i40e_pf *pf)
{
- struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+ struct net_device *netdev = vsi->netdev;
struct i40e_hw *hw = &pf->hw;
u32 pf_id;
long err;
@@ -1480,10 +1481,10 @@ void i40e_ptp_init(struct i40e_pf *pf)
/* Only one PF is assigned to control 1588 logic per port. Do not
* enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID
*/
- pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
- I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
+ pf_id = FIELD_GET(I40E_PRTTSYN_CTL0_PF_ID_MASK,
+ rd32(hw, I40E_PRTTSYN_CTL0));
if (hw->pf_id != pf_id) {
- pf->flags &= ~I40E_FLAG_PTP;
+ clear_bit(I40E_FLAG_PTP_ENA, pf->flags);
dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n",
__func__,
netdev->name);
@@ -1504,7 +1505,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
if (pf->hw.debug_mask & I40E_DEBUG_LAN)
dev_info(&pf->pdev->dev, "PHC enabled\n");
- pf->flags |= I40E_FLAG_PTP;
+ set_bit(I40E_FLAG_PTP_ENA, pf->flags);
/* Ensure the clocks are running. */
regval = rd32(hw, I40E_PRTTSYN_CTL0);
@@ -1536,10 +1537,11 @@ void i40e_ptp_init(struct i40e_pf *pf)
**/
void i40e_ptp_stop(struct i40e_pf *pf)
{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
u32 regval;
- pf->flags &= ~I40E_FLAG_PTP;
+ clear_bit(I40E_FLAG_PTP_ENA, pf->flags);
pf->ptp_tx = false;
pf->ptp_rx = false;
@@ -1555,7 +1557,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
ptp_clock_unregister(pf->ptp_clock);
pf->ptp_clock = NULL;
dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
- pf->vsi[pf->lan_vsi]->netdev->name);
+ main_vsi->netdev->name);
}
if (i40e_is_ptp_pin_dev(&pf->hw)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index f408fcf23ce8..432afbb64201 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -207,7 +207,7 @@
#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
#define I40E_GLGEN_MSCA_OPCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
@@ -333,8 +333,11 @@
#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
@@ -863,16 +866,6 @@
#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
#define I40E_PFPM_WUFC_MAG_SHIFT 1
#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
-#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
-#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
-#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
-#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
-#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
-#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
-#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
-#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
-#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
-#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VFQF_HLUT_MAX_INDEX 15
@@ -899,6 +892,7 @@
#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
/* Redefined for X722 family */
#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
#endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index dd410b15000f..c006f716a3bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>
#include <net/mpls.h>
@@ -23,7 +24,7 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
{
struct i40e_filter_program_desc *fdir_desc;
struct i40e_pf *pf = tx_ring->vsi->back;
- u32 flex_ptype, dtype_cmd;
+ u32 flex_ptype, dtype_cmd, vsi_id;
u16 i;
/* grab the next descriptor */
@@ -33,19 +34,16 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
- flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
- (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
+ flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK, fdata->q_index);
- flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
- (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
+ flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_FLEXOFF_MASK,
+ fdata->flex_off);
- flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
- (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+ flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype);
/* Use LAN VSI Id if not programmed by user */
- flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
- ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+ vsi_id = fdata->dest_vsi ? : i40e_pf_get_main_vsi(pf)->id;
+ flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK, vsi_id);
dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
@@ -55,17 +53,15 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
I40E_TXD_FLTR_QW1_PCMD_SHIFT;
- dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
- (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
+ dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_DEST_MASK, fdata->dest_ctl);
- dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
- (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
+ dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_FD_STATUS_MASK,
+ fdata->fd_status);
if (fdata->cnt_index) {
dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
- dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
- ((u32)fdata->cnt_index <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
+ dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
+ fdata->cnt_index);
}
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
@@ -464,7 +460,7 @@ static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
&pf->fd_tcp6_filter_cnt);
if (add) {
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
@@ -691,8 +687,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
u32 error;
qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
- error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
- I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+ error = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK, qword1);
if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
@@ -734,7 +729,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
* FD ATR/SB and then re-enable it when there is room.
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
!test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
pf->state))
if (I40E_DEBUG_FD & pf->hw.debug_mask)
@@ -866,13 +861,15 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
/**
* i40e_detect_recover_hung - Function to detect and recover hung_queues
- * @vsi: pointer to vsi struct with tx queues
+ * @pf: pointer to PF struct
*
- * VSI has netdev and netdev has TX queues. This function is to check each of
- * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
+ * The LAN VSI has a netdev and the netdev has TX queues. This function
+ * checks each of those TX queues for a hang and, if one is found,
+ * triggers recovery by issuing a SW interrupt.
**/
-void i40e_detect_recover_hung(struct i40e_vsi *vsi)
+void i40e_detect_recover_hung(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_ring *tx_ring = NULL;
struct net_device *netdev;
unsigned int i;
@@ -1071,7 +1068,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
if (q_vector->arm_wb_state)
return;
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
@@ -1095,7 +1092,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
**/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
@@ -1403,8 +1400,7 @@ void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
{
u8 id;
- id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
- I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
+ id = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK, qword1);
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
@@ -1555,7 +1551,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- int err;
u64_stats_init(&rx_ring->syncp);
@@ -1576,14 +1571,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_process = 0;
rx_ring->next_to_use = 0;
- /* XDP RX-queue info only needed for RX rings exposed to XDP */
- if (rx_ring->vsi->type == I40E_VSI_MAIN) {
- err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
- if (err < 0)
- return err;
- }
-
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
rx_ring->rx_bi =
@@ -1757,40 +1744,30 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
union i40e_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded;
+ struct libeth_rx_pt decoded;
u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype;
u64 qword;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
- decoded = decode_rx_desc_ptype(ptype);
-
skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
- /* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ decoded = libie_rx_pt_parse(ptype);
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
return;
+ rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
+ rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
+
/* did the hardware decode the packet and checksum? */
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* both known and outer_ip must be set for the below code to work */
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (ipv4 &&
(rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
@@ -1818,20 +1795,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
- if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case I40E_RX_PTYPE_INNER_PROT_TCP:
- case I40E_RX_PTYPE_INNER_PROT_UDP:
- case I40E_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- fallthrough;
- default:
- break;
- }
-
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
checksum_fail:
@@ -1839,29 +1806,6 @@ checksum_fail:
}
/**
- * i40e_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- **/
-static inline int i40e_ptype_to_htype(u8 ptype)
-{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
-
- if (!decoded.known)
- return PKT_HASH_TYPE_NONE;
-
- if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
- return PKT_HASH_TYPE_L4;
- else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
- return PKT_HASH_TYPE_L3;
- else
- return PKT_HASH_TYPE_L2;
-}
-
-/**
* i40e_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
@@ -1873,17 +1817,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
struct sk_buff *skb,
u8 rx_ptype)
{
+ struct libeth_rx_pt decoded;
u32 hash;
const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (!(ring->netdev->features & NETIF_F_RXHASH))
+ decoded = libie_rx_pt_parse(rx_ptype);
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
}
@@ -1901,13 +1847,10 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, struct sk_buff *skb)
{
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ u32 rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
- u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
- I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
- u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
+ u32 tsyn = FIELD_GET(I40E_RXD_QW1_STATUS_TSYNINDX_MASK, rx_status);
+ u8 rx_ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
if (unlikely(tsynvalid))
i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
@@ -2099,7 +2042,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
struct xdp_buff *xdp)
{
- u32 next = rx_ring->next_to_clean;
+ u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+ u32 next = rx_ring->next_to_clean, i = 0;
struct i40e_rx_buffer *rx_buffer;
xdp->flags = 0;
@@ -2112,10 +2056,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
if (!rx_buffer->page)
continue;
- if (xdp_res == I40E_XDP_CONSUMED)
- rx_buffer->pagecnt_bias++;
- else
+ if (xdp_res != I40E_XDP_CONSUMED)
i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
+ else if (i++ <= nr_frags)
+ rx_buffer->pagecnt_bias++;
/* EOP buffer will be put in i40e_clean_rx_irq() */
if (next == rx_ring->next_to_process)
@@ -2129,20 +2073,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
* i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
* skb correctly.
*/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
- struct xdp_buff *xdp,
- u32 nr_frags)
+ struct xdp_buff *xdp)
{
unsigned int size = xdp->data_end - xdp->data;
struct i40e_rx_buffer *rx_buffer;
+ struct skb_shared_info *sinfo;
unsigned int headlen;
struct sk_buff *skb;
+ u32 nr_frags = 0;
/* prefetch first cache line of first page */
net_prefetch(xdp->data);
@@ -2164,9 +2108,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
*/
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- I40E_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, I40E_RX_HDR_SIZE);
if (unlikely(!skb))
return NULL;
@@ -2180,6 +2122,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
memcpy(__skb_put(skb, headlen), xdp->data,
ALIGN(headlen, sizeof(long)));
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
/* update all of the pointers */
size -= headlen;
@@ -2199,9 +2145,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
}
if (unlikely(xdp_buff_has_frags(xdp))) {
- struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
- sinfo = xdp_get_shared_info_from_buff(xdp);
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
sizeof(skb_frag_t) * nr_frags);
@@ -2224,17 +2169,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
* i40e_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
- struct xdp_buff *xdp,
- u32 nr_frags)
+ struct xdp_buff *xdp)
{
unsigned int metasize = xdp->data - xdp->data_meta;
+ struct skb_shared_info *sinfo;
struct sk_buff *skb;
+ u32 nr_frags;
/* Prefetch first cache line of first page. If xdp->data_meta
* is unused, this points exactly as xdp->data, otherwise we
@@ -2243,6 +2188,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
*/
net_prefetch(xdp->data_meta);
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
+
/* build an skb around the page buffer */
skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
@@ -2255,9 +2205,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp))) {
- struct skb_shared_info *sinfo;
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
xdp_update_skb_shared_info(skb, nr_frags,
sinfo->xdp_frags_size,
nr_frags * xdp->frame_sz,
@@ -2554,8 +2501,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
continue;
}
- size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
if (!size)
break;
@@ -2602,9 +2548,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
total_rx_bytes += size;
} else {
if (ring_uses_build_skb(rx_ring))
- skb = i40e_build_skb(rx_ring, xdp, nfrags);
+ skb = i40e_build_skb(rx_ring, xdp);
else
- skb = i40e_construct_skb(rx_ring, xdp, nfrags);
+ skb = i40e_construct_skb(rx_ring, xdp);
/* drop if we failed to retrieve a buffer */
if (!skb) {
@@ -2646,7 +2592,22 @@ process_next:
return failure ? budget : (int)total_rx_packets;
}
-static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+/**
+ * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register
+ * @itr_idx: interrupt throttling index
+ * @interval: interrupt throttling interval value in usecs
+ * @force_swint: force software interrupt
+ *
+ * The function builds a value for the I40E_PFINT_DYN_CTLN register that
+ * updates the interrupt throttling interval for the specified ITR index
+ * and optionally enforces a software interrupt. If @itr_idx is equal to
+ * I40E_ITR_NONE then no interval change is applied and only the
+ * @force_swint parameter is taken into account. If neither an interval
+ * change nor a software interrupt is requested, the built value simply
+ * enables the appropriate vector interrupt.
+ **/
+static u32 i40e_buildreg_itr(enum i40e_dyn_idx itr_idx, u16 interval,
+ bool force_swint)
{
u32 val;
@@ -2660,23 +2621,33 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
* an event in the PBA anyway so we need to rely on the automask
* to hold pending events for us until the interrupt is re-enabled
*
- * The itr value is reported in microseconds, and the register
- * value is recorded in 2 microsecond units. For this reason we
- * only need to shift by the interval shift - 1 instead of the
- * full value.
+ * The given interval is reported in microseconds, while the register
+ * field is recorded in 2 microsecond units, hence the right shift by one.
*/
- itr &= I40E_ITR_MASK;
+ interval >>= 1;
+ /* 1. Enable vector interrupt
+ * 2. Update the interval for the specified ITR index
+ * (I40E_ITR_NONE in the register is used to indicate that
+ * no interval update is requested)
+ */
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, itr_idx) |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, interval);
+
+ /* 3. Enforce a software interrupt trigger if requested
+ * (the rate of these software interrupts is limited by ITR2, which is
+ * set to 20K interrupts per second)
+ */
+ if (force_swint)
+ val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK,
+ I40E_SW_ITR);
return val;
}
-/* a small macro to shorten up some long lines */
-#define INTREG I40E_PFINT_DYN_CTLN
-
/* The act of updating the ITR will cause it to immediately trigger. In order
* to prevent this from throwing off adaptive update statistics we defer the
* update so that it can only happen so often. So after either Tx or Rx are
@@ -2695,11 +2666,13 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector)
{
+ enum i40e_dyn_idx itr_idx = I40E_ITR_NONE;
struct i40e_hw *hw = &vsi->back->hw;
- u32 intval;
+ u16 interval = 0;
+ u32 itr_val;
/* If we don't have MSIX, then we only need to re-enable icr0 */
- if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
+ if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
i40e_irq_dynamic_enable_icr0(vsi->back);
return;
}
@@ -2718,8 +2691,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
/* Rx ITR needs to be reduced, this is highest priority */
- intval = i40e_buildreg_itr(I40E_RX_ITR,
- q_vector->rx.target_itr);
+ itr_idx = I40E_RX_ITR;
+ interval = q_vector->rx.target_itr;
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
@@ -2728,25 +2701,36 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
/* Tx ITR needs to be reduced, this is second priority
* Tx ITR needs to be increased more than Rx, fourth priority
*/
- intval = i40e_buildreg_itr(I40E_TX_ITR,
- q_vector->tx.target_itr);
+ itr_idx = I40E_TX_ITR;
+ interval = q_vector->tx.target_itr;
q_vector->tx.current_itr = q_vector->tx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
/* Rx ITR needs to be increased, third priority */
- intval = i40e_buildreg_itr(I40E_RX_ITR,
- q_vector->rx.target_itr);
+ itr_idx = I40E_RX_ITR;
+ interval = q_vector->rx.target_itr;
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else {
/* No ITR update, lowest priority */
- intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
}
- if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(q_vector->reg_idx), intval);
+ /* Do not update interrupt control register if VSI is down */
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ /* Update ITR interval if necessary and enforce software interrupt
+ * if we are exiting busy poll.
+ */
+ if (q_vector->in_busy_poll) {
+ itr_val = i40e_buildreg_itr(itr_idx, interval, true);
+ q_vector->in_busy_poll = false;
+ } else {
+ itr_val = i40e_buildreg_itr(itr_idx, interval, false);
+ }
+ wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val);
}
/**
@@ -2861,6 +2845,8 @@ tx_only:
*/
if (likely(napi_complete_done(napi, work_done)))
i40e_update_enable_itr(vsi, q_vector);
+ else
+ q_vector->in_busy_poll = true;
return min(work_done, budget - 1);
}
@@ -2888,7 +2874,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i;
/* make sure ATR is enabled */
- if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
return;
if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
@@ -2933,7 +2919,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* Due to lack of space, no more new filters can be programmed */
if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
return;
- if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
+ if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) {
/* HW ATR eviction will take care of removing filters on FIN
* and RST packets.
*/
@@ -2959,8 +2945,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
- flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
- I40E_TXD_FLTR_QW0_QINDEX_MASK;
+ flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK,
+ tx_ring->queue_index);
flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
(I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
@@ -2986,16 +2972,14 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
dtype_cmd |=
- ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
+ I40E_FD_ATR_STAT_IDX(pf->hw.pf_id));
else
dtype_cmd |=
- ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
+ I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id));
- if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
+ if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags))
dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
@@ -3053,7 +3037,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= I40E_TX_FLAGS_SW_VLAN;
}
- if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+ if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
goto out;
/* Insert 802.1p priority into VLAN header */
@@ -3229,7 +3213,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
* we are not already transmitting a packet to be timestamped
*/
pf = i40e_netdev_to_pf(tx_ring->netdev);
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return 0;
if (pf->ptp_tx &&
@@ -3601,8 +3585,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
- td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
- I40E_TX_FLAGS_VLAN_SHIFT;
+ td_tag = FIELD_GET(I40E_TX_FLAGS_VLAN_MASK, tx_flags);
}
first->tx_flags = tx_flags;
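The reworked i40e_buildreg_itr() earlier in this file's diff folds an ITR index, an interval (stored by hardware in 2-usec units, hence the right shift by one) and an optional software-interrupt trigger into one register value. A minimal sketch of the same composition, assuming kernel context; the EX_* field definitions are illustrative stand-ins mirroring the PFINT_DYN_CTLN layout, while the real masks live in i40e_register.h:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_DYN_CTLN_INTENA		BIT(0)
#define EX_DYN_CTLN_SWINT_TRIG		BIT(2)
#define EX_DYN_CTLN_ITR_INDX_MASK	GENMASK(4, 3)
#define EX_DYN_CTLN_INTERVAL_MASK	GENMASK(16, 5)
#define EX_DYN_CTLN_SW_ITR_INDX_ENA	BIT(24)
#define EX_DYN_CTLN_SW_ITR_INDX_MASK	GENMASK(26, 25)
#define EX_SW_ITR			2	/* ITR2 rate-limits the software interrupts */

static u32 ex_buildreg_itr(u32 itr_idx, u16 usecs, bool force_swint)
{
	/* enable the vector and program the interval in 2-usec units */
	u32 val = EX_DYN_CTLN_INTENA |
		  FIELD_PREP(EX_DYN_CTLN_ITR_INDX_MASK, itr_idx) |
		  FIELD_PREP(EX_DYN_CTLN_INTERVAL_MASK, usecs >> 1);

	/* optionally fire a software interrupt, throttled by ITR2 */
	if (force_swint)
		val |= EX_DYN_CTLN_SWINT_TRIG |
		       EX_DYN_CTLN_SW_ITR_INDX_ENA |
		       FIELD_PREP(EX_DYN_CTLN_SW_ITR_INDX_MASK, EX_SW_ITR);

	return val;
}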
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 421fe5675584..7c26c9a2bf65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -58,7 +58,7 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl)
* mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any
* register but instead is a special value meaning "don't update" ITR0/1/2.
*/
-enum i40e_dyn_idx_t {
+enum i40e_dyn_idx {
I40E_IDX_ITR0 = 0,
I40E_IDX_ITR1 = 1,
I40E_IDX_ITR2 = 2,
@@ -68,6 +68,7 @@ enum i40e_dyn_idx_t {
/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_SW_ITR I40E_IDX_ITR2
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
@@ -92,8 +93,8 @@ enum i40e_dyn_idx_t {
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
#define i40e_pf_get_default_rss_hena(pf) \
- (((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
- I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+ (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? \
+ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
@@ -306,7 +307,7 @@ struct i40e_rx_queue_stats {
u64 page_busy_count;
};
-enum i40e_ring_state_t {
+enum i40e_ring_state {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
__I40E_RING_STATE_NBITS /* must be last */
@@ -469,7 +470,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
-void i40e_detect_recover_hung(struct i40e_vsi *vsi);
+void i40e_detect_recover_hung(struct i40e_pf *pf);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index aff6dc6afbe2..28568e126850 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -37,11 +37,11 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define I40E_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2
-#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK
+#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(1)
#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1)
#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(2)
-#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK
+#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(0)
#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_GLGEN_MSCA_OPCODE_MASK(0)
#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1)
#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(3)
@@ -64,9 +64,7 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
I40E_MAC_XL710,
- I40E_MAC_VF,
I40E_MAC_X722,
- I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
@@ -272,9 +270,7 @@ struct i40e_mac_info {
enum i40e_mac_type type;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
- u8 san_addr[ETH_ALEN];
u8 port_addr[ETH_ALEN];
- u16 max_fcoeq;
};
enum i40e_aq_resources_ids {
@@ -482,6 +478,36 @@ struct i40e_dcbx_config {
struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
};
+enum i40e_hw_flags {
+ I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE,
+ I40E_HW_CAP_802_1AD,
+ I40E_HW_CAP_AQ_PHY_ACCESS,
+ I40E_HW_CAP_NVM_READ_REQUIRES_LOCK,
+ I40E_HW_CAP_FW_LLDP_STOPPABLE,
+ I40E_HW_CAP_FW_LLDP_PERSISTENT,
+ I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED,
+ I40E_HW_CAP_X722_FEC_REQUEST,
+ I40E_HW_CAP_RSS_AQ,
+ I40E_HW_CAP_128_QP_RSS,
+ I40E_HW_CAP_ATR_EVICT,
+ I40E_HW_CAP_WB_ON_ITR,
+ I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ I40E_HW_CAP_NO_PCI_LINK_CHECK,
+ I40E_HW_CAP_100M_SGMII,
+ I40E_HW_CAP_NO_DCB_SUPPORT,
+ I40E_HW_CAP_USE_SET_LLDP_MIB,
+ I40E_HW_CAP_GENEVE_OFFLOAD,
+ I40E_HW_CAP_PTP_L4,
+ I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE,
+ I40E_HW_CAP_CRT_RETIMER,
+ I40E_HW_CAP_OUTER_UDP_CSUM,
+ I40E_HW_CAP_PHY_CONTROLS_LEDS,
+ I40E_HW_CAP_STOP_FW_LLDP,
+ I40E_HW_CAP_PORT_ID_VALID,
+ I40E_HW_CAP_RESTART_AUTONEG,
+ I40E_HW_CAPS_NBITS,
+};
+
/* Port hardware description */
struct i40e_hw {
u8 __iomem *hw_addr;
@@ -546,16 +572,7 @@ struct i40e_hw {
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
-#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
-#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
-#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
-#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
-#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
-#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
-#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
-#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
-#define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8)
- u64 flags;
+ DECLARE_BITMAP(caps, I40E_HW_CAPS_NBITS);
/* Used in set switch config AQ command */
u16 switch_tag;
@@ -567,12 +584,6 @@ struct i40e_hw {
char err_str[16];
};
-static inline bool i40e_is_vf(struct i40e_hw *hw)
-{
- return (hw->mac.type == I40E_MAC_VF ||
- hw->mac.type == I40E_MAC_X722_VF);
-}
-
struct i40e_driver_version {
u8 major_version;
u8 minor_version;
@@ -734,94 +745,6 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {
#define I40E_RXD_QW1_PTYPE_SHIFT 30
#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
-/* Packet type non-ip values */
-enum i40e_rx_l2_ptype {
- I40E_RX_PTYPE_L2_RESERVED = 0,
- I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
- I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
- I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
- I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
- I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
- I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
- I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
- I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- I40E_RX_PTYPE_L2_ARP = 11,
- I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
- I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
- I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
- I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
- I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
- I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
-};
-
-struct i40e_rx_ptype_decoded {
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:1;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum i40e_rx_ptype_outer_ip {
- I40E_RX_PTYPE_OUTER_L2 = 0,
- I40E_RX_PTYPE_OUTER_IP = 1
-};
-
-enum i40e_rx_ptype_outer_ip_ver {
- I40E_RX_PTYPE_OUTER_NONE = 0,
- I40E_RX_PTYPE_OUTER_IPV4 = 0,
- I40E_RX_PTYPE_OUTER_IPV6 = 1
-};
-
-enum i40e_rx_ptype_outer_fragmented {
- I40E_RX_PTYPE_NOT_FRAG = 0,
- I40E_RX_PTYPE_FRAG = 1
-};
-
-enum i40e_rx_ptype_tunnel_type {
- I40E_RX_PTYPE_TUNNEL_NONE = 0,
- I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum i40e_rx_ptype_tunnel_end_prot {
- I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
- I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum i40e_rx_ptype_inner_prot {
- I40E_RX_PTYPE_INNER_PROT_NONE = 0,
- I40E_RX_PTYPE_INNER_PROT_UDP = 1,
- I40E_RX_PTYPE_INNER_PROT_TCP = 2,
- I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
- I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
- I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
-};
-
-enum i40e_rx_ptype_payload_layer {
- I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
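The i40e_hw flag word replaced earlier in this file's diff becomes a bitmap indexed by enum i40e_hw_flags and queried with test_bit() instead of a bitwise AND, which is why the capability list can now grow past 64 entries without touching callers. A minimal sketch of the pattern with illustrative names, assuming kernel context:

#include <linux/bitmap.h>
#include <linux/types.h>

enum ex_hw_caps {
	EX_CAP_RSS_AQ,
	EX_CAP_WB_ON_ITR,
	EX_CAPS_NBITS,			/* must stay last */
};

struct ex_hw {
	DECLARE_BITMAP(caps, EX_CAPS_NBITS);
};

static bool ex_has_wb_on_itr(const struct ex_hw *hw)
{
	/* test_bit() takes a bit number, not a mask */
	return test_bit(EX_CAP_WB_ON_ITR, hw->caps);
}

static void ex_grant_rss_aq(struct ex_hw *hw)
{
	set_bit(EX_CAP_RSS_AQ, hw->caps);
}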
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 08d7edccfb8d..662622f01e31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -154,6 +154,32 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
(u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
+#ifdef CONFIG_PCI_IOV
+void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+ u16 vf_id;
+ u16 pos;
+
+ /* Continue only if this is a PF */
+ if (!pdev->is_physfn)
+ return;
+
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ struct pci_dev *vf_dev = NULL;
+
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
+ while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
+ if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
+ pci_restore_msi_state(vf_dev);
+ }
+ }
+}
+#endif /* CONFIG_PCI_IOV */
+
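The new i40e_restore_all_vfs_msi_state() above walks matching devices with pci_get_device(), whose iterator semantics keep reference counting implicit: each call drops the reference held on the device passed in and takes one on the device it returns, so a loop that runs until NULL leaves nothing pinned and needs no explicit pci_dev_put(). A minimal sketch of the same walk with an illustrative callback, assuming kernel context:

#include <linux/pci.h>

static void ex_for_each_matching_dev(unsigned int vendor, unsigned int device,
				     void (*cb)(struct pci_dev *pdev))
{
	struct pci_dev *pdev = NULL;

	/* pci_get_device() releases the reference on the device passed in
	 * and holds one on the device it returns; the final call returns
	 * NULL, so the loop exits with no leftover reference.
	 */
	while ((pdev = pci_get_device(vendor, device, pdev)))
		cb(pdev);
}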
/**
* i40e_vc_notify_vf_reset
* @vf: pointer to the VF structure
@@ -465,8 +491,6 @@ static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
u32 v_idx, reg_idx, reg;
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
v_idx = qv_info->v_idx;
if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
/* Figure out the queue after CEQ and make that the
@@ -474,10 +498,10 @@ static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
*/
reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
- next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
- >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
- next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
- >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
+ next_q_index = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK,
+ reg);
+ next_q_type = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK,
+ reg);
reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
reg = (next_q_index &
@@ -536,8 +560,6 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
for (i = 0; i < qvlist_info->num_vectors; i++) {
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
/* Validate vector id belongs to this vf */
if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
@@ -555,10 +577,10 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
* queue on top. Also link it with the new queue in CEQCTL.
*/
reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
- next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
- next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
- I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+ next_q_idx = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK,
+ reg);
+ next_q_type = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK,
+ reg);
if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
@@ -659,11 +681,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* associate this queue with the PCI VF function */
qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
- qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
- & I40E_QTX_CTL_PF_INDX_MASK);
- qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
- << I40E_QTX_CTL_VFVM_INDX_SHIFT)
- & I40E_QTX_CTL_VFVM_INDX_MASK);
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
+ vf->vf_id + hw->func_caps.vf_base_id);
wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
i40e_flush(hw);
@@ -775,13 +795,13 @@ error_param:
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
struct i40e_mac_filter *f = NULL;
+ struct i40e_vsi *main_vsi, *vsi;
struct i40e_pf *pf = vf->pf;
- struct i40e_vsi *vsi;
u64 max_tx_rate = 0;
int ret = 0;
- vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
- vf->vf_id);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, main_vsi->seid, vf->vf_id);
if (!vsi) {
dev_err(&pf->pdev->dev,
@@ -1604,8 +1624,8 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
- int i, v;
u32 reg;
+ int i;
/* If we don't have any VFs, then there is nothing to reset */
if (!pf->num_alloc_vfs)
@@ -1616,11 +1636,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
- vf = &pf->vf[v];
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* If VF is being reset no need to trigger reset again */
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
- i40e_trigger_vf_reset(&pf->vf[v], flr);
+ i40e_trigger_vf_reset(vf, flr);
}
/* HW requires some time to make sure it can flush the FIFO for a VF
@@ -1629,14 +1648,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
* the VFs using a simple iterator that increments once that VF has
* finished resetting.
*/
- for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+ for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
usleep_range(10000, 20000);
/* Check each VF in sequence, beginning with the VF to fail
* the previous check.
*/
- while (v < pf->num_alloc_vfs) {
- vf = &pf->vf[v];
+ while (vf < &pf->vf[pf->num_alloc_vfs]) {
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
@@ -1646,7 +1664,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
*/
- v++;
+ ++vf;
}
}
@@ -1656,39 +1674,39 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* Display a warning if at least one VF didn't manage to reset in
* time, but continue on with the operation.
*/
- if (v < pf->num_alloc_vfs)
+ if (vf < &pf->vf[pf->num_alloc_vfs])
dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
- pf->vf[v].vf_id);
+ vf->vf_id);
usleep_range(10000, 20000);
/* Begin disabling all the rings associated with VFs, but do not wait
* between each VF.
*/
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* On initial reset, we don't have any queues to disable */
- if (pf->vf[v].lan_vsi_idx == 0)
+ if (vf->lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
}
/* Now that we've notified HW to disable all of the VF rings, wait
* until they finish.
*/
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* On initial reset, we don't have any queues to disable */
- if (pf->vf[v].lan_vsi_idx == 0)
+ if (vf->lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
}
/* Hw may need up to 50ms to finish disabling the RX queues. We
@@ -1697,12 +1715,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
mdelay(50);
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_cleanup_reset_vf(&pf->vf[v]);
+ i40e_cleanup_reset_vf(vf);
}
i40e_flush(hw);
@@ -1808,7 +1826,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
if (ret) {
- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
pf->num_alloc_vfs = 0;
goto err_iov;
}
@@ -1919,8 +1937,8 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
}
if (num_vfs) {
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
ret = i40e_pci_sriov_enable(pdev, num_vfs);
@@ -1929,7 +1947,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
@@ -2137,14 +2155,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
} else {
- if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
+ if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) &&
(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
else
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) {
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_cap_flags |=
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
@@ -2153,12 +2171,12 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
- if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
+ if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) &&
(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
dev_err(&pf->pdev->dev,
"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
vf->vf_id);
@@ -2168,7 +2186,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
}
- if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) {
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
vfres->vf_cap_flags |=
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
@@ -2581,6 +2599,14 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
int aq_ret = 0;
int i;
+ if (vf->is_disabled_from_host) {
+ aq_ret = -EPERM;
+ dev_info(&pf->pdev->dev,
+ "Admin has disabled VF %d, will not enable queues\n",
+ vf->vf_id);
+ goto error_param;
+ }
+
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = -EINVAL;
goto error_param;
@@ -2816,6 +2842,24 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
+/**
+ * i40e_can_vf_change_mac
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise
+ */
+static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
+{
+ /* If the VF MAC address has been set administratively (via the
+ * ndo_set_vf_mac command), then deny the VF permission to add/delete
+ * unicast MAC addresses, unless the VF is trusted.
+ */
+ if (vf->pf_set_mac && !vf->trusted)
+ return false;
+
+ return true;
+}
+
#define I40E_MAX_MACVLAN_PER_HW 3072
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
(num_ports))
@@ -2875,8 +2919,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
+ if (!i40e_can_vf_change_mac(vf) &&
+ !is_multicast_ether_addr(addr) &&
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
@@ -3082,19 +3126,30 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = -EINVAL;
goto error_param;
}
- if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
- was_unimac_deleted = true;
}
vsi = pf->vsi[vf->lan_vsi_idx];
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */
- for (i = 0; i < al->num_elements; i++)
+ for (i = 0; i < al->num_elements; i++) {
+ const u8 *addr = al->list[i].addr;
+
+ /* Allow deleting the VF primary MAC only if it was not set
+ * administratively by the PF or if the VF is trusted.
+ */
+ if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ if (i40e_can_vf_change_mac(vf))
+ was_unimac_deleted = true;
+ else
+ continue;
+ }
+
if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = -EINVAL;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
}
+ }
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -3267,8 +3322,9 @@ error_param:
static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
struct i40e_pf *pf = vf->pf;
- int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+ struct i40e_vsi *main_vsi;
int aq_ret = 0;
+ int abs_vf_id;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
@@ -3276,8 +3332,9 @@ static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
- msg, msglen);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+ i40e_notify_client_of_vf_msg(main_vsi, abs_vf_id, msg, msglen);
error_param:
/* send the response to the VF */
@@ -3521,16 +3578,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
bool found = false;
int bkt;
- if (!tc_filter->action) {
+ if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
dev_info(&pf->pdev->dev,
- "VF %d: Currently ADq doesn't support Drop Action\n",
- vf->vf_id);
+ "VF %d: ADQ doesn't support this action (%d)\n",
+ vf->vf_id, tc_filter->action);
goto err;
}
/* action_meta is TC number here to which the filter is applied */
if (!tc_filter->action_meta ||
- tc_filter->action_meta > I40E_MAX_VF_VSI) {
+ tc_filter->action_meta > vf->num_tc) {
dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
vf->vf_id, tc_filter->action_meta);
goto err;
@@ -3844,7 +3901,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
int aq_ret = 0;
- int i, ret;
+ int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = -EINVAL;
@@ -3868,8 +3925,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
}
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
- if (!cfilter)
- return -ENOMEM;
+ if (!cfilter) {
+ aq_ret = -ENOMEM;
+ goto err_out;
+ }
/* parse destination mac address */
for (i = 0; i < ETH_ALEN; i++)
@@ -3917,13 +3976,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
/* Adding cloud filter programmed as TC filter */
if (tcf.dst_port)
- ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+ aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
else
- ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
- if (ret) {
+ aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+ if (aq_ret) {
dev_err(&pf->pdev->dev,
"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
- vf->vf_id, ERR_PTR(ret),
+ vf->vf_id, ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err_free;
}
@@ -4673,9 +4732,8 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ivi->max_tx_rate = vf->tx_rate;
ivi->min_tx_rate = 0;
- ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
- ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
- I40E_VLAN_PRIORITY_SHIFT;
+ ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK);
+ ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK);
if (vf->link_forced == false)
ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
else if (vf->link_up == true)
@@ -4706,9 +4764,12 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
struct i40e_link_status *ls = &pf->hw.phy.link_info;
struct virtchnl_pf_event pfe;
struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
+ unsigned long q_map;
struct i40e_vf *vf;
int abs_vf_id;
int ret = 0;
+ int tmp;
if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
@@ -4731,17 +4792,38 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
switch (link) {
case IFLA_VF_LINK_STATE_AUTO:
vf->link_forced = false;
+ vf->is_disabled_from_host = false;
+ /* reset needed to reinit VF resources */
+ i40e_vc_reset_vf(vf, true);
i40e_set_vf_link_state(vf, &pfe, ls);
break;
case IFLA_VF_LINK_STATE_ENABLE:
vf->link_forced = true;
vf->link_up = true;
+ vf->is_disabled_from_host = false;
+ /* reset needed to reinit VF resources */
+ i40e_vc_reset_vf(vf, true);
i40e_set_vf_link_state(vf, &pfe, ls);
break;
case IFLA_VF_LINK_STATE_DISABLE:
vf->link_forced = true;
vf->link_up = false;
i40e_set_vf_link_state(vf, &pfe, ls);
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ q_map = BIT(vsi->num_queue_pairs) - 1;
+
+ vf->is_disabled_from_host = true;
+
+ /* Try to stop both the Tx and Rx rings even if one of the calls
+ * fails, so that the rings are stopped even when errors occur.
+ * If either call returns an error, the first error that occurred
+ * is returned.
+ */
+ tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
+ ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);
+
+ ret = tmp ? tmp : ret;
break;
default:
ret = -EINVAL;
@@ -4841,7 +4923,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
goto out;
}
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
ret = -EINVAL;
goto out;
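The i40e_ndo_get_vf_config() change earlier in this file's diff switches the PVID decode to le16_get_bits(), which byte-swaps and extracts a field in one step instead of a le16_to_cpu() followed by a mask-and-shift. A minimal sketch with illustrative masks matching the usual 802.1Q layout (VLAN ID in bits 11:0, priority in bits 15:13), assuming kernel context:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_VLAN_ID_MASK		GENMASK(11, 0)
#define EX_VLAN_PRIO_MASK	GENMASK(15, 13)

static void ex_decode_pvid(__le16 pvid, u16 *vlan, u8 *prio)
{
	/* le16_get_bits() handles the endianness conversion and the shift */
	*vlan = le16_get_bits(pvid, EX_VLAN_ID_MASK);
	*prio = le16_get_bits(pvid, EX_VLAN_PRIO_MASK);
}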
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 2ee0f8a23248..66f95e2f3146 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -100,6 +100,7 @@ struct i40e_vf {
bool link_forced;
bool link_up; /* only valid if VF link is forced */
bool spoofchk;
+ bool is_disabled_from_host; /* PF ctrl of VF enable/disable */
u16 num_vlan;
/* ADq related variables */
@@ -137,6 +138,9 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
+#ifdef CONFIG_PCI_IOV
+void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev);
+#endif /* CONFIG_PCI_IOV */
int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index e99fa854d17f..a85b425794df 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -301,8 +301,7 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
if (unlikely(!skb))
goto out;
@@ -414,7 +413,8 @@ i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start), 0, size);
+ virt_to_page(xdp->data_hard_start),
+ XDP_PACKET_HEADROOM, size);
sinfo->xdp_frags_size += size;
xsk_buff_add_frag(xdp);
@@ -476,8 +476,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
continue;
}
- size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
if (!size)
break;
@@ -499,7 +498,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
&rx_bytes, xdp_res, &failure);
- first->flags = 0;
next_to_clean = next_to_process;
if (failure)
break;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index e7ab89dc883a..23a6557fc3db 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -287,11 +287,12 @@ struct iavf_adapter {
#define IAVF_FLAG_RESET_PENDING BIT(4)
#define IAVF_FLAG_RESET_NEEDED BIT(5)
#define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6)
-#define IAVF_FLAG_LEGACY_RX BIT(15)
+/* BIT(15) is free, was IAVF_FLAG_LEGACY_RX */
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
+#define IAVF_FLAG_FDIR_ENABLED BIT(21)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
@@ -312,7 +313,8 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
-#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15)
+#define IAVF_FLAG_AQ_SET_RSS_HFUNC BIT_ULL(15)
+#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(16)
#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
@@ -414,6 +416,7 @@ struct iavf_adapter {
struct iavf_vsi vsi;
u32 aq_wait_count;
/* RSS stuff */
+ enum virtchnl_rss_algorithm hfunc;
u64 hena;
u16 rss_key_size;
u16 rss_lut_size;
@@ -539,6 +542,7 @@ void iavf_get_hena(struct iavf_adapter *adapter);
void iavf_set_hena(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
void iavf_set_rss_lut(struct iavf_adapter *adapter);
+void iavf_set_rss_hfunc(struct iavf_adapter *adapter);
void iavf_enable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_disable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_virtchnl_completion(struct iavf_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 9ffbd24d83cb..82fcd18ad660 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -8,27 +8,6 @@
#include "iavf_prototype.h"
/**
- * iavf_adminq_init_regs - Initialize AdminQ registers
- * @hw: pointer to the hardware structure
- *
- * This assumes the alloc_asq and alloc_arq functions have already been called
- **/
-static void iavf_adminq_init_regs(struct iavf_hw *hw)
-{
- /* set head and tail registers in our local struct */
- hw->aq.asq.tail = IAVF_VF_ATQT1;
- hw->aq.asq.head = IAVF_VF_ATQH1;
- hw->aq.asq.len = IAVF_VF_ATQLEN1;
- hw->aq.asq.bal = IAVF_VF_ATQBAL1;
- hw->aq.asq.bah = IAVF_VF_ATQBAH1;
- hw->aq.arq.tail = IAVF_VF_ARQT1;
- hw->aq.arq.head = IAVF_VF_ARQH1;
- hw->aq.arq.len = IAVF_VF_ARQLEN1;
- hw->aq.arq.bal = IAVF_VF_ARQBAL1;
- hw->aq.arq.bah = IAVF_VF_ARQBAH1;
-}
-
-/**
* iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
@@ -259,17 +238,17 @@ static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
u32 reg = 0;
/* Clear Head and Tail */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, IAVF_VF_ATQH1, 0);
+ wr32(hw, IAVF_VF_ATQT1, 0);
/* set starting point */
- wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ wr32(hw, IAVF_VF_ATQLEN1, (hw->aq.num_asq_entries |
IAVF_VF_ATQLEN1_ATQENABLE_MASK));
- wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
- wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ATQBAL1, lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ATQBAH1, upper_32_bits(hw->aq.asq.desc_buf.pa));
/* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.asq.bal);
+ reg = rd32(hw, IAVF_VF_ATQBAL1);
if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
@@ -288,20 +267,20 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
u32 reg = 0;
/* Clear Head and Tail */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, IAVF_VF_ARQH1, 0);
+ wr32(hw, IAVF_VF_ARQT1, 0);
/* set starting point */
- wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ wr32(hw, IAVF_VF_ARQLEN1, (hw->aq.num_arq_entries |
IAVF_VF_ARQLEN1_ARQENABLE_MASK));
- wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
- wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ARQBAL1, lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ARQBAH1, upper_32_bits(hw->aq.arq.desc_buf.pa));
/* Update tail in the HW to post pre-allocated buffers */
- wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+ wr32(hw, IAVF_VF_ARQT1, hw->aq.num_arq_entries - 1);
/* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.arq.bal);
+ reg = rd32(hw, IAVF_VF_ARQBAL1);
if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
@@ -455,11 +434,11 @@ static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
}
/* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
- wr32(hw, hw->aq.asq.len, 0);
- wr32(hw, hw->aq.asq.bal, 0);
- wr32(hw, hw->aq.asq.bah, 0);
+ wr32(hw, IAVF_VF_ATQH1, 0);
+ wr32(hw, IAVF_VF_ATQT1, 0);
+ wr32(hw, IAVF_VF_ATQLEN1, 0);
+ wr32(hw, IAVF_VF_ATQBAL1, 0);
+ wr32(hw, IAVF_VF_ATQBAH1, 0);
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
@@ -489,11 +468,11 @@ static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
}
/* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
- wr32(hw, hw->aq.arq.len, 0);
- wr32(hw, hw->aq.arq.bal, 0);
- wr32(hw, hw->aq.arq.bah, 0);
+ wr32(hw, IAVF_VF_ARQH1, 0);
+ wr32(hw, IAVF_VF_ARQT1, 0);
+ wr32(hw, IAVF_VF_ARQLEN1, 0);
+ wr32(hw, IAVF_VF_ARQBAL1, 0);
+ wr32(hw, IAVF_VF_ARQBAH1, 0);
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
@@ -529,9 +508,6 @@ enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
goto init_adminq_exit;
}
- /* Set up register offsets */
- iavf_adminq_init_regs(hw);
-
/* setup ASQ command write back timeout */
hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
@@ -587,9 +563,9 @@ static u16 iavf_clean_asq(struct iavf_hw *hw)
desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = IAVF_ADMINQ_DETAILS(*asq, ntc);
- while (rd32(hw, hw->aq.asq.head) != ntc) {
+ while (rd32(hw, IAVF_VF_ATQH1) != ntc) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
- "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, IAVF_VF_ATQH1));
if (details->callback) {
IAVF_ADMINQ_CALLBACK cb_func =
@@ -624,7 +600,7 @@ bool iavf_asq_done(struct iavf_hw *hw)
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
- return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+ return rd32(hw, IAVF_VF_ATQH1) == hw->aq.asq.next_to_use;
}
/**
@@ -663,7 +639,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
hw->aq.asq_last_status = IAVF_AQ_RC_OK;
- val = rd32(hw, hw->aq.asq.head);
+ val = rd32(hw, IAVF_VF_ATQH1);
if (val >= hw->aq.num_asq_entries) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
@@ -755,7 +731,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
if (!details->postpone)
- wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+ wr32(hw, IAVF_VF_ATQT1, hw->aq.asq.next_to_use);
/* if cmd_details are not defined or async flag is not set,
* we need to wait for desc write back
@@ -810,7 +786,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
+ if (rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
@@ -878,7 +854,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
}
/* set next_to_use to head */
- ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
+ ntu = rd32(hw, IAVF_VF_ARQH1) & IAVF_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
@@ -926,7 +902,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
- wr32(hw, hw->aq.arq.tail, ntc);
+ wr32(hw, IAVF_VF_ARQT1, ntc);
/* ntc is updated to tail + 1 */
ntc++;
if (ntc == hw->aq.num_arq_entries)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index 1f60518eb0e5..406506f64bdd 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -29,13 +29,6 @@ struct iavf_adminq_ring {
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
-
- /* used for queue tracking */
- u32 head;
- u32 tail;
- u32 len;
- u32 bah;
- u32 bal;
};
/* ASQ transaction details */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
index 6edbf134b73f..a9e1da35e248 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
@@ -95,17 +95,21 @@ iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
* @rss_cfg: the virtchnl message to be filled with RSS configuration setting
* @packet_hdrs: the RSS configuration protocol header types
* @hash_flds: the RSS configuration protocol hash fields
+ * @symm: if true, symmetric hash is required
*
* Returns 0 if the RSS configuration virtchnl message is filled successfully
*/
int
iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
- u32 packet_hdrs, u64 hash_flds)
+ u32 packet_hdrs, u64 hash_flds, bool symm)
{
struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs;
struct virtchnl_proto_hdr *hdr;
- rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+ if (symm)
+ rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+ else
+ rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
proto_hdrs->tunnel_level = 0; /* always outer layer */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
index 4d3be11af7aa..e31eb2afebea 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
@@ -80,13 +80,14 @@ struct iavf_adv_rss {
u32 packet_hdrs;
u64 hash_flds;
+ bool symm;
struct virtchnl_rss_cfg cfg_msg;
};
int
iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
- u32 packet_hdrs, u64 hash_flds);
+ u32 packet_hdrs, u64 hash_flds, bool symm);
struct iavf_adv_rss *
iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs);
void
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 8091e6feca01..aa751ce3425b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/avf/virtchnl.h>
+#include <linux/bitfield.h>
#include "iavf_type.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"
-#include <linux/avf/virtchnl.h>
/**
* iavf_aq_str - convert AQ err code to a string
@@ -279,11 +280,11 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
**/
bool iavf_check_asq_alive(struct iavf_hw *hw)
{
- if (hw->aq.asq.len)
- return !!(rd32(hw, hw->aq.asq.len) &
- IAVF_VF_ATQLEN1_ATQENABLE_MASK);
- else
+ /* Check if the queue is initialized */
+ if (!hw->aq.asq.count)
return false;
+
+ return !!(rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQENABLE_MASK);
}
/**
@@ -330,6 +331,7 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
struct iavf_aq_desc desc;
struct iavf_aqc_get_set_rss_lut *cmd_resp =
(struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
+ u16 flags;
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
@@ -342,22 +344,18 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
- IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID);
+ vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
+ FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_VALID, 1);
+ cmd_resp->vsi_id = cpu_to_le16(vsi_id);
if (pf_lut)
- cmd_resp->flags |= cpu_to_le16((u16)
- ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
- IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ flags = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF);
else
- cmd_resp->flags |= cpu_to_le16((u16)
- ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
- IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ flags = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI);
+
+ cmd_resp->flags = cpu_to_le16(flags);
status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);
@@ -411,11 +409,9 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
- IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID);
+ vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
+ FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_VALID, 1);
+ cmd_resp->vsi_id = cpu_to_le16(vsi_id);
status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);
@@ -436,259 +432,6 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
}
-/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT iavf_ptype_lookup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum iavf_rx_l2_ptype to decode the packet type
- * ENDIF
- */
-
-/* macro to make the table lines short, use explicit indexing with [PTYPE] */
-#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = { \
- 1, \
- IAVF_RX_PTYPE_OUTER_##OUTER_IP, \
- IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- IAVF_RX_PTYPE_##OUTER_FRAG, \
- IAVF_RX_PTYPE_TUNNEL_##T, \
- IAVF_RX_PTYPE_TUNNEL_END_##TE, \
- IAVF_RX_PTYPE_##TEF, \
- IAVF_RX_PTYPE_INNER_PROT_##I, \
- IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define IAVF_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG
-#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG
-#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC
-
-/* Lookup table mapping the 8-bit HW PTYPE to the bit field for decoding */
-struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
- /* L2 Packet types */
- IAVF_PTT_UNUSED_ENTRY(0),
- IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
- IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT_UNUSED_ENTRY(4),
- IAVF_PTT_UNUSED_ENTRY(5),
- IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT_UNUSED_ENTRY(8),
- IAVF_PTT_UNUSED_ENTRY(9),
- IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-
- /* Non Tunneled IPv4 */
- IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(25),
- IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(32),
- IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(39),
- IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(47),
- IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(54),
- IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(62),
- IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(69),
- IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
- IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(77),
- IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(84),
- IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(91),
- IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(98),
- IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(105),
- IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(113),
- IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(120),
- IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(128),
- IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(135),
- IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(143),
- IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(150),
- IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* unused entries */
- [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-};
-
/**
* iavf_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 6f236d1a6444..52273f7eab2c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
+#include <linux/uaccess.h>
+
/* ethtool support for iavf */
#include "iavf.h"
-#include <linux/uaccess.h>
-
/* ethtool statistics helpers */
/**
@@ -239,29 +240,6 @@ static const struct iavf_stats iavf_gstrings_stats[] = {
#define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats)
-/* For now we have one and only one private flag and it is only defined
- * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
- * of leaving all this code sitting around empty we will strip it unless
- * our one private flag is actually available.
- */
-struct iavf_priv_flags {
- char flag_string[ETH_GSTRING_LEN];
- u32 flag;
- bool read_only;
-};
-
-#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \
- .flag_string = _name, \
- .flag = _flag, \
- .read_only = _read_only, \
-}
-
-static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = {
- IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0),
-};
-
-#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags)
-
/**
* iavf_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
@@ -341,8 +319,6 @@ static int iavf_get_sset_count(struct net_device *netdev, int sset)
return IAVF_STATS_LEN +
(IAVF_QUEUE_STATS_LEN * 2 *
netdev->real_num_tx_queues);
- else if (sset == ETH_SS_PRIV_FLAGS)
- return IAVF_PRIV_FLAGS_STR_LEN;
else
return -EINVAL;
}
@@ -385,22 +361,6 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
}
/**
- * iavf_get_priv_flag_strings - Get private flag strings
- * @netdev: network interface device structure
- * @data: buffer for string data
- *
- * Builds the private flags string table
- **/
-static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
-{
- unsigned int i;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++)
- ethtool_sprintf(&data, "%s",
- iavf_gstrings_priv_flags[i].flag_string);
-}
-
-/**
* iavf_get_stat_strings - Get stat strings
* @netdev: network interface device structure
* @data: buffer for string data
@@ -438,109 +398,12 @@ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
case ETH_SS_STATS:
iavf_get_stat_strings(netdev, data);
break;
- case ETH_SS_PRIV_FLAGS:
- iavf_get_priv_flag_strings(netdev, data);
- break;
default:
break;
}
}
/**
- * iavf_get_priv_flags - report device private flags
- * @netdev: network interface device structure
- *
- * The get string set count and the string set should be matched for each
- * flag returned. Add new strings for each flag to the iavf_gstrings_priv_flags
- * array.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 iavf_get_priv_flags(struct net_device *netdev)
-{
- struct iavf_adapter *adapter = netdev_priv(netdev);
- u32 i, ret_flags = 0;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
- const struct iavf_priv_flags *priv_flags;
-
- priv_flags = &iavf_gstrings_priv_flags[i];
-
- if (priv_flags->flag & adapter->flags)
- ret_flags |= BIT(i);
- }
-
- return ret_flags;
-}
-
-/**
- * iavf_set_priv_flags - set private flags
- * @netdev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
-{
- struct iavf_adapter *adapter = netdev_priv(netdev);
- u32 orig_flags, new_flags, changed_flags;
- int ret = 0;
- u32 i;
-
- orig_flags = READ_ONCE(adapter->flags);
- new_flags = orig_flags;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
- const struct iavf_priv_flags *priv_flags;
-
- priv_flags = &iavf_gstrings_priv_flags[i];
-
- if (flags & BIT(i))
- new_flags |= priv_flags->flag;
- else
- new_flags &= ~(priv_flags->flag);
-
- if (priv_flags->read_only &&
- ((orig_flags ^ new_flags) & ~BIT(i)))
- return -EOPNOTSUPP;
- }
-
- /* Before we finalize any flag changes, any checks which we need to
- * perform to determine if the new flags will be supported should go
- * here...
- */
-
- /* Compare and exchange the new flags into place. If we failed, that
- * is if cmpxchg returns anything but the old value, this means
- * something else must have modified the flags variable since we
- * copied it. We'll just punt with an error and log something in the
- * message buffer.
- */
- if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
- dev_warn(&adapter->pdev->dev,
- "Unable to update adapter->flags as it was modified by another thread...\n");
- return -EAGAIN;
- }
-
- changed_flags = orig_flags ^ new_flags;
-
- /* Process any additional changes needed as a result of flag changes.
- * The changed_flags value reflects the list of bits that were changed
- * in the code above.
- */
-
- /* issue a reset to force legacy-rx change to take effect */
- if (changed_flags & IAVF_FLAG_LEGACY_RX) {
- if (netif_running(netdev)) {
- iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
- ret = iavf_wait_for_reset(adapter);
- if (ret)
- netdev_warn(netdev, "Changing private flags timeout or interrupted waiting for reset");
- }
- }
-
- return ret;
-}
-
-/**
* iavf_get_msglevel - Get debug message level
* @netdev: network interface device structure
*
@@ -585,7 +448,6 @@ static void iavf_get_drvinfo(struct net_device *netdev,
strscpy(drvinfo->driver, iavf_driver_name, 32);
strscpy(drvinfo->fw_version, "N/A", 4);
strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
}
/**
@@ -827,18 +689,10 @@ static int __iavf_set_coalesce(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev);
int i;
- if (ec->rx_coalesce_usecs == 0) {
- if (ec->use_adaptive_rx_coalesce)
- netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
- } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
- (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
+ if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) {
netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
- } else if (ec->tx_coalesce_usecs == 0) {
- if (ec->use_adaptive_tx_coalesce)
- netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
- } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
- (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
+ } else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) {
netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
}
@@ -1025,8 +879,7 @@ iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
flex = &fltr->flex_words[cnt++];
flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
- flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >>
- IAVF_USERDEF_FLEX_OFFS_S;
+ flex->offset = FIELD_GET(IAVF_USERDEF_FLEX_OFFS_M, value);
if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
return -EINVAL;
}
@@ -1069,7 +922,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *rule = NULL;
int ret = 0;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
@@ -1211,7 +1064,7 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
unsigned int cnt = 0;
int val = 0;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
cmd->data = IAVF_MAX_FDIR_FILTERS;
@@ -1403,7 +1256,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
int count = 50;
int err;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
if (fsp->flow_type & FLOW_MAC_EXT)
@@ -1444,12 +1297,15 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
spin_lock_bh(&adapter->fdir_fltr_lock);
iavf_fdir_list_add_fltr(adapter, fltr);
adapter->fdir_active_fltr++;
- fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
- spin_unlock_bh(&adapter->fdir_fltr_lock);
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ if (adapter->link_up)
+ fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ else
+ fltr->state = IAVF_FDIR_FLTR_INACTIVE;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ if (adapter->link_up)
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
ret:
if (err && fltr)
kfree(fltr);
@@ -1471,7 +1327,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
struct iavf_fdir_fltr *fltr = NULL;
int err = 0;
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
@@ -1479,7 +1335,11 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (fltr) {
if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
+ list_del(&fltr->list);
+ kfree(fltr);
+ adapter->fdir_active_fltr--;
+ fltr = NULL;
} else {
err = -EBUSY;
}
@@ -1489,7 +1349,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
return err;
}
@@ -1540,11 +1400,12 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
/**
* iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
* @cmd: ethtool rxnfc command
+ * @symm: true if Symmetric Toeplitz is set
*
* This function parses the rxnfc command and returns intended hash fields for
* RSS configuration
*/
-static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd)
+static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
{
u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
@@ -1616,17 +1477,20 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
struct iavf_adv_rss *rss_old, *rss_new;
bool rss_new_add = false;
int count = 50, err = 0;
+ bool symm = false;
u64 hash_flds;
u32 hdrs;
if (!ADV_RSS_SUPPORT(adapter))
return -EOPNOTSUPP;
+ symm = !!(adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC);
+
hdrs = iavf_adv_rss_parse_hdrs(cmd);
if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
return -EINVAL;
- hash_flds = iavf_adv_rss_parse_hash_flds(cmd);
+ hash_flds = iavf_adv_rss_parse_hash_flds(cmd, symm);
if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
return -EINVAL;
@@ -1634,7 +1498,8 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (!rss_new)
return -ENOMEM;
- if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) {
+ if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds,
+ symm)) {
kfree(rss_new);
return -EINVAL;
}
@@ -1653,12 +1518,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (rss_old) {
if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
err = -EBUSY;
- } else if (rss_old->hash_flds != hash_flds) {
+ } else if (rss_old->hash_flds != hash_flds ||
+ rss_old->symm != symm) {
rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
rss_old->hash_flds = hash_flds;
+ rss_old->symm = symm;
memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
sizeof(rss_new->cfg_msg));
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
} else {
err = -EEXIST;
}
@@ -1667,13 +1533,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
rss_new->packet_hdrs = hdrs;
rss_new->hash_flds = hash_flds;
+ rss_new->symm = symm;
list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
}
spin_unlock_bh(&adapter->adv_rss_lock);
if (!err)
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
mutex_unlock(&adapter->crit_lock);
@@ -1788,7 +1654,7 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
- if (!FDIR_FLTR_SUPPORT(adapter))
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
break;
spin_lock_bh(&adapter->fdir_fltr_lock);
cmd->rule_cnt = adapter->fdir_active_fltr;
@@ -1907,27 +1773,27 @@ static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
/**
* iavf_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function in use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Always returns 0.
**/
-static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int iavf_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (key)
- memcpy(key, adapter->rss_key, adapter->rss_key_size);
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
- if (indir)
+ if (rxfh->key)
+ memcpy(rxfh->key, adapter->rss_key, adapter->rss_key_size);
+
+ if (rxfh->indir)
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
for (i = 0; i < adapter->rss_lut_size; i++)
- indir[i] = (u32)adapter->rss_lut[i];
+ rxfh->indir[i] = (u32)adapter->rss_lut[i];
return 0;
}
@@ -1935,33 +1801,46 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
/**
* iavf_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function to use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
-static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int iavf_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
/* Only support toeplitz hash function */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!key && !indir)
+ if ((rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
+ adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) {
+ if (!ADV_RSS_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+ adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+ adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
+ } else if (!(rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
+ adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC) {
+ adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+ adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
+ }
+
+ if (!rxfh->key && !rxfh->indir)
return 0;
- if (key)
- memcpy(adapter->rss_key, key, adapter->rss_key_size);
+ if (rxfh->key)
+ memcpy(adapter->rss_key, rxfh->key, adapter->rss_key_size);
- if (indir) {
+ if (rxfh->indir) {
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
for (i = 0; i < adapter->rss_lut_size; i++)
- adapter->rss_lut[i] = (u8)(indir[i]);
+ adapter->rss_lut[i] = (u8)(rxfh->indir[i]);
}
return iavf_config_rss(adapter);
@@ -1970,6 +1849,7 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .cap_rss_sym_xor_supported = true,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
@@ -1977,8 +1857,6 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.get_strings = iavf_get_strings,
.get_ethtool_stats = iavf_get_ethtool_stats,
.get_sset_count = iavf_get_sset_count,
- .get_priv_flags = iavf_get_priv_flags,
- .set_priv_flags = iavf_set_priv_flags,
.get_msglevel = iavf_get_msglevel,
.set_msglevel = iavf_set_msglevel,
.get_coalesce = iavf_get_coalesce,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
index 03e774bd2a5b..2d47b0b4640e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -3,6 +3,7 @@
/* flow director ethtool support for iavf */
+#include <linux/bitfield.h>
#include "iavf.h"
#define GTPU_PORT 2152
@@ -357,7 +358,7 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
if (fltr->ip_mask.tclass == U8_MAX) {
iph->priority = (fltr->ip_data.tclass >> 4) & 0xF;
- iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0;
+ iph->flow_lbl[0] = FIELD_PREP(0xF0, fltr->ip_data.tclass);
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
index 9eb9f73f6adf..d31bd923ba8c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
@@ -6,12 +6,25 @@
struct iavf_adapter;
-/* State of Flow Director filter */
+/* State of Flow Director filter
+ *
+ * *_REQUEST states mark a filter to be sent to the PF driver to perform an
+ * action (either add or delete the filter). *_PENDING states indicate that the
+ * request was sent to the PF and the driver is waiting for a response.
+ *
+ * Both the DELETE and DISABLE states are used to delete a filter in the PF.
+ * The difference is that after a successful response, a filter in DEL_PENDING
+ * state is also deleted from the VF driver, while a filter in DIS_PENDING
+ * state is changed to the INACTIVE state.
+ */
enum iavf_fdir_fltr_state_t {
IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */
IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */
IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */
IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */
+ IAVF_FDIR_FLTR_DIS_REQUEST, /* Filter scheduled to be disabled */
+ IAVF_FDIR_FLTR_DIS_PENDING, /* Filter pending disable by the PF */
+ IAVF_FDIR_FLTR_INACTIVE, /* Filter inactive on link down */
IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */
};
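
The new states extend the existing add/delete pair with a disable path and an
inactive resting state. As a minimal sketch (not part of this patch), the
hypothetical helper below only illustrates which states imply the filter has
been sent to the PF, matching the handling in iavf_disable_fdir() and
iavf_restore_fdir_filters() further down in this series:

/* Hypothetical helper, not part of the patch: a filter has been sent to
 * the PF (and not yet confirmed removed) in every state except
 * ADD_REQUEST and INACTIVE, which is why iavf_disable_fdir() frees those
 * two locally and schedules or keeps a delete request for the rest.
 */
static bool iavf_fdir_fltr_known_to_pf(enum iavf_fdir_fltr_state_t state)
{
	switch (state) {
	case IAVF_FDIR_FLTR_ADD_PENDING:
	case IAVF_FDIR_FLTR_DEL_REQUEST:
	case IAVF_FDIR_FLTR_DEL_PENDING:
	case IAVF_FDIR_FLTR_DIS_REQUEST:
	case IAVF_FDIR_FLTR_DIS_PENDING:
	case IAVF_FDIR_FLTR_ACTIVE:
		return true;
	case IAVF_FDIR_FLTR_ADD_REQUEST:
	case IAVF_FDIR_FLTR_INACTIVE:
	default:
		return false;
	}
}
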
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index c862ebcd2e39..c6dff0963053 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/net/intel/libie/rx.h>
+
#include "iavf.h"
#include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must
@@ -45,6 +47,8 @@ MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
+MODULE_IMPORT_NS(LIBETH);
+MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
@@ -277,27 +281,6 @@ void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
}
/**
- * iavf_lock_timeout - try to lock mutex but give up after timeout
- * @lock: mutex that should be locked
- * @msecs: timeout in msecs
- *
- * Returns 0 on success, negative on failure
- **/
-static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
-{
- unsigned int wait, delay = 10;
-
- for (wait = 0; wait < msecs; wait += delay) {
- if (mutex_trylock(lock))
- return 0;
-
- msleep(delay);
- }
-
- return -1;
-}
-
-/**
* iavf_schedule_reset - Set the flags and schedule a reset event
* @adapter: board private structure
* @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
@@ -735,40 +718,10 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
**/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
- unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
struct iavf_hw *hw = &adapter->hw;
- int i;
-
- /* Legacy Rx will always default to a 2048 buffer size. */
-#if (PAGE_SIZE < 8192)
- if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
- struct net_device *netdev = adapter->netdev;
-
- /* For jumbo frames on systems with 4K pages we have to use
- * an order 1 page, so we might as well increase the size
- * of our Rx buffer to make better use of the available space
- */
- rx_buf_len = IAVF_RXBUFFER_3072;
-
- /* We use a 1536 buffer size for configurations with
- * standard Ethernet mtu. On x86 this gives us enough room
- * for shared info and 192 bytes of padding.
- */
- if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
- (netdev->mtu <= ETH_DATA_LEN))
- rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
- }
-#endif
- for (i = 0; i < adapter->num_active_queues; i++) {
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
- adapter->rx_rings[i].rx_buf_len = rx_buf_len;
-
- if (adapter->flags & IAVF_FLAG_LEGACY_RX)
- clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
- else
- set_ring_build_skb_enabled(&adapter->rx_rings[i]);
- }
}
/**
@@ -1059,13 +1012,12 @@ static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
*/
new_f->is_primary = true;
new_f->add = true;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
ether_addr_copy(hw->mac.addr, new_mac);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* schedule the watchdog task to immediately process the request */
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_MAC_FILTER);
return 0;
}
@@ -1284,8 +1236,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
iavf_napi_enable_all(adapter);
- adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ENABLE_QUEUES);
}
/**
@@ -1353,18 +1304,20 @@ static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
**/
static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
- struct iavf_fdir_fltr *fdir, *fdirtmp;
+ struct iavf_fdir_fltr *fdir;
/* remove all Flow Director filters */
spin_lock_bh(&adapter->fdir_fltr_lock);
- list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
- list) {
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
- list_del(&fdir->list);
- kfree(fdir);
- adapter->fdir_active_fltr--;
- } else {
- fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ /* Cancel a request, keep filter as inactive */
+ fdir->state = IAVF_FDIR_FLTR_INACTIVE;
+ } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+ fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
+ /* Disable filters that are active or have a pending
+ * add request to the PF
+ */
+ fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -1439,8 +1392,7 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
}
- adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DISABLE_QUEUES);
}
/**
@@ -1637,7 +1589,6 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)
rx_ring = &adapter->rx_rings[i];
rx_ring->queue_index = i;
rx_ring->netdev = adapter->netdev;
- rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
rx_ring->itr_setting = IAVF_ITR_RX_DEF;
}
@@ -2169,6 +2120,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_set_rss_lut(adapter);
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC) {
+ iavf_set_rss_hfunc(adapter);
+ return 0;
+ }
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
iavf_set_promiscuous(adapter);
@@ -2188,19 +2143,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_add_cloud_filter(adapter);
return 0;
}
-
if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
iavf_del_cloud_filter(adapter);
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
- iavf_del_cloud_filter(adapter);
- return 0;
- }
- if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
- iavf_add_cloud_filter(adapter);
- return 0;
- }
if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
iavf_add_fdir_filter(adapter);
return IAVF_SUCCESS;
@@ -2337,10 +2283,8 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
}
}
- if (aq_required) {
- adapter->aq_required |= aq_required;
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
- }
+ if (aq_required)
+ iavf_schedule_aq_request(adapter, aq_required);
}
/**
@@ -2671,9 +2615,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
iavf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- /* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
+ netdev->max_mtu = LIBIE_MAX_MTU;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -3253,7 +3196,7 @@ static void iavf_adminq_task(struct work_struct *work)
goto freedom;
/* check for error indications */
- val = rd32(hw, hw->aq.arq.len);
+ val = rd32(hw, IAVF_VF_ARQLEN1);
if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
goto freedom;
oldval = val;
@@ -3270,9 +3213,9 @@ static void iavf_adminq_task(struct work_struct *work)
val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
}
if (oldval != val)
- wr32(hw, hw->aq.arq.len, val);
+ wr32(hw, IAVF_VF_ARQLEN1, val);
- val = rd32(hw, hw->aq.asq.len);
+ val = rd32(hw, IAVF_VF_ATQLEN1);
oldval = val;
if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
@@ -3287,7 +3230,7 @@ static void iavf_adminq_task(struct work_struct *work)
val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
}
if (oldval != val)
- wr32(hw, hw->aq.asq.len, val);
+ wr32(hw, IAVF_VF_ATQLEN1, val);
freedom:
kfree(event.msg_buf);
@@ -3532,6 +3475,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_is_tc_config_same - Compare the mqprio TC config with the
+ * TC config already configured on this adapter.
+ * @adapter: board private structure
+ * @mqprio_qopt: TC config received from kernel.
+ *
+ * This function compares the TC config received from the kernel
+ * with the config already configured on the adapter.
+ *
+ * Return: True if configuration is same, false otherwise.
+ **/
+static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
+ struct tc_mqprio_qopt *mqprio_qopt)
+{
+ struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
+ int i;
+
+ if (adapter->num_tc != mqprio_qopt->num_tc)
+ return false;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ if (ch[i].count != mqprio_qopt->count[i] ||
+ ch[i].offset != mqprio_qopt->offset[i])
+ return false;
+ }
+ return true;
+}
+
+/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
* @type_data: tc offload data
@@ -3588,7 +3559,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
return ret;
/* Return if same TC config is requested */
- if (adapter->num_tc == num_tc)
+ if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
return 0;
adapter->num_tc = num_tc;
@@ -3786,6 +3757,10 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -4113,6 +4088,33 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
/**
+ * iavf_restore_fdir_filters
+ * @adapter: board private structure
+ *
+ * Restore existing FDIR filters when VF netdev comes back up.
+ **/
+static void iavf_restore_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *f;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(f, &adapter->fdir_list_head, list) {
+ if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
+ /* Cancel a request, keep filter as active */
+ f->state = IAVF_FDIR_FLTR_ACTIVE;
+ } else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING ||
+ f->state == IAVF_FDIR_FLTR_INACTIVE) {
+ /* Re-add filters that are inactive or have a pending
+ * disable request to the PF
+ */
+ f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+}
+
+/**
* iavf_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -4179,8 +4181,9 @@ static int iavf_open(struct net_device *netdev)
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- /* Restore VLAN filters that were removed with IFF_DOWN */
+ /* Restore filters that were removed with IFF_DOWN */
iavf_restore_filters(adapter);
+ iavf_restore_fdir_filters(adapter);
iavf_configure(adapter);
@@ -4297,7 +4300,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev)) {
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
@@ -4311,6 +4314,49 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+/**
+ * iavf_disable_fdir - disable Flow Director and clear existing filters
+ * @adapter: board private structure
+ **/
+static void iavf_disable_fdir(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
+ bool del_filters = false;
+
+ adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED;
+
+ /* remove all Flow Director filters */
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
+ fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
+ /* Delete filters not registered in PF */
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+ fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
+ fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
+ /* Filters registered in PF, schedule their deletion */
+ fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ del_filters = true;
+ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ /* A delete request was already sent to the PF; switch the
+ * state to DEL_PENDING so the filter is deleted after the
+ * PF's response instead of being set to INACTIVE
+ */
+ fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (del_filters) {
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+}
+
#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_RX | \
@@ -4336,6 +4382,13 @@ static int iavf_set_features(struct net_device *netdev,
((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS)))
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+ if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) {
+ if (features & NETIF_F_NTUPLE)
+ adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
+ else
+ iavf_disable_fdir(adapter);
+ }
+
return 0;
}
@@ -4365,12 +4418,12 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
/* MACLEN can support at most 63 words */
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len & ~(63 * 2))
goto out_err;
/* IPLEN and EIPLEN can support at most 127 dwords */
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len & ~(127 * 4))
goto out_err;
@@ -4685,6 +4738,9 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
features = iavf_fix_netdev_vlan_features(adapter, features);
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ features &= ~NETIF_F_NTUPLE;
+
return iavf_fix_strip_features(adapter, features);
}
@@ -4802,6 +4858,12 @@ int iavf_process_config(struct iavf_adapter *adapter)
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (FDIR_FLTR_SUPPORT(adapter)) {
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->features |= NETIF_F_NTUPLE;
+ adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
+ }
+
netdev->priv_flags |= IFF_UNICAST_FLT;
/* Do not turn on offloads when they are requested to be turned off.
@@ -4826,34 +4888,6 @@ int iavf_process_config(struct iavf_adapter *adapter)
}
/**
- * iavf_shutdown - Shutdown the device in preparation for a reboot
- * @pdev: pci device structure
- **/
-static void iavf_shutdown(struct pci_dev *pdev)
-{
- struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
- struct net_device *netdev = adapter->netdev;
-
- netif_device_detach(netdev);
-
- if (netif_running(netdev))
- iavf_close(netdev);
-
- if (iavf_lock_timeout(&adapter->crit_lock, 5000))
- dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);
- /* Prevent the watchdog from running. */
- iavf_change_state(adapter, __IAVF_REMOVE);
- adapter->aq_required = 0;
- mutex_unlock(&adapter->crit_lock);
-
-#ifdef CONFIG_PM
- pci_save_state(pdev);
-
-#endif
- pci_disable_device(pdev);
-}
-
-/**
* iavf_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in iavf_pci_tbl
@@ -4993,7 +5027,7 @@ err_dma:
*
* Called when the system (VM) is entering sleep/suspend.
**/
-static int __maybe_unused iavf_suspend(struct device *dev_d)
+static int iavf_suspend(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct iavf_adapter *adapter = netdev_priv(netdev);
@@ -5021,7 +5055,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
*
* Called when the system (VM) is resumed from sleep/suspend.
**/
-static int __maybe_unused iavf_resume(struct device *dev_d)
+static int iavf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct iavf_adapter *adapter;
@@ -5063,16 +5097,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
**/
static void iavf_remove(struct pci_dev *pdev)
{
- struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_adv_rss *rss, *rsstmp;
struct iavf_mac_filter *f, *ftmp;
+ struct iavf_adapter *adapter;
struct net_device *netdev;
struct iavf_hw *hw;
- netdev = adapter->netdev;
+ /* Don't proceed with remove if netdev is already freed */
+ netdev = pci_get_drvdata(pdev);
+ if (!netdev)
+ return;
+
+ adapter = iavf_pdev_to_adapter(pdev);
hw = &adapter->hw;
if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
@@ -5184,19 +5223,33 @@ static void iavf_remove(struct pci_dev *pdev)
destroy_workqueue(adapter->wq);
+ pci_set_drvdata(pdev, NULL);
+
free_netdev(netdev);
pci_disable_device(pdev);
}
-static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
+/**
+ * iavf_shutdown - Shutdown the device in preparation for a reboot
+ * @pdev: pci device structure
+ **/
+static void iavf_shutdown(struct pci_dev *pdev)
+{
+ iavf_remove(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
static struct pci_driver iavf_driver = {
.name = iavf_driver_name,
.id_table = iavf_pci_tbl,
.probe = iavf_probe,
.remove = iavf_remove,
- .driver.pm = &iavf_pm_ops,
+ .driver.pm = pm_sleep_ptr(&iavf_pm_ops),
.shutdown = iavf_shutdown,
};
diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
index 4a48e6171405..48c3901381b4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
@@ -45,13 +45,6 @@ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
struct iavf_aqc_get_set_rss_key_data *key);
-extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
-
-static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
-{
- return iavf_ptype_lookup[ptype];
-}
-
void iavf_vf_parse_hw_config(struct iavf_hw *hw,
struct virtchnl_vf_resource *msg);
enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index d64c4997136b..26b424fd6718 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
+#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h>
#include "iavf.h"
@@ -183,7 +185,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi)
* pending work.
*/
packets = tx_ring->stats.packets & INT_MAX;
- if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+ if (tx_ring->prev_pkt_ctr == packets) {
iavf_force_wb(vsi, tx_ring->q_vector);
continue;
}
@@ -192,7 +194,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi)
* to iavf_get_tx_pending()
*/
smp_rmb();
- tx_ring->tx_stats.prev_pkt_ctr =
+ tx_ring->prev_pkt_ctr =
iavf_get_tx_pending(tx_ring, true) ? packets : -1;
}
}
@@ -318,7 +320,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__IAVF_VSI_DOWN, vsi->state) &&
(IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
- tx_ring->arm_wb = true;
+ tx_ring->flags |= IAVF_TXR_FLAGS_ARM_WB;
}
/* notify netdev of completed buffers */
@@ -673,7 +675,7 @@ int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->tx_stats.prev_pkt_ctr = -1;
+ tx_ring->prev_pkt_ctr = -1;
return 0;
err:
@@ -688,11 +690,8 @@ err:
**/
static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
- unsigned long bi_size;
- u16 i;
-
/* ring already cleared, nothing to do */
- if (!rx_ring->rx_bi)
+ if (!rx_ring->rx_fqes)
return;
if (rx_ring->skb) {
@@ -700,41 +699,16 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
rx_ring->skb = NULL;
}
- /* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+ /* Free all the Rx ring buffers */
+ for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
+ const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i];
- if (!rx_bi->page)
- continue;
+ page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false);
- /* Invalidate cache lines that may have been written to by
- * device so that we avoid corrupting memory.
- */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->dma,
- rx_bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
-
- /* free resources associated with mapping */
- dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
- iavf_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE,
- IAVF_RX_DMA_ATTR);
-
- __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
-
- rx_bi->page = NULL;
- rx_bi->page_offset = 0;
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
}
- bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_bi, 0, bi_size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -747,15 +721,22 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
**/
void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
+ struct libeth_fq fq = {
+ .fqes = rx_ring->rx_fqes,
+ .pp = rx_ring->pp,
+ };
+
iavf_clean_rx_ring(rx_ring);
- kfree(rx_ring->rx_bi);
- rx_ring->rx_bi = NULL;
if (rx_ring->desc) {
- dma_free_coherent(rx_ring->dev, rx_ring->size,
+ dma_free_coherent(rx_ring->pp->p.dev, rx_ring->size,
rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
+
+ libeth_rx_fq_destroy(&fq);
+ rx_ring->rx_fqes = NULL;
+ rx_ring->pp = NULL;
}
/**
@@ -766,38 +747,46 @@ void iavf_free_rx_resources(struct iavf_ring *rx_ring)
**/
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
- int bi_size;
-
- /* warn if we are about to overwrite the pointer */
- WARN_ON(rx_ring->rx_bi);
- bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
- rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
- if (!rx_ring->rx_bi)
- goto err;
+ struct libeth_fq fq = {
+ .count = rx_ring->count,
+ .buf_len = LIBIE_MAX_RX_BUF_LEN,
+ .nid = NUMA_NO_NODE,
+ };
+ int ret;
+
+ ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi);
+ if (ret)
+ return ret;
+
+ rx_ring->pp = fq.pp;
+ rx_ring->rx_fqes = fq.fqes;
+ rx_ring->truesize = fq.truesize;
+ rx_ring->rx_buf_len = fq.buf_len;
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
- dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ dev_info(fq.pp->p.dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
rx_ring->size);
goto err;
}
- rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
return 0;
+
err:
- kfree(rx_ring->rx_bi);
- rx_ring->rx_bi = NULL;
+ libeth_rx_fq_destroy(&fq);
+ rx_ring->rx_fqes = NULL;
+ rx_ring->pp = NULL;
+
return -ENOMEM;
}
@@ -810,9 +799,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = val;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -823,69 +809,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
}
/**
- * iavf_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
-}
-
-/**
- * iavf_alloc_mapped_page - recycle or make a new page
- * @rx_ring: ring to use
- * @bi: rx_buffer struct to modify
- *
- * Returns true if the page was successfully allocated or
- * reused.
- **/
-static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *bi)
-{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page)) {
- rx_ring->rx_stats.page_reuse_count++;
- return true;
- }
-
- /* alloc new page for storage */
- page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- /* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0,
- iavf_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE,
- IAVF_RX_DMA_ATTR);
-
- /* if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, iavf_rx_pg_order(rx_ring));
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = iavf_rx_offset(rx_ring);
-
- /* initialize pagecnt_bias to 1 representing we fully own page */
- bi->pagecnt_bias = 1;
-
- return true;
-}
-
-/**
* iavf_receive_skb - Send a completed packet up the stack
* @rx_ring: rx ring in play
* @skb: packet to send up
@@ -915,38 +838,37 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring,
**/
bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
{
+ const struct libeth_fq_fp fq = {
+ .pp = rx_ring->pp,
+ .fqes = rx_ring->rx_fqes,
+ .truesize = rx_ring->truesize,
+ .count = rx_ring->count,
+ };
u16 ntu = rx_ring->next_to_use;
union iavf_rx_desc *rx_desc;
- struct iavf_rx_buffer *bi;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
rx_desc = IAVF_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_bi[ntu];
do {
- if (!iavf_alloc_mapped_page(rx_ring, bi))
- goto no_buffers;
+ dma_addr_t addr;
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
+ addr = libeth_rx_alloc(&fq, ntu);
+ if (addr == DMA_MAPPING_ERROR)
+ goto no_buffers;
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->read.pkt_addr = cpu_to_le64(addr);
rx_desc++;
- bi++;
ntu++;
if (unlikely(ntu == rx_ring->count)) {
rx_desc = IAVF_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_bi;
ntu = 0;
}
@@ -965,6 +887,8 @@ no_buffers:
if (rx_ring->next_to_use != ntu)
iavf_release_rx_desc(rx_ring, ntu);
+ rx_ring->rx_stats.alloc_page_failed++;
+
/* make sure to come back via polling to try again after
* allocation failure
*/
@@ -981,40 +905,30 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi,
struct sk_buff *skb,
union iavf_rx_desc *rx_desc)
{
- struct iavf_rx_ptype_decoded decoded;
+ struct libeth_rx_pt decoded;
u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype;
u64 qword;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
- rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
- IAVF_RXD_QW1_ERROR_SHIFT;
- rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
- IAVF_RXD_QW1_STATUS_SHIFT;
- decoded = decode_rx_desc_ptype(ptype);
-
skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
- /* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ decoded = libie_rx_pt_parse(ptype);
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
return;
+ rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
+ rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
+
/* did the hardware decode the packet and checksum? */
if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* both known and outer_ip must be set for the below code to work */
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (ipv4 &&
(rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
@@ -1038,17 +952,7 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi,
if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case IAVF_RX_PTYPE_INNER_PROT_TCP:
- case IAVF_RX_PTYPE_INNER_PROT_UDP:
- case IAVF_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- fallthrough;
- default:
- break;
- }
-
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
checksum_fail:
@@ -1056,29 +960,6 @@ checksum_fail:
}
/**
- * iavf_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- **/
-static int iavf_ptype_to_htype(u8 ptype)
-{
- struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
-
- if (!decoded.known)
- return PKT_HASH_TYPE_NONE;
-
- if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
- return PKT_HASH_TYPE_L4;
- else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
- return PKT_HASH_TYPE_L3;
- else
- return PKT_HASH_TYPE_L2;
-}
-
-/**
* iavf_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
@@ -1090,17 +971,19 @@ static void iavf_rx_hash(struct iavf_ring *ring,
struct sk_buff *skb,
u8 rx_ptype)
{
+ struct libeth_rx_pt decoded;
u32 hash;
const __le64 rss_mask =
cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (!(ring->netdev->features & NETIF_F_RXHASH))
+ decoded = libie_rx_pt_parse(rx_ptype);
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
}
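
Taken together with the removal of iavf_ptype_to_htype() above, the pattern is: parse the descriptor ptype once with libie_rx_pt_parse() and let the resulting struct libeth_rx_pt answer both the hash-type and the checksum questions. A condensed sketch using only the helpers visible in these hunks (the function name is made up for illustration):

/* Conceptual only: one parsed ptype drives both RSS hash and checksum. */
static void iavf_rx_pt_example(struct net_device *netdev, struct sk_buff *skb,
			       u32 hash, u8 rx_ptype)
{
	struct libeth_rx_pt pt = libie_rx_pt_parse(rx_ptype);

	if (libeth_rx_pt_has_hash(netdev, pt))
		libeth_rx_pt_set_hash(skb, hash, pt);

	if (!libeth_rx_pt_has_checksum(netdev, pt))
		return;

	/* The IAVF_RX_DESC_ERROR_* / _STATUS_* bits still decide between
	 * CHECKSUM_UNNECESSARY and CHECKSUM_NONE, exactly as above.
	 */
}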
@@ -1153,95 +1036,9 @@ static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
}
/**
- * iavf_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *old_buff)
-{
- struct iavf_rx_buffer *new_buff;
- u16 nta = rx_ring->next_to_alloc;
-
- new_buff = &rx_ring->rx_bi[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- new_buff->dma = old_buff->dma;
- new_buff->page = old_buff->page;
- new_buff->page_offset = old_buff->page_offset;
- new_buff->pagecnt_bias = old_buff->pagecnt_bias;
-}
-
-/**
- * iavf_can_reuse_rx_page - Determine if this page can be reused by
- * the adapter for another receive
- *
- * @rx_buffer: buffer containing the page
- *
- * If page is reusable, rx_buffer->page_offset is adjusted to point to
- * an unused region in the page.
- *
- * For small pages, @truesize will be a constant value, half the size
- * of the memory at page. We'll attempt to alternate between high and
- * low halves of the page, with one half ready for use by the hardware
- * and the other half being consumed by the stack. We use the page
- * ref count to determine whether the stack has finished consuming the
- * portion of this page that was passed up with a previous packet. If
- * the page ref count is >1, we'll assume the "other" half page is
- * still busy, and this page cannot be reused.
- *
- * For larger pages, @truesize will be the actual space used by the
- * received packet (adjusted upward to an even multiple of the cache
- * line size). This will advance through the page by the amount
- * actually consumed by the received packets while there is still
- * space for a buffer. Each region of larger pages will be used at
- * most once, after which the page will not be reused.
- *
- * In either case, if the page is reusable its refcount is increased.
- **/
-static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
-{
- unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
- struct page *page = rx_buffer->page;
-
- /* Is any reuse possible? */
- if (!dev_page_is_reusable(page))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
- return false;
-#else
-#define IAVF_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
- if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
- return false;
-#endif
-
- /* If we have drained the page fragment pool we need to update
- * the pagecnt_bias and page count so that we fully restock the
- * number of references the driver holds.
- */
- if (unlikely(!pagecnt_bias)) {
- page_ref_add(page, USHRT_MAX);
- rx_buffer->pagecnt_bias = USHRT_MAX;
- }
-
- return true;
-}
-
-/**
* iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: buffer containing page to add
* @skb: sk_buff to place the data into
+ * @rx_buffer: buffer containing page to add
* @size: packet length from rx_desc
*
* This function will add the data contained in rx_buffer->page to the skb.
@@ -1249,206 +1046,50 @@ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
*
* The function will then update the page offset.
**/
-static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer,
- struct sk_buff *skb,
+static void iavf_add_rx_frag(struct sk_buff *skb,
+ const struct libeth_fqe *rx_buffer,
unsigned int size)
{
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
-#endif
-
- if (!size)
- return;
+ u32 hr = rx_buffer->page->pp->p.offset;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
- rx_buffer->page_offset, size, truesize);
-
- /* page is being used so we must update the page offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
-}
-
-/**
- * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
- * @rx_ring: rx descriptor ring to transact packets on
- * @size: size of buffer to add to skb
- *
- * This function will pull an Rx buffer from the ring and synchronize it
- * for use by the CPU.
- */
-static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
- const unsigned int size)
-{
- struct iavf_rx_buffer *rx_buffer;
-
- rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
- prefetchw(rx_buffer->page);
- if (!size)
- return rx_buffer;
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
-
- /* We have pulled a buffer for use, so decrement pagecnt_bias */
- rx_buffer->pagecnt_bias--;
-
- return rx_buffer;
-}
-
-/**
- * iavf_construct_skb - Allocate skb and populate it
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- * @size: size of buffer to add to skb
- *
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
- */
-static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer,
- unsigned int size)
-{
- void *va;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
-#endif
- unsigned int headlen;
- struct sk_buff *skb;
-
- if (!rx_buffer)
- return NULL;
- /* prefetch first cache line of first page */
- va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- net_prefetch(va);
-
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- IAVF_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb))
- return NULL;
-
- /* Determine available headroom for copy */
- headlen = size;
- if (headlen > IAVF_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
-
- /* update all of the pointers */
- size -= headlen;
- if (size) {
- skb_add_rx_frag(skb, 0, rx_buffer->page,
- rx_buffer->page_offset + headlen,
- size, truesize);
-
- /* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
- } else {
- /* buffer is unused, reset bias back to rx_buffer */
- rx_buffer->pagecnt_bias++;
- }
-
- return skb;
+ rx_buffer->offset + hr, size, rx_buffer->truesize);
}
/**
* iavf_build_skb - Build skb around an existing buffer
- * @rx_ring: Rx descriptor ring to transact packets on
* @rx_buffer: Rx buffer to pull data from
* @size: size of buffer to add to skb
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
-static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer,
+static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
unsigned int size)
{
- void *va;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
-#endif
+ u32 hr = rx_buffer->page->pp->p.offset;
struct sk_buff *skb;
+ void *va;
- if (!rx_buffer || !size)
- return NULL;
/* prefetch first cache line of first page */
- va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- net_prefetch(va);
+ va = page_address(rx_buffer->page) + rx_buffer->offset;
+ net_prefetch(va + hr);
/* build an skb around the page buffer */
- skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
+ skb = napi_build_skb(va, rx_buffer->truesize);
if (unlikely(!skb))
return NULL;
+ skb_mark_for_recycle(skb);
+
/* update pointers within the skb to store the data */
- skb_reserve(skb, IAVF_SKB_PAD);
+ skb_reserve(skb, hr);
__skb_put(skb, size);
- /* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
-
return skb;
}
/**
- * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- *
- * This function will clean up the contents of the rx_buffer. It will
- * either recycle the buffer or unmap it and free the associated resources.
- */
-static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer)
-{
- if (!rx_buffer)
- return;
-
- if (iavf_can_reuse_rx_page(rx_buffer)) {
- /* hand second half of page back to the ring */
- iavf_reuse_rx_page(rx_ring, rx_buffer);
- rx_ring->rx_stats.page_reuse_count++;
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
- iavf_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buffer->page,
- rx_buffer->pagecnt_bias);
- }
-
- /* clear contents of buffer_info */
- rx_buffer->page = NULL;
-}
-
-/**
* iavf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
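
The headroom used by iavf_build_skb()/iavf_add_rx_frag() above is no longer IAVF_SKB_PAD but the offset the page_pool was created with (rx_buffer->page->pp->p.offset), and the skb truesize is the per-queue value libeth stores in the ring. As a hedged illustration of where those values originate (the real setup is done by the libeth fill-queue creation helper; the variables below are placeholders and the field names reflect the generic page_pool API, which may differ in detail):

/* Hypothetical pool creation showing the source of pp->p.offset. */
struct page_pool_params pp_params = {
	.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
	.order		= 0,
	.pool_size	= ring_count,
	.nid		= NUMA_NO_NODE,
	.dev		= dev,				/* DMA-capable device */
	.dma_dir	= DMA_FROM_DEVICE,
	.offset		= headroom,			/* becomes pp->p.offset */
	.max_len	= PAGE_SIZE - headroom,		/* sync length per buffer */
};
struct page_pool *pp = page_pool_create(&pp_params);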
@@ -1501,7 +1142,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
- struct iavf_rx_buffer *rx_buffer;
+ struct libeth_fqe *rx_buffer;
union iavf_rx_desc *rx_desc;
unsigned int size;
u16 vlan_tag = 0;
@@ -1533,32 +1174,30 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
break;
- size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
- IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
+ size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
- rx_buffer = iavf_get_rx_buffer(rx_ring, size);
+
+ rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean];
+ if (!libeth_rx_sync_for_cpu(rx_buffer, size))
+ goto skip_data;
/* retrieve a buffer from the ring */
if (skb)
- iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
- else if (ring_uses_build_skb(rx_ring))
- skb = iavf_build_skb(rx_ring, rx_buffer, size);
+ iavf_add_rx_frag(skb, rx_buffer, size);
else
- skb = iavf_construct_skb(rx_ring, rx_buffer, size);
+ skb = iavf_build_skb(rx_buffer, size);
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- if (rx_buffer && size)
- rx_buffer->pagecnt_bias++;
break;
}
- iavf_put_rx_buffer(rx_ring, rx_buffer);
+skip_data:
cleaned_count++;
- if (iavf_is_non_eop(rx_ring, rx_desc, skb))
+ if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb))
continue;
/* ERR_MASK will only have valid bits if EOP set, and
@@ -1581,8 +1220,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
total_rx_bytes += skb->len;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
- IAVF_RXD_QW1_PTYPE_SHIFT;
+ rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
/* populate checksum, VLAN, and protocol */
iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
@@ -1746,8 +1384,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
clean_complete = false;
continue;
}
- arm_wb |= ring->arm_wb;
- ring->arm_wb = false;
+ arm_wb |= !!(ring->flags & IAVF_TXR_FLAGS_ARM_WB);
+ ring->flags &= ~IAVF_TXR_FLAGS_ARM_WB;
}
/* Handle case where we are called by netpoll with a budget of 0 */
@@ -2290,8 +1928,7 @@ static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
- td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
- IAVF_TX_FLAGS_VLAN_SHIFT;
+ td_tag = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags);
}
first->tx_flags = tx_flags;
@@ -2467,8 +2104,7 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
IAVF_TXD_CTX_QW1_CMD_SHIFT;
- cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
- IAVF_TX_FLAGS_VLAN_SHIFT;
+ cd_l2tag2 = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags);
}
/* obtain protocol of skb */
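
A recurring cleanup in this file (and in the Rx hunks above) is replacing open-coded mask-and-shift extraction with FIELD_GET() from <linux/bitfield.h>, which derives the shift from the mask at compile time so the *_SHIFT constants disappear from the call sites. A minimal equivalence sketch:

#include <linux/bitfield.h>

/* before: mask, then shift explicitly */
td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >> IAVF_TX_FLAGS_VLAN_SHIFT;

/* after: FIELD_GET() infers the shift from the constant mask */
td_tag = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags);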
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 7e6ee32d19b6..d7b5587aeb8e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -15,7 +15,6 @@
*/
#define IAVF_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define IAVF_ITR_MASK 0x1FFE /* mask for ITR register value */
-#define IAVF_MIN_ITR 2 /* reg uses 2 usec resolution */
#define IAVF_ITR_100K 10 /* all values below must be even */
#define IAVF_ITR_50K 20
#define IAVF_ITR_20K 50
@@ -81,79 +80,8 @@ enum iavf_dyn_idx_t {
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-/* Supported Rx Buffer Sizes (a multiple of 128) */
-#define IAVF_RXBUFFER_256 256
-#define IAVF_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
-#define IAVF_RXBUFFER_2048 2048
-#define IAVF_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
-#define IAVF_MAX_RXBUFFER 9728 /* largest size for single descriptor */
-
-/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
- * this adds up to 512 bytes of extra data meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
- * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
- */
-#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
-#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define iavf_rx_desc iavf_32byte_rx_desc
-#define IAVF_RX_DMA_ATTR \
- (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-
-/* Attempt to maximize the headroom available for incoming frames. We
- * use a 2K buffer for receives and need 1536/1534 to store the data for
- * the frame. This leaves us with 512 bytes of room. From that we need
- * to deduct the space needed for the shared info and the padding needed
- * to IP align the frame.
- *
- * Note: For cache line sizes 256 or larger this value is going to end
- * up negative. In these cases we should fall back to the legacy
- * receive path.
- */
-#if (PAGE_SIZE < 8192)
-#define IAVF_2K_TOO_SMALL_WITH_PADDING \
-((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))
-
-static inline int iavf_compute_pad(int rx_buf_len)
-{
- int page_size, pad_size;
-
- page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
- pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
-
- return pad_size;
-}
-
-static inline int iavf_skb_pad(void)
-{
- int rx_buf_len;
-
- /* If a 2K buffer cannot handle a standard Ethernet frame then
- * optimize padding for a 3K buffer instead of a 1.5K buffer.
- *
- * For a 3K buffer we need to add enough padding to allow for
- * tailroom due to NET_IP_ALIGN possibly shifting us out of
- * cache-line alignment.
- */
- if (IAVF_2K_TOO_SMALL_WITH_PADDING)
- rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
- else
- rx_buf_len = IAVF_RXBUFFER_1536;
-
- /* if needed make room for NET_IP_ALIGN */
- rx_buf_len -= NET_IP_ALIGN;
-
- return iavf_compute_pad(rx_buf_len);
-}
-
-#define IAVF_SKB_PAD iavf_skb_pad()
-#else
-#define IAVF_2K_TOO_SMALL_WITH_PADDING false
-#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#endif
-
/**
* iavf_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
@@ -272,17 +200,6 @@ struct iavf_tx_buffer {
u32 tx_flags;
};
-struct iavf_rx_buffer {
- dma_addr_t dma;
- struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 page_offset;
-#else
- __u16 page_offset;
-#endif
- __u16 pagecnt_bias;
-};
-
struct iavf_queue_stats {
u64 packets;
u64 bytes;
@@ -294,7 +211,6 @@ struct iavf_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
- int prev_pkt_ctr;
u64 tx_lost_interrupt;
};
@@ -302,14 +218,6 @@ struct iavf_rx_queue_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buff_failed;
- u64 page_reuse_count;
- u64 realloc_count;
-};
-
-enum iavf_ring_state_t {
- __IAVF_TX_FDIR_INIT_DONE,
- __IAVF_TX_XPS_INIT_DONE,
- __IAVF_RING_STATE_NBITS /* must be last */
};
/* some useful defines for virtchannel interface, which
@@ -327,16 +235,19 @@ enum iavf_ring_state_t {
struct iavf_ring {
struct iavf_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
- struct device *dev; /* Used for DMA mapping */
+ union {
+ struct page_pool *pp; /* Used on Rx for buffer management */
+ struct device *dev; /* Used on Tx for DMA mapping */
+ };
struct net_device *netdev; /* netdev ring maps to */
union {
+ struct libeth_fqe *rx_fqes;
struct iavf_tx_buffer *tx_bi;
- struct iavf_rx_buffer *rx_bi;
};
- DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
- u16 queue_index; /* Queue number of ring */
- u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
+ u32 truesize;
+
+ u16 queue_index; /* Queue number of ring */
/* high bit set means dynamic, use accessors routines to read/write.
* hardware only supports 2us resolution for the ITR registers.
@@ -346,23 +257,15 @@ struct iavf_ring {
u16 itr_setting;
u16 count; /* Number of descriptors */
- u16 reg_idx; /* HW register index of the ring */
- u16 rx_buf_len;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
- u8 atr_sample_rate;
- u8 atr_count;
-
- bool ring_active; /* is ring online or not */
- bool arm_wb; /* do something to arm write back */
- u8 packet_stride;
-
u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
-#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
+#define IAVF_TXR_FLAGS_ARM_WB BIT(1)
+/* BIT(2) is free */
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
@@ -375,6 +278,7 @@ struct iavf_ring {
struct iavf_rx_queue_stats rx_stats;
};
+ int prev_pkt_ctr; /* For Tx stall detection */
unsigned int size; /* length of descriptor ring in bytes */
dma_addr_t dma; /* physical address of ring */
@@ -382,7 +286,6 @@ struct iavf_ring {
struct iavf_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
- u16 next_to_alloc;
struct sk_buff *skb; /* When iavf_clean_rx_ring_irq() must
* return before it sees the EOP for
* the current packet, we save that skb
@@ -391,22 +294,9 @@ struct iavf_ring {
* iavf_clean_rx_ring_irq() is called
* for this ring.
*/
-} ____cacheline_internodealigned_in_smp;
-
-static inline bool ring_uses_build_skb(struct iavf_ring *ring)
-{
- return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
-}
-static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
-{
- ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
-}
-
-static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
-{
- ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
-}
+ u32 rx_buf_len;
+} ____cacheline_internodealigned_in_smp;
#define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002
@@ -429,17 +319,6 @@ struct iavf_ring_container {
#define iavf_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
-{
-#if (PAGE_SIZE < 8192)
- if (ring->rx_buf_len > (PAGE_SIZE / 2))
- return 1;
-#endif
- return 0;
-}
-
-#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
-
bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
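
The reworked struct iavf_ring above overlays the Rx-only and Tx-only members in unions: an Rx ring owns a page_pool (->pp) plus the libeth_fqe array (->rx_fqes), while a Tx ring keeps the DMA device pointer (->dev) plus ->tx_bi. Which member is live follows from the ring's role; a minimal sketch of the idea (the setup code is hypothetical):

/* Sketch: the union member that gets initialized depends on the queue type. */
if (is_rx_ring)
	ring->pp = page_pool;		/* ->pp and ->rx_fqes are valid */
else
	ring->dev = &pdev->dev;		/* ->dev and ->tx_bi are valid */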
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index 2b6a207fa441..f6b09e57abce 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -10,8 +10,6 @@
#include "iavf_adminq.h"
#include "iavf_devids.h"
-#define IAVF_RXQ_CTX_DBUFF_SHIFT 7
-
/* IAVF_MASK is a macro used on 32 bit registers */
#define IAVF_MASK(mask, shift) ((u32)(mask) << (shift))
@@ -327,94 +325,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
#define IAVF_RXD_QW1_PTYPE_SHIFT 30
#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
-/* Packet type non-ip values */
-enum iavf_rx_l2_ptype {
- IAVF_RX_PTYPE_L2_RESERVED = 0,
- IAVF_RX_PTYPE_L2_MAC_PAY2 = 1,
- IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- IAVF_RX_PTYPE_L2_FIP_PAY2 = 3,
- IAVF_RX_PTYPE_L2_OUI_PAY2 = 4,
- IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6,
- IAVF_RX_PTYPE_L2_ECP_PAY2 = 7,
- IAVF_RX_PTYPE_L2_EVB_PAY2 = 8,
- IAVF_RX_PTYPE_L2_QCN_PAY2 = 9,
- IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- IAVF_RX_PTYPE_L2_ARP = 11,
- IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12,
- IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
- IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
- IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
- IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
- IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
-};
-
-struct iavf_rx_ptype_decoded {
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:1;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum iavf_rx_ptype_outer_ip {
- IAVF_RX_PTYPE_OUTER_L2 = 0,
- IAVF_RX_PTYPE_OUTER_IP = 1
-};
-
-enum iavf_rx_ptype_outer_ip_ver {
- IAVF_RX_PTYPE_OUTER_NONE = 0,
- IAVF_RX_PTYPE_OUTER_IPV4 = 0,
- IAVF_RX_PTYPE_OUTER_IPV6 = 1
-};
-
-enum iavf_rx_ptype_outer_fragmented {
- IAVF_RX_PTYPE_NOT_FRAG = 0,
- IAVF_RX_PTYPE_FRAG = 1
-};
-
-enum iavf_rx_ptype_tunnel_type {
- IAVF_RX_PTYPE_TUNNEL_NONE = 0,
- IAVF_RX_PTYPE_TUNNEL_IP_IP = 1,
- IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum iavf_rx_ptype_tunnel_end_prot {
- IAVF_RX_PTYPE_TUNNEL_END_NONE = 0,
- IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum iavf_rx_ptype_inner_prot {
- IAVF_RX_PTYPE_INNER_PROT_NONE = 0,
- IAVF_RX_PTYPE_INNER_PROT_UDP = 1,
- IAVF_RX_PTYPE_INNER_PROT_TCP = 2,
- IAVF_RX_PTYPE_INNER_PROT_SCTP = 3,
- IAVF_RX_PTYPE_INNER_PROT_ICMP = 4,
- IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5
-};
-
-enum iavf_rx_ptype_payload_layer {
- IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 64c4443dbef9..1e543f6a7c30 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/net/intel/libie/rx.h>
+
#include "iavf.h"
#include "iavf_prototype.h"
@@ -268,13 +270,13 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
- int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
struct virtchnl_queue_pair_info *vqpi;
+ u32 i, max_frame;
size_t len;
- if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
- max_frame = IAVF_MAX_RXBUFFER;
+ max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
+ max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame);
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -288,11 +290,6 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
if (!vqci)
return;
- /* Limit maximum frame size when jumbo frames is not enabled */
- if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
- (adapter->netdev->mtu <= ETH_DATA_LEN))
- max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
-
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -309,9 +306,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.ring_len = adapter->rx_rings[i].count;
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = max_frame;
- vqpi->rxq.databuffer_size =
- ALIGN(adapter->rx_rings[i].rx_buf_len,
- BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
+ vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
if (CRC_OFFLOAD_ALLOWED(adapter))
vqpi->rxq.crc_disable = !!(adapter->netdev->features &
NETIF_F_RXFCS);
@@ -1142,6 +1137,34 @@ void iavf_set_rss_lut(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_rss_hfunc
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS Hash function
+ **/
+void iavf_set_rss_hfunc(struct iavf_adapter *adapter)
+{
+ struct virtchnl_rss_hfunc *vrh;
+ int len = sizeof(*vrh);
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS Hash function, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh)
+ return;
+ vrh->vsi_id = adapter->vsi.id;
+ vrh->rss_algorithm = adapter->hfunc;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_HFUNC;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HFUNC;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)vrh, len);
+ kfree(vrh);
+}
+
+/**
* iavf_enable_vlan_stripping
* @adapter: adapter structure
*
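
A plausible caller flow for iavf_set_rss_hfunc() above (hypothetical sketch; the actual trigger lives in the ethtool/RSS configuration path): the requester stores the algorithm in adapter->hfunc and raises IAVF_FLAG_AQ_SET_RSS_HFUNC, and the adminq task later calls the function when no other virtchnl command is pending.

/* Hypothetical requester side */
adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;

/* ...later, from the adminq/watchdog task: */
if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC)
	iavf_set_rss_hfunc(adapter);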
@@ -1735,8 +1758,8 @@ void iavf_add_fdir_filter(struct iavf_adapter *adapter)
**/
void iavf_del_fdir_filter(struct iavf_adapter *adapter)
{
+ struct virtchnl_fdir_del f = {};
struct iavf_fdir_fltr *fdir;
- struct virtchnl_fdir_del f;
bool process_fltr = false;
int len;
@@ -1753,11 +1776,16 @@ void iavf_del_fdir_filter(struct iavf_adapter *adapter)
list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
process_fltr = true;
- memset(&f, 0, len);
f.vsi_id = fdir->vc_add_msg.vsi_id;
f.flow_id = fdir->flow_id;
fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
break;
+ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
+ process_fltr = true;
+ f.vsi_id = fdir->vc_add_msg.vsi_id;
+ f.flow_id = fdir->flow_id;
+ fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
+ break;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -1902,6 +1930,48 @@ static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
}
/**
+ * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
+ * @adapter: private adapter structure
+ *
+ * Called after a reset to re-add all FDIR filters and to delete those that
+ * were pending deletion.
+ */
+static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *f, *ftmp;
+ bool add_filters = false;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
+ if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
+ f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
+ f->state == IAVF_FDIR_FLTR_ACTIVE) {
+ /* All filters and requests have been removed in PF,
+ * restore them
+ */
+ f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ add_filters = true;
+ } else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
+ f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ /* Link down state, leave filters as inactive */
+ f->state = IAVF_FDIR_FLTR_INACTIVE;
+ } else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
+ f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ /* Delete filters that were pending to be deleted, the
+ * list on PF is already cleared after a reset
+ */
+ list_del(&f->list);
+ kfree(f);
+ adapter->fdir_active_fltr--;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (add_filters)
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+}
+
+/**
* iavf_virtchnl_completion
* @adapter: adapter structure
* @v_opcode: opcode sent by PF
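
For reference, the post-reset state mapping performed by iavf_activate_fdir_filters() above, summarized; if anything is moved back to ADD_REQUEST, IAVF_FLAG_AQ_ADD_FDIR_FILTER is set so the adminq task replays the additions.

/* Pre-reset state                         After iavf_activate_fdir_filters()
 * ADD_REQUEST / ADD_PENDING / ACTIVE  ->  ADD_REQUEST (re-sent to the PF)
 * DIS_REQUEST / DIS_PENDING           ->  INACTIVE    (left disabled)
 * DEL_REQUEST / DEL_PENDING           ->  freed       (PF list already clear)
 */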
@@ -2078,7 +2148,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fdir, &adapter->fdir_list_head,
list) {
- if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING ||
+ fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
iavf_stat_str(&adapter->hw,
@@ -2142,6 +2213,19 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
iavf_stat_str(&adapter->hw, v_retval));
break;
+ case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
+ dev_warn(&adapter->pdev->dev, "Failed to configure hash function, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
+
+ if (adapter->hfunc ==
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ adapter->hfunc =
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+ else
+ adapter->hfunc =
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+
+ break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, iavf_stat_str(&adapter->hw, v_retval),
@@ -2214,6 +2298,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ iavf_activate_fdir_filters(adapter);
+
iavf_parse_vf_resource_msg(adapter);
/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
@@ -2390,7 +2476,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
- if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
+ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
+ del_fltr->status ==
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
fdir->loc);
list_del(&fdir->list);
@@ -2402,6 +2490,17 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
del_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
}
+ } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
+ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
+ del_fltr->status ==
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+ fdir->state = IAVF_FDIR_FLTR_INACTIVE;
+ } else {
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n",
+ del_fltr->status);
+ iavf_print_fdir_fltr(adapter, fdir);
+ }
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 0679907980f7..03500e28ac99 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -5,6 +5,7 @@
# Makefile for the Intel(R) Ethernet Connection E800 Series Linux Driver
#
+subdir-ccflags-y += -I$(src)
obj-$(CONFIG_ICE) += ice.o
ice-y := ice_main.o \
@@ -28,13 +29,17 @@ ice-y := ice_main.o \
ice_flex_pipe.o \
ice_flow.o \
ice_idc.o \
- ice_devlink.o \
+ devlink/devlink.o \
+ devlink/devlink_port.o \
ice_ddp.o \
ice_fw_update.o \
ice_lag.o \
ice_ethtool.o \
ice_repr.o \
- ice_tc_lib.o
+ ice_tc_lib.o \
+ ice_fwlog.o \
+ ice_debugfs.o \
+ ice_adapter.o
ice-$(CONFIG_PCI_IOV) += \
ice_sriov.o \
ice_virtchnl.o \
@@ -49,3 +54,4 @@ ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o
ice-$(CONFIG_GNSS) += ice_gnss.o
+ice-$(CONFIG_ICE_HWMON) += ice_hwmon.o
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 80dc5445b50d..c4b69655cdf5 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -5,13 +5,11 @@
#include "ice.h"
#include "ice_lib.h"
-#include "ice_devlink.h"
+#include "devlink.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
-static int ice_active_port_option = -1;
-
/* context for devlink info version reporting */
struct ice_info_ctx {
char buf[128];
@@ -193,6 +191,24 @@ ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}
+static void ice_info_cgu_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+{
+ u32 id, cfg_ver, fw_ver;
+
+ if (!ice_is_feature_supported(pf, ICE_F_CGU))
+ return;
+ if (ice_aq_get_cgu_info(&pf->hw, &id, &cfg_ver, &fw_ver))
+ return;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", id, cfg_ver, fw_ver);
+}
+
+static void ice_info_cgu_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
+{
+ if (!ice_is_feature_supported(pf, ICE_F_CGU))
+ return;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", pf->hw.cgu_part_number);
+}
+
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }
@@ -235,6 +251,8 @@ static const struct ice_devlink_version {
running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
+ fixed("cgu.id", ice_info_cgu_id),
+ running("fw.cgu", ice_info_cgu_fw_build),
};
/**
@@ -425,6 +443,20 @@ ice_devlink_reload_empr_start(struct ice_pf *pf,
}
/**
+ * ice_devlink_reinit_down - unload given PF
+ * @pf: pointer to the PF struct
+ */
+static void ice_devlink_reinit_down(struct ice_pf *pf)
+{
+ /* No need to take devl_lock, it's already taken by devlink API */
+ ice_unload(pf);
+ rtnl_lock();
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+ rtnl_unlock();
+ ice_deinit_dev(pf);
+}
+
+/**
* ice_devlink_reload_down - prepare for reload
* @devlink: pointer to the devlink instance to reload
* @netns_change: if true, the network namespace is changing
@@ -444,20 +476,20 @@ ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
if (ice_is_eswitch_mode_switchdev(pf)) {
NL_SET_ERR_MSG_MOD(extack,
- "Go to legacy mode before doing reinit\n");
+ "Go to legacy mode before doing reinit");
return -EOPNOTSUPP;
}
if (ice_is_adq_active(pf)) {
NL_SET_ERR_MSG_MOD(extack,
- "Turn off ADQ before doing reinit\n");
+ "Turn off ADQ before doing reinit");
return -EOPNOTSUPP;
}
if (ice_has_vfs(pf)) {
NL_SET_ERR_MSG_MOD(extack,
- "Remove all VFs before doing reinit\n");
+ "Remove all VFs before doing reinit");
return -EOPNOTSUPP;
}
- ice_unload(pf);
+ ice_devlink_reinit_down(pf);
return 0;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
return ice_devlink_reload_empr_start(pf, extack);
@@ -492,248 +524,153 @@ ice_devlink_reload_empr_finish(struct ice_pf *pf,
}
/**
- * ice_devlink_port_opt_speed_str - convert speed to a string
- * @speed: speed value
- */
-static const char *ice_devlink_port_opt_speed_str(u8 speed)
-{
- switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
- case ICE_AQC_PORT_OPT_MAX_LANE_100M:
- return "0.1";
- case ICE_AQC_PORT_OPT_MAX_LANE_1G:
- return "1";
- case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
- return "2.5";
- case ICE_AQC_PORT_OPT_MAX_LANE_5G:
- return "5";
- case ICE_AQC_PORT_OPT_MAX_LANE_10G:
- return "10";
- case ICE_AQC_PORT_OPT_MAX_LANE_25G:
- return "25";
- case ICE_AQC_PORT_OPT_MAX_LANE_50G:
- return "50";
- case ICE_AQC_PORT_OPT_MAX_LANE_100G:
- return "100";
- }
-
- return "-";
-}
-
-#define ICE_PORT_OPT_DESC_LEN 50
-/**
- * ice_devlink_port_options_print - Print available port split options
- * @pf: the PF to print split port options
+ * ice_get_tx_topo_user_sel - Read user's choice from flash
+ * @pf: pointer to pf structure
+ * @layers: value read from flash will be saved here
*
- * Prints a table with available port split options and max port speeds
+ * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV.
+ *
+ * Return: zero when read was successful, negative values otherwise.
*/
-static void ice_devlink_port_options_print(struct ice_pf *pf)
+static int ice_get_tx_topo_user_sel(struct ice_pf *pf, uint8_t *layers)
{
- u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
- struct ice_aqc_get_port_options_elem *options, *opt;
- struct device *dev = ice_pf_to_dev(pf);
- bool active_valid, pending_valid;
- char desc[ICE_PORT_OPT_DESC_LEN];
- const char *str;
- int status;
+ struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
+ struct ice_hw *hw = &pf->hw;
+ int err;
- options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
- sizeof(*options), GFP_KERNEL);
- if (!options)
- return;
+ err = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (err)
+ return err;
- for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
- opt = options + i * ICE_AQC_PORT_OPT_MAX;
- options_count = ICE_AQC_PORT_OPT_MAX;
- active_valid = 0;
+ err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
+ sizeof(usr_sel), &usr_sel, true, true, NULL);
+ if (err)
+ goto exit_release_res;
- status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
- i, true, &active_idx,
- &active_valid, &pending_idx,
- &pending_valid);
- if (status) {
- dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
- i, status);
- goto err;
- }
- }
+ if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL)
+ *layers = ICE_SCHED_5_LAYERS;
+ else
+ *layers = ICE_SCHED_9_LAYERS;
- dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
- dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
- dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+exit_release_res:
+ ice_release_nvm(hw);
- for (i = 0; i < options_count; i++) {
- cnt = 0;
+ return err;
+}
- if (i == ice_active_port_option)
- str = "Active";
- else if ((i == pending_idx) && pending_valid)
- str = "Pending";
- else
- str = "";
+/**
+ * ice_update_tx_topo_user_sel - Save user's preference in flash
+ * @pf: pointer to pf structure
+ * @layers: value to be saved in flash
+ *
+ * The "layers" value defines the user's preference for the number of layers
+ * in the Tx Scheduler Topology Tree. The choice is stored in the PFA TLV
+ * field and picked up by the driver on the next init.
+ *
+ * Return: zero when save was successful, negative values otherwise.
+ */
+static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers)
+{
+ struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
+ struct ice_hw *hw = &pf->hw;
+ int err;
- cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
- "%-8s", str);
+ err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (err)
+ return err;
- cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
- "%-6u", options[i].pmd);
+ err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
+ sizeof(usr_sel), &usr_sel, true, true, NULL);
+ if (err)
+ goto exit_release_res;
- for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
- speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
- str = ice_devlink_port_opt_speed_str(speed);
- cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
- "%3s ", str);
- }
+ if (layers == ICE_SCHED_5_LAYERS)
+ usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL;
+ else
+ usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL;
- dev_dbg(dev, "%s\n", desc);
- }
+ err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2,
+ sizeof(usr_sel.data), &usr_sel.data,
+ true, NULL, NULL);
+exit_release_res:
+ ice_release_nvm(hw);
-err:
- kfree(options);
+ return err;
}
/**
- * ice_devlink_aq_set_port_option - Send set port option admin queue command
- * @pf: the PF to print split port options
- * @option_idx: selected port option
- * @extack: extended netdev ack structure
+ * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to get
+ * @ctx: context to store the parameter value
*
- * Sends set port option admin queue command with selected port option and
- * calls NVM write activate.
+ * Return: zero on success and negative value on failure.
*/
-static int
-ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
- struct netlink_ext_ack *extack)
+static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
- struct device *dev = ice_pf_to_dev(pf);
- int status;
-
- status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
- if (status) {
- dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
- status, pf->hw.adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
- return -EIO;
- }
-
- status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
- if (status) {
- dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
- status, pf->hw.adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
- return -EIO;
- }
-
- status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
- if (status) {
- dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
- status, pf->hw.adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
- ice_release_nvm(&pf->hw);
- return -EIO;
- }
+ struct ice_pf *pf = devlink_priv(devlink);
+ int err;
- ice_release_nvm(&pf->hw);
+ err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8);
+ if (err)
+ return err;
- NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
return 0;
}
/**
- * ice_devlink_port_split - .port_split devlink handler
- * @devlink: devlink instance structure
- * @port: devlink port structure
- * @count: number of ports to split to
- * @extack: extended netdev ack structure
- *
- * Callback for the devlink .port_split operation.
- *
- * Unfortunately, the devlink expression of available options is limited
- * to just a number, so search for an FW port option which supports
- * the specified number. As there could be multiple FW port options with
- * the same port split count, allow switching between them. When the same
- * port split count request is issued again, switch to the next FW port
- * option with the same port split count.
+ * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to set
+ * @ctx: context to get the parameter value
+ * @extack: netlink extended ACK structure
*
- * Return: zero on success or an error code on failure.
+ * Return: zero on success and negative value on failure.
*/
-static int
-ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
- unsigned int count, struct netlink_ext_ack *extack)
+static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
- struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
- u8 i, j, active_idx, pending_idx, new_option;
struct ice_pf *pf = devlink_priv(devlink);
- u8 option_count = ICE_AQC_PORT_OPT_MAX;
- struct device *dev = ice_pf_to_dev(pf);
- bool active_valid, pending_valid;
- int status;
-
- status = ice_aq_get_port_options(&pf->hw, options, &option_count,
- 0, true, &active_idx, &active_valid,
- &pending_idx, &pending_valid);
- if (status) {
- dev_dbg(dev, "Couldn't read port split options, err = %d\n",
- status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
- return -EIO;
- }
-
- new_option = ICE_AQC_PORT_OPT_MAX;
- active_idx = pending_valid ? pending_idx : active_idx;
- for (i = 1; i <= option_count; i++) {
- /* In order to allow switching between FW port options with
- * the same port split count, search for a new option starting
- * from the active/pending option (with array wrap around).
- */
- j = (active_idx + i) % option_count;
-
- if (count == options[j].pmd) {
- new_option = j;
- break;
- }
- }
-
- if (new_option == active_idx) {
- dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
- count);
- NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
- ice_devlink_port_options_print(pf);
- return -EINVAL;
- }
-
- if (new_option == ICE_AQC_PORT_OPT_MAX) {
- dev_dbg(dev, "request to split: count: %u not found\n", count);
- NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
- ice_devlink_port_options_print(pf);
- return -EINVAL;
- }
+ int err;
- status = ice_devlink_aq_set_port_option(pf, new_option, extack);
- if (status)
- return status;
+ err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8);
+ if (err)
+ return err;
- ice_devlink_port_options_print(pf);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Tx scheduling layers have been changed on this device. You must do the PCI slot powercycle for the change to take effect.");
return 0;
}
/**
- * ice_devlink_port_unsplit - .port_unsplit devlink handler
- * @devlink: devlink instance structure
- * @port: devlink port structure
- * @extack: extended netdev ack structure
+ * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers
+ * parameter value
+ * @devlink: unused pointer to devlink instance
+ * @id: the parameter ID to validate
+ * @val: value to validate
+ * @extack: netlink extended ACK structure
*
- * Callback for the devlink .port_unsplit operation.
- * Calls ice_devlink_port_split with split count set to 1.
- * There could be no FW option available with split count 1.
+ * Supported values are:
+ * - 5 - five-layer Tx Scheduler Topology Tree
+ * - 9 - nine-layer Tx Scheduler Topology Tree
*
- * Return: zero on success or an error code on failure.
+ * Return: zero when passed parameter value is supported. Negative value on
+ * error.
*/
-static int
-ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
- struct netlink_ext_ack *extack)
+static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
{
- return ice_devlink_port_split(devlink, port, 1, extack);
+ if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Wrong number of tx scheduler layers provided.");
+ return -EINVAL;
+ }
+
+ return 0;
}
/**
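
With the parameter registered as permanent cmode further down, the expected userspace flow (assuming the standard devlink tool syntax; the PCI address is a placeholder) is along the lines of
  devlink dev param set pci/0000:af:00.0 name tx_scheduling_layers value 5 cmode permanent
followed by the PCI slot power cycle that the extack message above asks for.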
@@ -810,6 +747,10 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
struct ice_vf *vf;
int i;
+ if (node->rate_node)
+ /* already added, skip to the next */
+ goto traverse_children;
+
if (node->parent == tc_node) {
/* create root node */
rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
@@ -831,6 +772,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
if (rate_node && !IS_ERR(rate_node))
node->rate_node = rate_node;
+traverse_children:
for (i = 0; i < node->num_children; i++)
ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
}
@@ -861,6 +803,30 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v
return 0;
}
+static void ice_clear_rate_nodes(struct ice_sched_node *node)
+{
+ node->rate_node = NULL;
+
+ for (int i = 0; i < node->num_children; i++)
+ ice_clear_rate_nodes(node->children[i]);
+}
+
+/**
+ * ice_devlink_rate_clear_tx_topology - clear node->rate_node
+ * @vsi: main vsi struct
+ *
+ * Clear rate_node to cleanup creation of Tx topology.
+ *
+ */
+void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi)
+{
+ struct ice_port_info *pi = vsi->port_info;
+
+ mutex_lock(&pi->sched_lock);
+ ice_clear_rate_nodes(pi->root->children[0]);
+ mutex_unlock(&pi->sched_lock);
+}
+
/**
* ice_set_object_tx_share - sets node scheduling parameter
* @pi: devlink struct instance
@@ -1221,6 +1187,43 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
}
/**
+ * ice_devlink_reinit_up - do reinit of the given PF
+ * @pf: pointer to the PF struct
+ */
+static int ice_devlink_reinit_up(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ int err;
+
+ err = ice_init_dev(pf);
+ if (err)
+ return err;
+
+ vsi->flags = ICE_VSI_FLAG_INIT;
+
+ rtnl_lock();
+ err = ice_vsi_cfg(vsi);
+ rtnl_unlock();
+ if (err)
+ goto err_vsi_cfg;
+
+ /* No need to take devl_lock, it's already taken by devlink API */
+ err = ice_load(pf);
+ if (err)
+ goto err_load;
+
+ return 0;
+
+err_load:
+ rtnl_lock();
+ ice_vsi_decfg(vsi);
+ rtnl_unlock();
+err_vsi_cfg:
+ ice_deinit_dev(pf);
+ return err;
+}
+
+/**
* ice_devlink_reload_up - do reload up after reinit
* @devlink: pointer to the devlink instance reloading
* @action: the action requested
@@ -1240,7 +1243,7 @@ ice_devlink_reload_up(struct devlink *devlink,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return ice_load(pf);
+ return ice_devlink_reinit_up(pf);
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
return ice_devlink_reload_empr_finish(pf, extack);
@@ -1289,9 +1292,9 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
return 0;
}
-static int
-ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
bool roce_ena = ctx->val.vbool;
@@ -1340,9 +1343,9 @@ ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
return 0;
}
-static int
-ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
bool iw_ena = ctx->val.vbool;
@@ -1380,6 +1383,11 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
return 0;
}
+enum ice_param_id {
+ ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
+};
+
static const struct devlink_param ice_devlink_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
ice_devlink_enable_roce_get,
@@ -1389,7 +1397,13 @@ static const struct devlink_param ice_devlink_params[] = {
ice_devlink_enable_iw_get,
ice_devlink_enable_iw_set,
ice_devlink_enable_iw_validate),
-
+ DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
+ "tx_scheduling_layers",
+ DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ ice_devlink_tx_sched_layers_get,
+ ice_devlink_tx_sched_layers_set,
+ ice_devlink_tx_sched_layers_validate),
};
static void ice_devlink_free(void *devlink_ptr)
@@ -1432,7 +1446,7 @@ void ice_devlink_register(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
- devlink_register(devlink);
+ devl_register(devlink);
}
/**
@@ -1443,194 +1457,28 @@ void ice_devlink_register(struct ice_pf *pf)
*/
void ice_devlink_unregister(struct ice_pf *pf)
{
- devlink_unregister(priv_to_devlink(pf));
-}
-
-/**
- * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
- * @pf: the PF to create a devlink port for
- * @ppid: struct with switch id information
- */
-static void
-ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
-{
- struct pci_dev *pdev = pf->pdev;
- u64 id;
-
- id = pci_get_dsn(pdev);
-
- ppid->id_len = sizeof(id);
- put_unaligned_be64(id, &ppid->id);
+ devl_unregister(priv_to_devlink(pf));
}
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_hw *hw = &pf->hw;
+ size_t params_size;
- return devlink_params_register(devlink, ice_devlink_params,
- ARRAY_SIZE(ice_devlink_params));
-}
-
-void ice_devlink_unregister_params(struct ice_pf *pf)
-{
- devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params,
- ARRAY_SIZE(ice_devlink_params));
-}
-
-/**
- * ice_devlink_set_port_split_options - Set port split options
- * @pf: the PF to set port split options
- * @attrs: devlink attributes
- *
- * Sets devlink port split options based on available FW port options
- */
-static void
-ice_devlink_set_port_split_options(struct ice_pf *pf,
- struct devlink_port_attrs *attrs)
-{
- struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
- u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
- bool active_valid, pending_valid;
- int status;
-
- status = ice_aq_get_port_options(&pf->hw, options, &option_count,
- 0, true, &active_idx, &active_valid,
- &pending_idx, &pending_valid);
- if (status) {
- dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
- status);
- return;
- }
-
- /* find the biggest available port split count */
- for (i = 0; i < option_count; i++)
- attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
-
- attrs->splittable = attrs->lanes ? 1 : 0;
- ice_active_port_option = active_idx;
-}
-
-static const struct devlink_port_ops ice_devlink_port_ops = {
- .port_split = ice_devlink_port_split,
- .port_unsplit = ice_devlink_port_unsplit,
-};
-
-/**
- * ice_devlink_create_pf_port - Create a devlink port for this PF
- * @pf: the PF to create a devlink port for
- *
- * Create and register a devlink_port for this PF.
- *
- * Return: zero on success or an error code on failure.
- */
-int ice_devlink_create_pf_port(struct ice_pf *pf)
-{
- struct devlink_port_attrs attrs = {};
- struct devlink_port *devlink_port;
- struct devlink *devlink;
- struct ice_vsi *vsi;
- struct device *dev;
- int err;
-
- dev = ice_pf_to_dev(pf);
-
- devlink_port = &pf->devlink_port;
-
- vsi = ice_get_main_vsi(pf);
- if (!vsi)
- return -EIO;
-
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pf->hw.bus.func;
-
- /* As FW supports only port split options for whole device,
- * set port split options only for first PF.
- */
- if (pf->hw.pf_id == 0)
- ice_devlink_set_port_split_options(pf, &attrs);
-
- ice_devlink_set_switch_id(pf, &attrs.switch_id);
-
- devlink_port_attrs_set(devlink_port, &attrs);
- devlink = priv_to_devlink(pf);
-
- err = devlink_port_register_with_ops(devlink, devlink_port, vsi->idx,
- &ice_devlink_port_ops);
- if (err) {
- dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
- pf->hw.pf_id, err);
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
- * @pf: the PF to cleanup
- *
- * Unregisters the devlink_port structure associated with this PF.
- */
-void ice_devlink_destroy_pf_port(struct ice_pf *pf)
-{
- devlink_port_unregister(&pf->devlink_port);
-}
-
-/**
- * ice_devlink_create_vf_port - Create a devlink port for this VF
- * @vf: the VF to create a port for
- *
- * Create and register a devlink_port for this VF.
- *
- * Return: zero on success or an error code on failure.
- */
-int ice_devlink_create_vf_port(struct ice_vf *vf)
-{
- struct devlink_port_attrs attrs = {};
- struct devlink_port *devlink_port;
- struct devlink *devlink;
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_pf *pf;
- int err;
-
- pf = vf->pf;
- dev = ice_pf_to_dev(pf);
- devlink_port = &vf->devlink_port;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
- attrs.pci_vf.pf = pf->hw.bus.func;
- attrs.pci_vf.vf = vf->vf_id;
-
- ice_devlink_set_switch_id(pf, &attrs.switch_id);
+ params_size = ARRAY_SIZE(ice_devlink_params);
- devlink_port_attrs_set(devlink_port, &attrs);
- devlink = priv_to_devlink(pf);
+ if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+ params_size--;
- err = devlink_port_register(devlink, devlink_port, vsi->idx);
- if (err) {
- dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
- vf->vf_id, err);
- return err;
- }
-
- return 0;
+ return devl_params_register(devlink, ice_devlink_params,
+ params_size);
}
-/**
- * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
- * @vf: the VF to cleanup
- *
- * Unregisters the devlink_port structure associated with this VF.
- */
-void ice_devlink_destroy_vf_port(struct ice_vf *vf)
+void ice_devlink_unregister_params(struct ice_pf *pf)
{
- devl_rate_leaf_destroy(&vf->devlink_port);
- devlink_port_unregister(&vf->devlink_port);
+ devl_params_unregister(priv_to_devlink(pf), ice_devlink_params,
+ ARRAY_SIZE(ice_devlink_params));
}
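The hunk above sizes the parameter array at registration time: the capability-dependent entry is kept as the last element of ice_devlink_params[], so the registered count can simply be reduced when the firmware lacks the feature. A minimal sketch of the pattern, where example_params[] and have_cap are placeholders rather than the driver's real symbols:

static int example_register_params(struct devlink *devlink, bool have_cap)
{
	size_t n = ARRAY_SIZE(example_params);	/* placeholder array */

	if (!have_cap)
		n--;	/* optional parameter is the last array entry */

	return devl_params_register(devlink, example_params, n);
}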
#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
@@ -1871,8 +1719,8 @@ void ice_devlink_init_regions(struct ice_pf *pf)
u64 nvm_size, sram_size;
nvm_size = pf->hw.flash.flash_size;
- pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
- nvm_size);
+ pf->nvm_region = devl_region_create(devlink, &ice_nvm_region_ops, 1,
+ nvm_size);
if (IS_ERR(pf->nvm_region)) {
dev_err(dev, "failed to create NVM devlink region, err %ld\n",
PTR_ERR(pf->nvm_region));
@@ -1880,17 +1728,17 @@ void ice_devlink_init_regions(struct ice_pf *pf)
}
sram_size = pf->hw.flash.sr_words * 2u;
- pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops,
- 1, sram_size);
+ pf->sram_region = devl_region_create(devlink, &ice_sram_region_ops,
+ 1, sram_size);
if (IS_ERR(pf->sram_region)) {
dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
PTR_ERR(pf->sram_region));
pf->sram_region = NULL;
}
- pf->devcaps_region = devlink_region_create(devlink,
- &ice_devcaps_region_ops, 10,
- ICE_AQ_MAX_BUF_LEN);
+ pf->devcaps_region = devl_region_create(devlink,
+ &ice_devcaps_region_ops, 10,
+ ICE_AQ_MAX_BUF_LEN);
if (IS_ERR(pf->devcaps_region)) {
dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
PTR_ERR(pf->devcaps_region));
@@ -1907,11 +1755,11 @@ void ice_devlink_init_regions(struct ice_pf *pf)
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
if (pf->nvm_region)
- devlink_region_destroy(pf->nvm_region);
+ devl_region_destroy(pf->nvm_region);
if (pf->sram_region)
- devlink_region_destroy(pf->sram_region);
+ devl_region_destroy(pf->sram_region);
if (pf->devcaps_region)
- devlink_region_destroy(pf->devcaps_region);
+ devl_region_destroy(pf->devcaps_region);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h
index 6ec96779f52e..d291c0e2e17b 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h
@@ -20,5 +20,6 @@ void ice_devlink_destroy_regions(struct ice_pf *pf);
int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi);
void ice_tear_down_devlink_rate_tree(struct ice_pf *pf);
+void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi);
#endif /* _ICE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
new file mode 100644
index 000000000000..13e6790d3cae
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+
+#include <linux/vmalloc.h>
+
+#include "ice.h"
+#include "devlink.h"
+
+static int ice_active_port_option = -1;
+
+/**
+ * ice_devlink_port_opt_speed_str - convert speed to a string
+ * @speed: speed value
+ */
+static const char *ice_devlink_port_opt_speed_str(u8 speed)
+{
+ switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
+ case ICE_AQC_PORT_OPT_MAX_LANE_100M:
+ return "0.1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_1G:
+ return "1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
+ return "2.5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_5G:
+ return "5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_10G:
+ return "10";
+ case ICE_AQC_PORT_OPT_MAX_LANE_25G:
+ return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_50G:
+ return "50";
+ case ICE_AQC_PORT_OPT_MAX_LANE_100G:
+ return "100";
+ }
+
+ return "-";
+}
+
+#define ICE_PORT_OPT_DESC_LEN 50
+/**
+ * ice_devlink_port_options_print - Print available port split options
+ * @pf: the PF to print split port options
+ *
+ * Prints a table with available port split options and max port speeds
+ */
+static void ice_devlink_port_options_print(struct ice_pf *pf)
+{
+ u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
+ struct ice_aqc_get_port_options_elem *options, *opt;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ char desc[ICE_PORT_OPT_DESC_LEN];
+ const char *str;
+ int status;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
+ sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return;
+
+ for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
+ opt = options + i * ICE_AQC_PORT_OPT_MAX;
+ options_count = ICE_AQC_PORT_OPT_MAX;
+ active_valid = 0;
+
+ status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
+ i, true, &active_idx,
+ &active_valid, &pending_idx,
+ &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
+ i, status);
+ goto err;
+ }
+ }
+
+ dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
+ dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
+ dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+
+ for (i = 0; i < options_count; i++) {
+ cnt = 0;
+
+ if (i == ice_active_port_option)
+ str = "Active";
+ else if ((i == pending_idx) && pending_valid)
+ str = "Pending";
+ else
+ str = "";
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-8s", str);
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-6u", options[i].pmd);
+
+ for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
+ speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
+ str = ice_devlink_port_opt_speed_str(speed);
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%3s ", str);
+ }
+
+ dev_dbg(dev, "%s\n", desc);
+ }
+
+err:
+ kfree(options);
+}
+
+/**
+ * ice_devlink_aq_set_port_option - Send set port option admin queue command
+ * @pf: the PF to print split port options
+ * @pf: the PF to set the port option for
+ * @option_idx: selected port option
+ * @extack: extended netdev ack structure
+ *
+ * Sends set port option admin queue command with selected port option and
+ * calls NVM write activate.
+ */
+static int
+ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int status;
+
+ status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
+ if (status) {
+ dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
+ return -EIO;
+ }
+
+ status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EIO;
+ }
+
+ status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
+ if (status) {
+ dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
+ ice_release_nvm(&pf->hw);
+ return -EIO;
+ }
+
+ ice_release_nvm(&pf->hw);
+
+ NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
+ return 0;
+}
+
+/**
+ * ice_devlink_port_split - .port_split devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @count: number of ports to split to
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_split operation.
+ *
+ * Unfortunately, the devlink expression of available options is limited
+ * to just a number, so search for an FW port option which supports
+ * the specified number. As there could be multiple FW port options with
+ * the same port split count, allow switching between them. When the same
+ * port split count request is issued again, switch to the next FW port
+ * option with the same port split count.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
+ unsigned int count, struct netlink_ext_ack *extack)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, j, active_idx, pending_idx, new_option;
+ struct ice_pf *pf = devlink_priv(devlink);
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port split options, err = %d\n",
+ status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
+ return -EIO;
+ }
+
+ new_option = ICE_AQC_PORT_OPT_MAX;
+ active_idx = pending_valid ? pending_idx : active_idx;
+ for (i = 1; i <= option_count; i++) {
+ /* In order to allow switching between FW port options with
+ * the same port split count, search for a new option starting
+ * from the active/pending option (with array wrap around).
+ */
+ j = (active_idx + i) % option_count;
+
+ if (count == options[j].pmd) {
+ new_option = j;
+ break;
+ }
+ }
+
+ if (new_option == active_idx) {
+ dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
+ count);
+ NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ if (new_option == ICE_AQC_PORT_OPT_MAX) {
+ dev_dbg(dev, "request to split: count: %u not found\n", count);
+ NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ status = ice_devlink_aq_set_port_option(pf, new_option, extack);
+ if (status)
+ return status;
+
+ ice_devlink_port_options_print(pf);
+
+ return 0;
+}
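A worked sketch of the wrap-around search above, with made-up numbers: 4 FW options, option 2 active, and the requested count matching options 1 and 2. Starting at active_idx + 1, the loop visits indexes 3, 0, 1, 2 and returns 1, so repeating the same request cycles through equivalent options instead of re-selecting the active one. Here pmd[] stands in for options[].pmd:

static u8 example_pick_option(const u8 *pmd, u8 option_count, u8 active_idx,
			      unsigned int count)
{
	u8 i, j;

	for (i = 1; i <= option_count; i++) {
		j = (active_idx + i) % option_count;
		if (pmd[j] == count)
			return j;	/* first match after the active one */
	}

	return option_count;		/* sentinel: no matching option */
}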
+
+/**
+ * ice_devlink_port_unsplit - .port_unsplit devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_unsplit operation.
+ * Calls ice_devlink_port_split with split count set to 1.
+ * Note that an FW port option with split count 1 may not be available.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return ice_devlink_port_split(devlink, port, 1, extack);
+}
+
+/**
+ * ice_devlink_set_port_split_options - Set port split options
+ * @pf: the PF to set port split options
+ * @attrs: devlink attributes
+ *
+ * Sets devlink port split options based on available FW port options
+ */
+static void
+ice_devlink_set_port_split_options(struct ice_pf *pf,
+ struct devlink_port_attrs *attrs)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
+ status);
+ return;
+ }
+
+ /* find the biggest available port split count */
+ for (i = 0; i < option_count; i++)
+ attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
+
+ attrs->splittable = attrs->lanes ? 1 : 0;
+ ice_active_port_option = active_idx;
+}
+
+static const struct devlink_port_ops ice_devlink_port_ops = {
+ .port_split = ice_devlink_port_split,
+ .port_unsplit = ice_devlink_port_unsplit,
+};
+
+/**
+ * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
+ * @pf: the PF whose switch id is being set
+ * @ppid: struct with switch id information
+ */
+static void
+ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
+{
+ struct pci_dev *pdev = pf->pdev;
+ u64 id;
+
+ id = pci_get_dsn(pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
+/**
+ * ice_devlink_create_pf_port - Create a devlink port for this PF
+ * @pf: the PF to create a devlink port for
+ *
+ * Create and register a devlink_port for this PF.
+ * This function has to be called under devl_lock.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_pf_port(struct ice_pf *pf)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int err;
+
+ devlink = priv_to_devlink(pf);
+
+ dev = ice_pf_to_dev(pf);
+
+ devlink_port = &pf->devlink_port;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EIO;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = pf->hw.bus.func;
+
+ /* As FW supports only port split options for whole device,
+ * set port split options only for first PF.
+ */
+ if (pf->hw.pf_id == 0)
+ ice_devlink_set_port_split_options(pf, &attrs);
+
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_ops);
+ if (err) {
+ dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
+ pf->hw.pf_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
+ * @pf: the PF to cleanup
+ *
+ * Unregisters the devlink_port structure associated with this PF.
+ * This function has to be called under devl_lock.
+ */
+void ice_devlink_destroy_pf_port(struct ice_pf *pf)
+{
+ devl_port_unregister(&pf->devlink_port);
+}
+
+/**
+ * ice_devlink_create_vf_port - Create a devlink port for this VF
+ * @vf: the VF to create a port for
+ *
+ * Create and register a devlink_port for this VF.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_vf_port(struct ice_vf *vf)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ int err;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ devlink_port = &vf->devlink_port;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = pf->hw.bus.func;
+ attrs.pci_vf.vf = vf->vf_id;
+
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ err = devlink_port_register(devlink, devlink_port, vsi->idx);
+ if (err) {
+ dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
+ * @vf: the VF to cleanup
+ *
+ * Unregisters the devlink_port structure associated with this VF.
+ */
+void ice_devlink_destroy_vf_port(struct ice_vf *vf)
+{
+ devl_rate_leaf_destroy(&vf->devlink_port);
+ devlink_port_unregister(&vf->devlink_port);
+}
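Unlike the devlink_port_register() wrappers used previously, devl_port_register_with_ops() and devl_port_unregister() expect the devlink instance lock to be held, as the kernel-doc for the PF port helpers above notes. A minimal caller sketch, assuming a probe-path context that does not already hold the lock:

static int example_attach_pf_port(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	int err;

	devl_lock(devlink);
	err = ice_devlink_create_pf_port(pf);
	devl_unlock(devlink);

	return err;
}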
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
new file mode 100644
index 000000000000..9223bcdb6444
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _DEVLINK_PORT_H_
+#define _DEVLINK_PORT_H_
+
+int ice_devlink_create_pf_port(struct ice_pf *pf);
+void ice_devlink_destroy_pf_port(struct ice_pf *pf);
+int ice_devlink_create_vf_port(struct ice_vf *vf);
+void ice_devlink_destroy_vf_port(struct ice_vf *vf);
+
+#endif /* _DEVLINK_PORT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 351e0d36df44..6ad8002b22e1 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -77,6 +77,7 @@
#include "ice_gnss.h"
#include "ice_irq.h"
#include "ice_dpll.h"
+#include "ice_adapter.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -330,7 +331,6 @@ struct ice_vsi {
struct net_device *netdev;
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
- struct ice_port_info *port_info; /* back pointer to port_info */
struct ice_rx_ring **rx_rings; /* Rx ring array */
struct ice_tx_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
@@ -348,18 +348,16 @@ struct ice_vsi {
/* tell if only dynamic irq allocation is allowed */
bool irq_dyn_alloc;
- enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
- struct ice_vf *vf; /* VF associated with this VSI */
-
u16 num_gfltr;
u16 num_bfltr;
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
+ u8 rss_hfunc; /* User configured hash type */
u8 *rss_hkey_user; /* User configured hash keys */
u8 *rss_lut_user; /* User configured lookup table entries */
u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */
@@ -444,12 +442,18 @@ struct ice_vsi {
u8 old_numtc;
u16 old_ena_tc;
- struct ice_channel *ch;
-
/* setup back reference, to which aggregator node this VSI
* corresponds to
*/
struct ice_agg_node *agg_node;
+
+ struct_group_tagged(ice_vsi_cfg_params, params,
+ struct ice_port_info *port_info; /* back pointer to port_info */
+ struct ice_channel *ch; /* VSI's channel structure, may be NULL */
+ struct ice_vf *vf; /* VF associated with this VSI, may be NULL */
+ u32 flags; /* VSI flags used for rebuild and configuration */
+ enum ice_vsi_type type; /* the type of the VSI */
+ );
} ____cacheline_internodealigned_in_smp;
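The struct_group_tagged() above overlays the listed members with a named sub-struct (struct ice_vsi_cfg_params), so rebuild code can copy or restore the whole configuration group in one assignment while each member stays directly addressable on the VSI. A minimal sketch of that use, assuming a rebuild path that wants a snapshot:

static void example_snapshot_cfg(const struct ice_vsi *vsi,
				 struct ice_vsi_cfg_params *snap)
{
	*snap = vsi->params;	/* copies port_info, ch, vf, flags, type */
}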
/* struct that defines an interrupt vector */
@@ -457,7 +461,7 @@ struct ice_q_vector {
struct ice_vsi *vsi;
u16 v_idx; /* index in the vsi->q_vector array. */
- u16 reg_idx;
+ u16 reg_idx; /* PF relative register index */
u8 num_ring_rx; /* total number of Rx rings in vector */
u8 num_ring_tx; /* total number of Tx rings in vector */
u8 wb_on_itr:1; /* if true, WB on ITR is enabled */
@@ -479,6 +483,7 @@ struct ice_q_vector {
char name[ICE_INT_NAME_STR_LEN];
u16 total_events; /* net_dim(): number of interrupts processed */
+ u16 vf_reg_idx; /* VF relative register index */
struct msi_map irq;
} ____cacheline_internodealigned_in_smp;
@@ -492,7 +497,6 @@ enum ice_pf_flags {
ICE_FLAG_DCB_ENA,
ICE_FLAG_FD_ENA,
ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
- ICE_FLAG_PTP, /* PTP is enabled by software */
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
ICE_FLAG_CLS_FLOWER,
@@ -517,15 +521,14 @@ enum ice_pf_flags {
};
enum ice_misc_thread_tasks {
- ICE_MISC_THREAD_EXTTS_EVENT,
ICE_MISC_THREAD_TX_TSTAMP,
ICE_MISC_THREAD_NBITS /* must be last */
};
-struct ice_switchdev_info {
- struct ice_vsi *control_vsi;
+struct ice_eswitch {
struct ice_vsi *uplink_vsi;
struct ice_esw_br_offloads *br_offloads;
+ struct xarray reprs;
bool is_running;
};
@@ -538,6 +541,7 @@ struct ice_agg_node {
struct ice_pf {
struct pci_dev *pdev;
+ struct ice_adapter *adapter;
struct devlink_region *nvm_region;
struct devlink_region *sram_region;
@@ -563,6 +567,10 @@ struct ice_pf {
struct ice_vsi_stats **vsi_stats;
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
+ struct dentry *ice_debugfs_pf;
+ struct dentry *ice_debugfs_pf_fwlog;
+ /* keep track of all the dentrys for FW log modules */
+ struct dentry **ice_debugfs_pf_fwlog_modules;
struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
@@ -595,8 +603,10 @@ struct ice_pf {
wait_queue_head_t reset_wait_queue;
u32 hw_csum_rx_error;
+ u32 hw_rx_eipe_error;
u32 oicr_err_reg;
struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */
+ struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
u16 num_lan_msix; /* Total MSIX vectors for base driver */
@@ -621,6 +631,7 @@ struct ice_pf {
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
+ char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
@@ -637,7 +648,7 @@ struct ice_pf {
struct ice_link_default_override_tlv link_dflt_override;
struct ice_lag *lag; /* Link Aggregation information */
- struct ice_switchdev_info switchdev;
+ struct ice_eswitch eswitch;
struct ice_esw_br_port *br_port;
#define ICE_INVALID_AGG_NODE_ID 0
@@ -648,6 +659,7 @@ struct ice_pf {
#define ICE_MAX_VF_AGG_NODES 32
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_dplls dplls;
+ struct device *hwmon_dev;
};
extern struct workqueue_struct *ice_lag_wq;
@@ -846,7 +858,7 @@ static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
*/
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
- return pf->switchdev.is_running;
+ return pf->eswitch.is_running;
}
#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
@@ -881,6 +893,12 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
return false;
}
+void ice_debugfs_fwlog_init(struct ice_pf *pf);
+void ice_debugfs_pf_deinit(struct ice_pf *pf);
+void ice_debugfs_init(void);
+void ice_debugfs_exit(void);
+void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
+
bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
@@ -912,6 +930,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
+int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
@@ -963,6 +982,8 @@ void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
+int ice_init_dev(struct ice_pf *pf);
+void ice_deinit_dev(struct ice_pf *pf);
/**
* ice_set_rdma_cap - enable RDMA support
@@ -989,4 +1010,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
+
+extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
new file mode 100644
index 000000000000..52d15ef7f4b1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright Red Hat
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/xarray.h>
+#include "ice_adapter.h"
+
+static DEFINE_XARRAY(ice_adapters);
+
+/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
+#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
+#define INDEX_FIELD_BUS GENMASK(12, 5)
+#define INDEX_FIELD_SLOT GENMASK(4, 0)
+
+static unsigned long ice_adapter_index(const struct pci_dev *pdev)
+{
+ unsigned int domain = pci_domain_nr(pdev->bus);
+
+ WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
+
+ return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
+ FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
+ FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
+}
+
+static struct ice_adapter *ice_adapter_new(void)
+{
+ struct ice_adapter *adapter;
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter)
+ return NULL;
+
+ spin_lock_init(&adapter->ptp_gltsyn_time_lock);
+ refcount_set(&adapter->refcount, 1);
+
+ return adapter;
+}
+
+static void ice_adapter_free(struct ice_adapter *adapter)
+{
+ kfree(adapter);
+}
+
+DEFINE_FREE(ice_adapter_free, struct ice_adapter*, if (_T) ice_adapter_free(_T))
+
+/**
+ * ice_adapter_get - Get a shared ice_adapter structure.
+ * @pdev: Pointer to the pci_dev whose driver is getting the ice_adapter.
+ *
+ * Gets a pointer to a shared ice_adapter structure. Physical functions (PFs)
+ * of the same multi-function PCI device share one ice_adapter structure.
+ * The ice_adapter is reference-counted. The PF driver must use ice_adapter_put
+ * to release its reference.
+ *
+ * Context: Process, may sleep.
+ * Return: Pointer to ice_adapter on success.
+ * ERR_PTR() on error. -ENOMEM is the only possible error.
+ */
+struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
+{
+ struct ice_adapter *ret, __free(ice_adapter_free) *adapter = NULL;
+ unsigned long index = ice_adapter_index(pdev);
+
+ adapter = ice_adapter_new();
+ if (!adapter)
+ return ERR_PTR(-ENOMEM);
+
+ xa_lock(&ice_adapters);
+ ret = __xa_cmpxchg(&ice_adapters, index, NULL, adapter, GFP_KERNEL);
+ if (xa_is_err(ret)) {
+ ret = ERR_PTR(xa_err(ret));
+ goto unlock;
+ }
+ if (ret) {
+ refcount_inc(&ret->refcount);
+ goto unlock;
+ }
+ ret = no_free_ptr(adapter);
+unlock:
+ xa_unlock(&ice_adapters);
+ return ret;
+}
+
+/**
+ * ice_adapter_put - Release a reference to the shared ice_adapter structure.
+ * @pdev: Pointer to the pci_dev whose driver is releasing the ice_adapter.
+ *
+ * Releases the reference to ice_adapter previously obtained with
+ * ice_adapter_get.
+ *
+ * Context: Any.
+ */
+void ice_adapter_put(const struct pci_dev *pdev)
+{
+ unsigned long index = ice_adapter_index(pdev);
+ struct ice_adapter *adapter;
+
+ xa_lock(&ice_adapters);
+ adapter = xa_load(&ice_adapters, index);
+ if (WARN_ON(!adapter))
+ goto unlock;
+
+ if (!refcount_dec_and_test(&adapter->refcount))
+ goto unlock;
+
+ WARN_ON(__xa_erase(&ice_adapters, index) != adapter);
+ ice_adapter_free(adapter);
+unlock:
+ xa_unlock(&ice_adapters);
+}
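For illustration with assumed values: two PFs of one device, 0000:3b:00.0 and 0000:3b:00.1, map to the same index because PCI_SLOT() drops the function number, so both lookups hit the same xarray slot and share one ice_adapter:

static unsigned long example_index(void)
{
	/* hypothetical device 0000:3b:00.x: domain 0, bus 0x3b, slot 0 */
	return FIELD_PREP(INDEX_FIELD_DOMAIN, 0) |
	       FIELD_PREP(INDEX_FIELD_BUS, 0x3b) |
	       FIELD_PREP(INDEX_FIELD_SLOT, 0);	/* same for .0 and .1 */
}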
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
new file mode 100644
index 000000000000..9d11014ec02f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-FileCopyrightText: Copyright Red Hat */
+
+#ifndef _ICE_ADAPTER_H_
+#define _ICE_ADAPTER_H_
+
+#include <linux/spinlock_types.h>
+#include <linux/refcount_types.h>
+
+struct pci_dev;
+
+/**
+ * struct ice_adapter - PCI adapter resources shared across PFs
+ * @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME
+ * register of the PTP clock.
+ * @refcount: Reference count. struct ice_pf objects hold the references.
+ */
+struct ice_adapter {
+ /* For access to the GLTSYN_TIME register */
+ spinlock_t ptp_gltsyn_time_lock;
+
+ refcount_t refcount;
+};
+
+struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
+void ice_adapter_put(const struct pci_dev *pdev);
+
+#endif /* _ICE_ADAPTER_H */
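A minimal usage sketch (not part of this patch): each PF takes a reference to the shared adapter in its probe path and drops it on remove; the last put frees the structure. Error handling and the rest of probe are omitted:

static int example_probe(struct pci_dev *pdev)
{
	struct ice_adapter *adapter = ice_adapter_get(pdev);

	if (IS_ERR(adapter))
		return PTR_ERR(adapter);

	/* pf->adapter = adapter; continue with normal probe */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	ice_adapter_put(pdev);	/* frees the adapter on the last reference */
}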
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index d7fdb7ba7268..e76c388b9905 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -117,9 +117,11 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
#define ICE_AQC_CAPS_RDMA 0x0051
+#define ICE_AQC_CAPS_SENSOR_READING 0x0067
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
+#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
#define ICE_AQC_BIT_ROCEV2_LAG 0x01
#define ICE_AQC_BIT_SRIOV_LAG 0x02
@@ -263,6 +265,8 @@ struct ice_aqc_set_port_params {
#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7)
#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
+#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED BIT(14)
+#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL BIT(15)
#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
@@ -421,10 +425,10 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID BIT(2)
#define ICE_AQ_VSI_INNER_VLAN_EMODE_S 3
#define ICE_AQ_VSI_INNER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
-#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH (0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
-#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP (0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
-#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR (0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
-#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH 0x0U
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP 0x1U
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR 0x2U
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING 0x3U
u8 inner_vlan_reserved2[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
@@ -490,11 +494,11 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
-#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M GENMASK(7, 6)
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ 0x0U
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ 0x1U
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR 0x2U
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_JHASH 0x3U
u8 q_opt_tc;
#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
@@ -592,8 +596,9 @@ struct ice_aqc_recipe_data_elem {
struct ice_aqc_recipe_to_profile {
__le16 profile_id;
u8 rsvd[6];
- DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES);
+ __le64 recipe_assoc;
};
+static_assert(sizeof(struct ice_aqc_recipe_to_profile) == 16);
/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
*/
@@ -806,6 +811,23 @@ struct ice_aqc_get_topo {
__le32 addr_low;
};
+/* Get/Set Tx Topology (indirect 0x0418/0x0417) */
+struct ice_aqc_get_set_tx_topo {
+ u8 set_flags;
+#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0)
+#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1)
+#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4)
+#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5)
+
+ u8 get_flags;
+#define ICE_AQC_TX_TOPO_GET_RAM 2
+
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Update TSE (indirect 0x0403)
* Get TSE (indirect 0x0404)
* Add TSE (indirect 0x0401)
@@ -1359,8 +1381,9 @@ struct ice_aqc_get_link_status_data {
u8 lp_flowcontrol;
#define ICE_AQ_LINK_LP_PAUSE_ADV BIT(0)
#define ICE_AQ_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
#define ICE_AQC_LS_DATA_SIZE_V2 \
- offsetofend(struct ice_aqc_get_link_status_data, lp_flowcontrol)
+ offsetofend(struct ice_aqc_get_link_status_data, reserved5)
} __packed;
/* Set event mask command (direct 0x0613) */
@@ -1413,6 +1436,30 @@ struct ice_aqc_get_phy_rec_clk_out {
__le16 node_handle;
};
+/* Get sensor reading (direct 0x0632) */
+struct ice_aqc_get_sensor_reading {
+ u8 sensor;
+ u8 format;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get sensor reading response (direct 0x0632) */
+struct ice_aqc_get_sensor_reading_resp {
+ union {
+ u8 raw[8];
+ /* Output data for sensor 0x00, format 0x00 */
+ struct __packed {
+ s8 temp;
+ u8 temp_warning_threshold;
+ u8 temp_critical_threshold;
+ u8 temp_fatal_threshold;
+ u8 reserved[4];
+ } s0f0;
+ } data;
+};
+
struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
@@ -1637,6 +1684,15 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_START_POINT 0
+#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
+
+struct ice_aqc_nvm_tx_topo_user_sel {
+ __le16 length;
+ u8 data;
+#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4)
+ u8 reserved;
+};
+
/* NVM Checksum Command (direct, 0x0706) */
struct ice_aqc_nvm_checksum {
u8 flags;
@@ -2069,78 +2125,6 @@ struct ice_aqc_add_rdma_qset_data {
struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[];
};
-/* Configure Firmware Logging Command (indirect 0xFF09)
- * Logging Information Read Response (indirect 0xFF10)
- * Note: The 0xFF10 command has no input parameters.
- */
-struct ice_aqc_fw_logging {
- u8 log_ctrl;
-#define ICE_AQC_FW_LOG_AQ_EN BIT(0)
-#define ICE_AQC_FW_LOG_UART_EN BIT(1)
- u8 rsvd0;
- u8 log_ctrl_valid; /* Not used by 0xFF10 Response */
-#define ICE_AQC_FW_LOG_AQ_VALID BIT(0)
-#define ICE_AQC_FW_LOG_UART_VALID BIT(1)
- u8 rsvd1[5];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-enum ice_aqc_fw_logging_mod {
- ICE_AQC_FW_LOG_ID_GENERAL = 0,
- ICE_AQC_FW_LOG_ID_CTRL,
- ICE_AQC_FW_LOG_ID_LINK,
- ICE_AQC_FW_LOG_ID_LINK_TOPO,
- ICE_AQC_FW_LOG_ID_DNL,
- ICE_AQC_FW_LOG_ID_I2C,
- ICE_AQC_FW_LOG_ID_SDP,
- ICE_AQC_FW_LOG_ID_MDIO,
- ICE_AQC_FW_LOG_ID_ADMINQ,
- ICE_AQC_FW_LOG_ID_HDMA,
- ICE_AQC_FW_LOG_ID_LLDP,
- ICE_AQC_FW_LOG_ID_DCBX,
- ICE_AQC_FW_LOG_ID_DCB,
- ICE_AQC_FW_LOG_ID_NETPROXY,
- ICE_AQC_FW_LOG_ID_NVM,
- ICE_AQC_FW_LOG_ID_AUTH,
- ICE_AQC_FW_LOG_ID_VPD,
- ICE_AQC_FW_LOG_ID_IOSF,
- ICE_AQC_FW_LOG_ID_PARSER,
- ICE_AQC_FW_LOG_ID_SW,
- ICE_AQC_FW_LOG_ID_SCHEDULER,
- ICE_AQC_FW_LOG_ID_TXQ,
- ICE_AQC_FW_LOG_ID_RSVD,
- ICE_AQC_FW_LOG_ID_POST,
- ICE_AQC_FW_LOG_ID_WATCHDOG,
- ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
- ICE_AQC_FW_LOG_ID_MNG,
- ICE_AQC_FW_LOG_ID_MAX,
-};
-
-/* Defines for both above FW logging command/response buffers */
-#define ICE_AQC_FW_LOG_ID_S 0
-#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S)
-
-#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */
-#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */
-
-#define ICE_AQC_FW_LOG_EN_S 12
-#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S)
-#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */
-#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */
-#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */
-#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */
-
-/* Get/Clear FW Log (indirect 0xFF11) */
-struct ice_aqc_get_clear_fw_log {
- u8 flags;
-#define ICE_AQC_FW_LOG_CLEAR BIT(0)
-#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1)
- u8 rsvd1[7];
- __le32 addr_high;
- __le32 addr_low;
-};
-
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
@@ -2403,6 +2387,84 @@ struct ice_aqc_event_lan_overflow {
u8 reserved[8];
};
+enum ice_aqc_fw_logging_mod {
+ ICE_AQC_FW_LOG_ID_GENERAL = 0,
+ ICE_AQC_FW_LOG_ID_CTRL,
+ ICE_AQC_FW_LOG_ID_LINK,
+ ICE_AQC_FW_LOG_ID_LINK_TOPO,
+ ICE_AQC_FW_LOG_ID_DNL,
+ ICE_AQC_FW_LOG_ID_I2C,
+ ICE_AQC_FW_LOG_ID_SDP,
+ ICE_AQC_FW_LOG_ID_MDIO,
+ ICE_AQC_FW_LOG_ID_ADMINQ,
+ ICE_AQC_FW_LOG_ID_HDMA,
+ ICE_AQC_FW_LOG_ID_LLDP,
+ ICE_AQC_FW_LOG_ID_DCBX,
+ ICE_AQC_FW_LOG_ID_DCB,
+ ICE_AQC_FW_LOG_ID_XLR,
+ ICE_AQC_FW_LOG_ID_NVM,
+ ICE_AQC_FW_LOG_ID_AUTH,
+ ICE_AQC_FW_LOG_ID_VPD,
+ ICE_AQC_FW_LOG_ID_IOSF,
+ ICE_AQC_FW_LOG_ID_PARSER,
+ ICE_AQC_FW_LOG_ID_SW,
+ ICE_AQC_FW_LOG_ID_SCHEDULER,
+ ICE_AQC_FW_LOG_ID_TXQ,
+ ICE_AQC_FW_LOG_ID_RSVD,
+ ICE_AQC_FW_LOG_ID_POST,
+ ICE_AQC_FW_LOG_ID_WATCHDOG,
+ ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
+ ICE_AQC_FW_LOG_ID_MNG,
+ ICE_AQC_FW_LOG_ID_SYNCE,
+ ICE_AQC_FW_LOG_ID_HEALTH,
+ ICE_AQC_FW_LOG_ID_TSDRV,
+ ICE_AQC_FW_LOG_ID_PFREG,
+ ICE_AQC_FW_LOG_ID_MDLVER,
+ ICE_AQC_FW_LOG_ID_MAX,
+};
+
+/* Set FW Logging configuration (indirect 0xFF30)
+ * Register for FW Logging (indirect 0xFF31)
+ * Query FW Logging (indirect 0xFF32)
+ * FW Log Event (indirect 0xFF33)
+ */
+struct ice_aqc_fw_log {
+ u8 cmd_flags;
+#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0)
+#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
+#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
+#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
+#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0)
+#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2)
+
+ u8 rsp_flag;
+ __le16 fw_rt_msb;
+ union {
+ struct {
+ __le32 fw_rt_lsb;
+ } sync;
+ struct {
+ __le16 log_resolution;
+#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1)
+#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128)
+
+ __le16 mdl_cnt;
+ } cfg;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response Buffer for:
+ * Set Firmware Logging Configuration (0xFF30)
+ * Query FW Logging (0xFF32)
+ */
+struct ice_aqc_fw_log_cfg_resp {
+ __le16 module_identifier;
+ u8 log_level;
+ u8 rsvd0;
+};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -2443,6 +2505,8 @@ struct ice_aq_desc {
struct ice_aqc_restart_an restart_an;
struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out;
struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out;
+ struct ice_aqc_get_sensor_reading get_sensor_reading;
+ struct ice_aqc_get_sensor_reading_resp get_sensor_reading_resp;
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
@@ -2480,8 +2544,6 @@ struct ice_aq_desc {
struct ice_aqc_add_rdma_qset add_rdma_qset;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
- struct ice_aqc_fw_logging fw_logging;
- struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_download_pkg download_pkg;
struct ice_aqc_set_cgu_input_config set_cgu_input_config;
struct ice_aqc_get_cgu_input_config get_cgu_input_config;
@@ -2493,6 +2555,7 @@ struct ice_aq_desc {
struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio;
struct ice_aqc_get_cgu_info get_cgu_info;
struct ice_aqc_driver_shared_params drv_shared_params;
+ struct ice_aqc_fw_log fw_log;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_mac_cfg set_mac_cfg;
@@ -2502,6 +2565,7 @@ struct ice_aq_desc {
struct ice_aqc_get_link_topo get_link_topo;
struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
+ struct ice_aqc_get_set_tx_topo get_set_tx_topo;
} params;
};
@@ -2608,6 +2672,10 @@ enum ice_adminq_opc {
ice_aqc_opc_query_sched_res = 0x0412,
ice_aqc_opc_remove_rl_profiles = 0x0415,
+ /* tx topology commands */
+ ice_aqc_opc_set_tx_topo = 0x0417,
+ ice_aqc_opc_get_tx_topo = 0x0418,
+
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
ice_aqc_opc_set_phy_cfg = 0x0601,
@@ -2618,6 +2686,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_set_phy_rec_clk_out = 0x0630,
ice_aqc_opc_get_phy_rec_clk_out = 0x0631,
+ ice_aqc_opc_get_sensor_reading = 0x0632,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
@@ -2690,9 +2759,11 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
- /* debug commands */
- ice_aqc_opc_fw_logging = 0xFF09,
- ice_aqc_opc_fw_logging_info = 0xFF10,
+ /* FW Logging Commands */
+ ice_aqc_opc_fw_logs_config = 0xFF30,
+ ice_aqc_opc_fw_logs_register = 0xFF31,
+ ice_aqc_opc_fw_logs_query = 0xFF32,
+ ice_aqc_opc_fw_logs_event = 0xFF33,
};
#endif /* _ICE_ADMINQ_CMD_H_ */
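For illustration, a decode of the new sensor 0x00 / format 0x00 response layout added above. How the response buffer is filled (the get-sensor-reading AQ command) is assumed here, and the raw fields are whole degrees Celsius, so hwmon-style reporting scales them by 1000:

static void example_report_temp(const struct ice_aqc_get_sensor_reading_resp *resp)
{
	long temp_mc = resp->data.s0f0.temp * 1000L;
	long crit_mc = resp->data.s0f0.temp_critical_threshold * 1000L;

	pr_debug("temp %ld mC, critical threshold %ld mC\n", temp_mc, crit_mc);
}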
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index cca0e753f38f..7cee365cc7d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2018-2020, Intel Corporation. */
#include "ice.h"
+#include <net/rps.h>
/**
* ice_is_arfs_active - helper to check is aRFS is active
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 7fa43827a3f0..687f6cb2b917 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -121,7 +121,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->irq.index = -ENOENT;
if (vsi->type == ICE_VSI_VF) {
- q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector);
+ ice_calc_vf_reg_idx(vsi->vf, q_vector);
goto out;
} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);
@@ -145,6 +145,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
skip_alloc:
q_vector->reg_idx = q_vector->irq.index;
+ q_vector->vf_reg_idx = q_vector->irq.index;
/* only set affinity_mask if the CPU is online */
if (cpu_online(v_idx))
@@ -189,10 +190,16 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
}
q_vector = vsi->q_vectors[v_idx];
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
+ NULL);
tx_ring->q_vector = NULL;
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ }
+ ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+ ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
+ NULL);
rx_ring->q_vector = NULL;
+ }
/* only VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
@@ -224,24 +231,16 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
/* no need to update global register if ITR gran is already set */
if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
- (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
- GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
- GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
- GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
- GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
+ (FIELD_GET(GLINT_CTL_ITR_GRAN_200_M, regval) == ICE_ITR_GRAN_US) &&
+ (FIELD_GET(GLINT_CTL_ITR_GRAN_100_M, regval) == ICE_ITR_GRAN_US) &&
+ (FIELD_GET(GLINT_CTL_ITR_GRAN_50_M, regval) == ICE_ITR_GRAN_US) &&
+ (FIELD_GET(GLINT_CTL_ITR_GRAN_25_M, regval) == ICE_ITR_GRAN_US))
return;
- regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
- GLINT_CTL_ITR_GRAN_200_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
- GLINT_CTL_ITR_GRAN_100_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
- GLINT_CTL_ITR_GRAN_50_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
- GLINT_CTL_ITR_GRAN_25_M);
+ regval = FIELD_PREP(GLINT_CTL_ITR_GRAN_200_M, ICE_ITR_GRAN_US) |
+ FIELD_PREP(GLINT_CTL_ITR_GRAN_100_M, ICE_ITR_GRAN_US) |
+ FIELD_PREP(GLINT_CTL_ITR_GRAN_50_M, ICE_ITR_GRAN_US) |
+ FIELD_PREP(GLINT_CTL_ITR_GRAN_25_M, ICE_ITR_GRAN_US);
wr32(hw, GLINT_CTL, regval);
}
@@ -266,30 +265,6 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8
}
/**
- * ice_eswitch_calc_txq_handle
- * @ring: pointer to ring which unique index is needed
- *
- * To correctly work with many netdevs ring->q_index of Tx rings on switchdev
- * VSI can repeat. Hardware ring setup requires unique q_index. Calculate it
- * here by finding index in vsi->tx_rings of this ring.
- *
- * Return ICE_INVAL_Q_INDEX when index wasn't found. Should never happen,
- * because VSI is get from ring->vsi, so it has to be present in this VSI.
- */
-static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
-{
- struct ice_vsi *vsi = ring->vsi;
- int i;
-
- ice_for_each_txq(vsi, i) {
- if (vsi->tx_rings[i] == ring)
- return i;
- }
-
- return ICE_INVAL_Q_INDEX;
-}
-
-/**
* ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
* @ring: The Tx ring to configure
*
@@ -355,9 +330,6 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
- case ICE_VSI_SWITCHDEV_CTRL:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
- break;
default:
return;
}
@@ -481,6 +453,14 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
+ /* PF acts as uplink for switchdev; set flex descriptor with src_vsi
+ * metadata and flags to allow redirecting to PR netdev
+ */
+ if (ice_is_eswitch_mode_switchdev(vsi->back)) {
+ ring->flags |= ICE_RX_FLAGS_MULTIDEV;
+ rxdid = ICE_RXDID_FLEX_NIC_2;
+ }
+
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
* increasing context priority to pick up profile ID; default is 0x01;
@@ -519,13 +499,26 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
return 0;
}
+static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
+{
+ void *ctx_ptr = &ring->pkt_ctx;
+ struct xsk_cb_desc desc = {};
+
+ XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
+ desc.src = &ctx_ptr;
+ desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
+ sizeof(struct xdp_buff);
+ desc.bytes = sizeof(ctx_ptr);
+ xsk_pool_fill_cb(ring->xsk_pool, &desc);
+}
+
/**
* ice_vsi_cfg_rxq - Configure an Rx queue
* @ring: the ring being configured
*
* Return 0 on success and a negative value on error.
*/
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
@@ -534,36 +527,46 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
ring->rx_buf_len = ring->vsi->rx_buf_len;
if (ring->vsi->type == ICE_VSI_PF) {
- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
- /* coverity[check_return] */
- __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
- ring->q_index,
- ring->q_vector->napi.napi_id,
- ring->vsi->rx_buf_len);
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ }
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
if (err)
return err;
xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ ice_xsk_pool_fill_cb(ring);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
- /* coverity[check_return] */
- __xdp_rxq_info_reg(&ring->xdp_rxq,
- ring->netdev,
- ring->q_index,
- ring->q_vector->napi.napi_id,
- ring->vsi->rx_buf_len);
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ }
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
@@ -575,6 +578,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
ring->xdp.data = NULL;
+ ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
err = ice_setup_rx_ctx(ring);
if (err) {
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
@@ -609,6 +613,62 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+{
+ if (q_idx >= vsi->num_rxq)
+ return -EINVAL;
+
+ return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ vsi->rx_buf_len = ICE_RXBUF_1664;
+#if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ vsi->rx_buf_len = ICE_RXBUF_3072;
+ }
+}
+
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Rx VSI for operation.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+ u16 i;
+
+ if (vsi->type == ICE_VSI_VF)
+ goto setup_rings;
+
+ ice_vsi_cfg_frame_size(vsi);
+setup_rings:
+ /* set up individual rings */
+ ice_for_each_rxq(vsi, i) {
+ int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/**
* __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
* @qs_cfg: gathered variables needed for pf->vsi queues assignment
@@ -804,7 +864,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @ring: Tx ring to be configured
* @qg_buf: queue group buffer
*/
-int
+static int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
@@ -841,14 +901,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
/* Add unique software queue handle of the Tx queue per
* TC into the VSI Tx ring
*/
- if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
- ring->q_handle = ice_eswitch_calc_txq_handle(ring);
-
- if (ring->q_handle == ICE_INVAL_Q_INDEX)
- return -ENODEV;
- } else {
- ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
- }
+ ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
if (ch)
status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
@@ -875,6 +928,80 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
return 0;
}
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+ u16 q_idx)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+
+ if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
+ return -EINVAL;
+
+ qg_buf->num_txqs = 1;
+
+ return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ int err = 0;
+ u16 q_idx;
+
+ qg_buf->num_txqs = 1;
+
+ for (q_idx = 0; q_idx < count; q_idx++) {
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
+}
+
+/**
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in given VSI for operation.
+ */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+{
+ int ret;
+ int i;
+
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
+ if (ret)
+ return ret;
+
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
+
+ return 0;
+}
+
/**
* ice_cfg_itr - configure the initial interrupt throttle values
* @hw: pointer to the HW structure
@@ -913,10 +1040,10 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
struct ice_hw *hw = &pf->hw;
u32 val;
- itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+ itr_idx = FIELD_PREP(QINT_TQCTL_ITR_INDX_M, itr_idx);
val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+ FIELD_PREP(QINT_TQCTL_MSIX_INDX_M, msix_idx);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
if (ice_is_xdp_ena_vsi(vsi)) {
@@ -945,10 +1072,10 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
struct ice_hw *hw = &pf->hw;
u32 val;
- itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
+ itr_idx = FIELD_PREP(QINT_RQCTL_ITR_INDX_M, itr_idx);
val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
+ FIELD_PREP(QINT_RQCTL_MSIX_INDX_M, msix_idx);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
@@ -960,7 +1087,7 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
* @hw: pointer to the HW structure
* @q_vector: interrupt vector to trigger the software interrupt for
*/
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector)
{
wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
(ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
@@ -1035,7 +1162,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* are needed for stopping Tx queue
*/
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
+ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
struct ice_channel *ch = ring->ch;
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index b67dca417acb..b711bc921928 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,8 @@
#include "ice.h"
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -14,20 +15,21 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
-int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
- struct ice_aqc_add_tx_qgrp *qg_buf);
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+ u16 q_idx);
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector);
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
+ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
#endif /* _ICE_BASE_H_ */
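The ice_base.c hunks above replace open-coded shift-and-mask arithmetic with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>; the two forms produce identical register values. A small equivalence sketch using the same QINT_TQCTL ITR index field (the _S shift define appears only in the removed lines and is reused here purely for comparison):

static u32 example_encode_itr(u16 itr_idx)
{
	/* form removed by this patch */
	u32 old_style = (itr_idx << QINT_TQCTL_ITR_INDX_S) &
			QINT_TQCTL_ITR_INDX_M;
	/* form added by this patch */
	u32 new_style = FIELD_PREP(QINT_TQCTL_ITR_INDX_M, itr_idx);

	WARN_ON(old_style != new_style);
	return new_style;
}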
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 9a6c25f98632..5649b257e631 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -154,10 +154,22 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E823L_SFP:
hw->mac_type = ICE_MAC_GENERIC;
break;
- case ICE_DEV_ID_E830_BACKPLANE:
- case ICE_DEV_ID_E830_QSFP56:
- case ICE_DEV_ID_E830_SFP:
- case ICE_DEV_ID_E830_SFP_DD:
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ hw->mac_type = ICE_MAC_GENERIC_3K_E825;
+ break;
+ case ICE_DEV_ID_E830CC_BACKPLANE:
+ case ICE_DEV_ID_E830CC_QSFP56:
+ case ICE_DEV_ID_E830CC_SFP:
+ case ICE_DEV_ID_E830CC_SFP_DD:
+ case ICE_DEV_ID_E830C_BACKPLANE:
+ case ICE_DEV_ID_E830_XXV_BACKPLANE:
+ case ICE_DEV_ID_E830C_QSFP:
+ case ICE_DEV_ID_E830_XXV_QSFP:
+ case ICE_DEV_ID_E830C_SFP:
+ case ICE_DEV_ID_E830_XXV_SFP:
hw->mac_type = ICE_MAC_E830;
break;
default:
@@ -170,6 +182,18 @@ static int ice_set_mac_type(struct ice_hw *hw)
}
/**
+ * ice_is_generic_mac - check if device's mac_type is generic
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if mac_type is generic (with SBQ support), false if not
+ */
+bool ice_is_generic_mac(struct ice_hw *hw)
+{
+ return (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->mac_type == ICE_MAC_GENERIC_3K_E825);
+}
+
+/**
* ice_is_e810
* @hw: pointer to the hardware structure
*
@@ -241,6 +265,25 @@ bool ice_is_e823(struct ice_hw *hw)
}
/**
+ * ice_is_e825c - Check if a device is E825C family device
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if the device is E825-C based, false if not.
+ */
+bool ice_is_e825c(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
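/* Usage sketch (illustrative only): both new helpers are plain predicates
 * over hw->mac_type / hw->device_id, meant for gating checks like the one
 * below; the feature test itself is hypothetical.
 */
static bool example_feature_supported(struct ice_hw *hw)
{
	/* e.g. only on generic MACs (which expose the sideband queue) that
	 * are not E825-C parts
	 */
	return ice_is_generic_mac(hw) && !ice_is_e825c(hw);
}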
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -934,216 +977,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
}
/**
- * ice_get_fw_log_cfg - get FW logging configuration
- * @hw: pointer to the HW struct
- */
-static int ice_get_fw_log_cfg(struct ice_hw *hw)
-{
- struct ice_aq_desc desc;
- __le16 *config;
- int status;
- u16 size;
-
- size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
- config = kzalloc(size, GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
-
- status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
- if (!status) {
- u16 i;
-
- /* Save FW logging information into the HW structure */
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- u16 v, m, flgs;
-
- v = le16_to_cpu(config[i]);
- m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
- flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
-
- if (m < ICE_AQC_FW_LOG_ID_MAX)
- hw->fw_log.evnts[m].cur = flgs;
- }
- }
-
- kfree(config);
-
- return status;
-}
-
-/**
- * ice_cfg_fw_log - configure FW logging
- * @hw: pointer to the HW struct
- * @enable: enable certain FW logging events if true, disable all if false
- *
- * This function enables/disables the FW logging via Rx CQ events and a UART
- * port based on predetermined configurations. FW logging via the Rx CQ can be
- * enabled/disabled for individual PF's. However, FW logging via the UART can
- * only be enabled/disabled for all PFs on the same device.
- *
- * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
- * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
- * before initializing the device.
- *
- * When re/configuring FW logging, callers need to update the "cfg" elements of
- * the hw->fw_log.evnts array with the desired logging event configurations for
- * modules of interest. When disabling FW logging completely, the callers can
- * just pass false in the "enable" parameter. On completion, the function will
- * update the "cur" element of the hw->fw_log.evnts array with the resulting
- * logging event configurations of the modules that are being re/configured. FW
- * logging modules that are not part of a reconfiguration operation retain their
- * previous states.
- *
- * Before resetting the device, it is recommended that the driver disables FW
- * logging before shutting down the control queue. When disabling FW logging
- * ("enable" = false), the latest configurations of FW logging events stored in
- * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
- * a device reset.
- *
- * When enabling FW logging to emit log messages via the Rx CQ during the
- * device's initialization phase, a mechanism alternative to interrupt handlers
- * needs to be used to extract FW log messages from the Rx CQ periodically and
- * to prevent the Rx CQ from being full and stalling other types of control
- * messages from FW to SW. Interrupts are typically disabled during the device's
- * initialization phase.
- */
-static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
-{
- struct ice_aqc_fw_logging *cmd;
- u16 i, chgs = 0, len = 0;
- struct ice_aq_desc desc;
- __le16 *data = NULL;
- u8 actv_evnts = 0;
- void *buf = NULL;
- int status = 0;
-
- if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
- return 0;
-
- /* Disable FW logging only when the control queue is still responsive */
- if (!enable &&
- (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
- return 0;
-
- /* Get current FW log settings */
- status = ice_get_fw_log_cfg(hw);
- if (status)
- return status;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
- cmd = &desc.params.fw_logging;
-
- /* Indicate which controls are valid */
- if (hw->fw_log.cq_en)
- cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
-
- if (hw->fw_log.uart_en)
- cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
-
- if (enable) {
- /* Fill in an array of entries with FW logging modules and
- * logging events being reconfigured.
- */
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- u16 val;
-
- /* Keep track of enabled event types */
- actv_evnts |= hw->fw_log.evnts[i].cfg;
-
- if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
- continue;
-
- if (!data) {
- data = devm_kcalloc(ice_hw_to_dev(hw),
- ICE_AQC_FW_LOG_ID_MAX,
- sizeof(*data),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- }
-
- val = i << ICE_AQC_FW_LOG_ID_S;
- val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
- data[chgs++] = cpu_to_le16(val);
- }
-
- /* Only enable FW logging if at least one module is specified.
- * If FW logging is currently enabled but all modules are not
- * enabled to emit log messages, disable FW logging altogether.
- */
- if (actv_evnts) {
- /* Leave if there is effectively no change */
- if (!chgs)
- goto out;
-
- if (hw->fw_log.cq_en)
- cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
-
- if (hw->fw_log.uart_en)
- cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
-
- buf = data;
- len = sizeof(*data) * chgs;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- }
- }
-
- status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
- if (!status) {
- /* Update the current configuration to reflect events enabled.
- * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
- * logging mode is enabled for the device. They do not reflect
- * actual modules being enabled to emit log messages. So, their
- * values remain unchanged even when all modules are disabled.
- */
- u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
-
- hw->fw_log.actv_evnts = actv_evnts;
- for (i = 0; i < cnt; i++) {
- u16 v, m;
-
- if (!enable) {
- /* When disabling all FW logging events as part
- * of device's de-initialization, the original
- * configurations are retained, and can be used
- * to reconfigure FW logging later if the device
- * is re-initialized.
- */
- hw->fw_log.evnts[i].cur = 0;
- continue;
- }
-
- v = le16_to_cpu(data[i]);
- m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
- hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
- }
- }
-
-out:
- devm_kfree(ice_hw_to_dev(hw), data);
-
- return status;
-}
-
-/**
- * ice_output_fw_log
- * @hw: pointer to the HW struct
- * @desc: pointer to the AQ message descriptor
- * @buf: pointer to the buffer accompanying the AQ message
- *
- * Formats a FW Log message and outputs it via the standard driver logs.
- */
-void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
-{
- ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
- ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
- le16_to_cpu(desc->datalen));
- ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
-}
-
-/**
* ice_get_itr_intrl_gran
* @hw: pointer to the HW struct
*
@@ -1152,9 +985,8 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
*/
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
- u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
- GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
- GL_PWR_MODE_CTL_CAR_MAX_BW_S;
+ u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
+ rd32(hw, GL_PWR_MODE_CTL));
switch (max_agg_bw) {
case ICE_MAX_AGG_BW_200G:
@@ -1176,9 +1008,9 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
*/
int ice_init_hw(struct ice_hw *hw)
{
- struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
+ void *mac_buf __free(kfree) = NULL;
u16 mac_buf_len;
- void *mac_buf;
int status;
/* Set MAC type based on DeviceID */
@@ -1186,9 +1018,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
return status;
- hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
- PF_FUNC_RID_FUNC_NUM_M) >>
- PF_FUNC_RID_FUNC_NUM_S;
+ hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));
status = ice_reset(hw, ICE_RESET_PFR);
if (status)
@@ -1200,10 +1030,10 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- /* Enable FW logging. Not fatal if this fails. */
- status = ice_cfg_fw_log(hw, true);
+ status = ice_fwlog_init(hw);
if (status)
- ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
+ ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
+ status);
status = ice_clear_pf_cfg(hw);
if (status)
@@ -1258,7 +1088,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) {
status = -ENOMEM;
goto err_unroll_sched;
@@ -1268,7 +1098,6 @@ int ice_init_hw(struct ice_hw *hw)
status = ice_aq_get_phy_caps(hw->port_info, false,
ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
NULL);
- devm_kfree(ice_hw_to_dev(hw), pcaps);
if (status)
dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
status);
@@ -1295,18 +1124,15 @@ int ice_init_hw(struct ice_hw *hw)
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
- mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
- sizeof(struct ice_aqc_manage_mac_read_resp),
- GFP_KERNEL);
- mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
-
+ mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
+ GFP_KERNEL);
if (!mac_buf) {
status = -ENOMEM;
goto err_unroll_fltr_mgmt_struct;
}
+ mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
- devm_kfree(ice_hw_to_dev(hw), mac_buf);
if (status)
goto err_unroll_fltr_mgmt_struct;
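/* Side note (a minimal sketch, not part of the patch): the __free(kfree)
 * annotations above come from <linux/cleanup.h>; kfree() runs automatically
 * when the pointer leaves scope, which is why the explicit
 * devm_kfree()/kfree() calls and their error-path duplicates can go away.
 */
#include <linux/cleanup.h>
#include <linux/slab.h>

static int example_scoped_alloc(void)
{
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* ... use pcaps; kfree(pcaps) is invoked on every return path ... */
	return 0;
}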
@@ -1322,6 +1148,8 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
mutex_init(&hw->tnl_lock);
+ ice_init_chk_recipe_reuse_support(hw);
+
return 0;
err_unroll_fltr_mgmt_struct:
@@ -1354,8 +1182,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
- /* Attempt to disable FW logging before shutting down control queues */
- ice_cfg_fw_log(hw, false);
+ ice_fwlog_deinit(hw);
ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
@@ -1374,8 +1201,8 @@ int ice_check_reset(struct ice_hw *hw)
* or EMPR has occurred. The grst delay value is in 100ms units.
* Add 1sec for outstanding AQ commands that can take a long time.
*/
- grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
- GLGEN_RSTCTL_GRSTDEL_S) + 10;
+ grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
+ rd32(hw, GLGEN_RSTCTL)) + 10;
for (cnt = 0; cnt < grst_timeout; cnt++) {
mdelay(100);
@@ -1576,9 +1403,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* it to HW register space and enables the hardware to prefetch descriptors
* instead of only fetching them on demand
*/
-int
-ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
- u32 rxq_index)
+int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index)
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
@@ -1797,6 +1623,8 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
case ice_aqc_opc_set_port_params:
case ice_aqc_opc_get_vlan_mode_parameters:
case ice_aqc_opc_set_vlan_mode_parameters:
+ case ice_aqc_opc_set_tx_topo:
+ case ice_aqc_opc_get_tx_topo:
case ice_aqc_opc_add_recipe:
case ice_aqc_opc_recipe_to_profile:
case ice_aqc_opc_get_recipe:
@@ -2353,6 +2181,9 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
prefix, caps->sriov_lag);
break;
+ case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
+ caps->tx_sched_topo_comp_mode_en = (number == 1);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
@@ -2459,7 +2290,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
+ info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
@@ -2660,11 +2491,12 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
- info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
+ info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number);
info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
+ info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2685,6 +2517,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_ena);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
info->ts_ll_read);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
+ info->ts_ll_int_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2711,6 +2545,26 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
+ * enabled sensors.
+ */
+static void
+ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ dev_p->supported_sensors = le32_to_cpu(cap->number);
+
+ ice_debug(hw, ICE_DBG_INIT,
+ "dev caps: supported sensors (bitmap) = 0x%x\n",
+ dev_p->supported_sensors);
+}
+
+/**
* ice_parse_dev_caps - Parse device capabilities
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2755,9 +2609,12 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_1588:
ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
+ case ICE_AQC_CAPS_FD:
ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_SENSOR_READING:
+ ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
+ break;
default:
/* Don't list common capabilities as unknown */
if (!found)
@@ -3428,19 +3285,14 @@ int ice_update_link_info(struct ice_port_info *pi)
return status;
if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
- struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_hw *hw;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
- hw = pi->hw;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
- GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
-
- devm_kfree(ice_hw_to_dev(hw), pcaps);
}
return status;
@@ -3581,8 +3433,8 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
- struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_hw *hw;
int status;
@@ -3592,7 +3444,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
*aq_failures = 0;
hw = pi->hw;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
@@ -3644,7 +3496,6 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
}
out:
- devm_kfree(ice_hw_to_dev(hw), pcaps);
return status;
}
@@ -3723,7 +3574,7 @@ int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
- struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
struct ice_hw *hw;
int status;
@@ -3792,8 +3643,6 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
}
out:
- kfree(pcaps);
-
return status;
}
@@ -4072,6 +3921,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
{
struct ice_aqc_sff_eeprom *cmd;
struct ice_aq_desc desc;
+ u16 i2c_bus_addr;
int status;
if (!data || (mem_addr & 0xff00))
@@ -4082,15 +3932,13 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
cmd->lport_num = (u8)(lport & 0xff);
cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
- cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
- ICE_AQC_SFF_I2CBUS_7BIT_M) |
- ((set_page <<
- ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
- ICE_AQC_SFF_SET_EEPROM_PAGE_M));
- cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
- cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
+ i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
+ FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page);
if (write)
- cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
+ i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE;
+ cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr);
+ cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
+ cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);
status = ice_aq_send_cmd(hw, &desc, data, length, cd);
return status;
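/* Side note (illustrative): le16_encode_bits(val, MASK) from
 * <linux/bitfield.h> is shorthand for cpu_to_le16(FIELD_PREP(MASK, val)), so
 * the eeprom_page assignment above preserves the previous wire encoding.
 */
static inline __le16 example_encode_eeprom_page(u8 page)
{
	return le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);
}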
@@ -4345,6 +4193,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
+ u16 vmvf_and_timeout;
u16 i, sz = 0;
int status;
@@ -4360,27 +4209,26 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
cmd->num_entries = num_qgrps;
- cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
- ICE_AQC_Q_DIS_TIMEOUT_M);
+ vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5);
switch (rst_src) {
case ICE_VM_RESET:
cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
- cmd->vmvf_and_timeout |=
- cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
+ vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M;
break;
case ICE_VF_RESET:
cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
/* In this case, FW expects vmvf_num to be absolute VF ID */
- cmd->vmvf_and_timeout |=
- cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
- ICE_AQC_Q_DIS_VMVF_NUM_M);
+ vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) &
+ ICE_AQC_Q_DIS_VMVF_NUM_M;
break;
case ICE_NO_RESET:
default:
break;
}
+ cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout);
+
/* flush pipe on time out */
cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
/* If no queue group info, we are in a reset flow. Issue the AQ */
@@ -4455,10 +4303,8 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
cmd->num_qs = num_qs;
cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
- cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) &
- ICE_AQC_Q_CFG_DST_PRT_M;
- cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) &
- ICE_AQC_Q_CFG_TIMEOUT_M;
+ cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
+ cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
cmd->blocked_cgds = 0;
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -4516,13 +4362,13 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
/* End of FW Admin Queue command wrappers */
/**
- * ice_write_byte - write a byte to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_byte - write a byte to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u8 src_byte, dest_byte, mask;
u8 *from, *dest;
@@ -4533,14 +4379,11 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = (u8)(BIT(ce_info->width) - 1);
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
src_byte = *from;
- src_byte &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_byte <<= shift_width;
+ src_byte &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4555,13 +4398,13 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_word - write a word to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_word - write a word to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u16 src_word, mask;
__le16 dest_word;
@@ -4573,17 +4416,14 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = BIT(ce_info->width) - 1;
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_word = *(u16 *)from;
- src_word &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_word <<= shift_width;
+ src_word &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4598,13 +4438,13 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_dword - write a dword to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_dword - write a dword to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u32 src_dword, mask;
__le32 dest_dword;
@@ -4616,25 +4456,14 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
-
- /* if the field width is exactly 32 on an x86 machine, then the shift
- * operation will not work because the SHL instructions count is masked
- * to 5 bits so the shift will do nothing
- */
- if (ce_info->width < 32)
- mask = BIT(ce_info->width) - 1;
- else
- mask = (u32)~0;
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_dword = *(u32 *)from;
- src_dword &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_dword <<= shift_width;
+ src_dword &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4649,13 +4478,13 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_qword - write a qword to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_qword - write a qword to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u64 src_qword, mask;
__le64 dest_qword;
@@ -4667,25 +4496,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
-
- /* if the field width is exactly 64 on an x86 machine, then the shift
- * operation will not work because the SHL instructions count is masked
- * to 6 bits so the shift will do nothing
- */
- if (ce_info->width < 64)
- mask = BIT_ULL(ce_info->width) - 1;
- else
- mask = (u64)~0;
+ mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_qword = *(u64 *)from;
- src_qword &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_qword <<= shift_width;
+ src_qword &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
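/* Side note (a sketch of the new masking scheme, with made-up values):
 * building the mask at its final bit position with GENMASK()/GENMASK_ULL()
 * means the mask is never shifted afterwards, so the removed special cases
 * for width == 32/64 (where BIT(width) - 1 would overflow) are no longer
 * needed; the source value is shifted first and then masked.
 */
static inline u32 example_pack_dword(u32 src, u8 width, u8 shift_width)
{
	u32 mask = GENMASK(width - 1 + shift_width, shift_width);

	src <<= shift_width;
	return src & mask;	/* same result as the old mask-then-shift code */
}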
@@ -4704,11 +4522,10 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @hw: pointer to the hardware structure
* @src_ctx: pointer to a generic non-packed context structure
* @dest_ctx: pointer to memory for the packed structure
- * @ce_info: a description of the structure to be transformed
+ * @ce_info: List of Rx context elements
*/
-int
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
int f;
@@ -4724,16 +4541,16 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
}
switch (ce_info[f].size_of) {
case sizeof(u8):
- ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u16):
- ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u32):
- ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u64):
- ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
break;
default:
return -EINVAL;
@@ -4891,7 +4708,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 i, buf_size = __struct_size(qg_list);
struct ice_q_ctx *q_ctx;
int status = -ENOENT;
@@ -5113,7 +4930,7 @@ int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
- DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 qg_size = __struct_size(qg_list);
struct ice_hw *hw;
int status = 0;
@@ -5332,7 +5149,6 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
u8 *eec_mode)
{
struct ice_aqc_get_cgu_dpll_status *cmd;
- const s64 nsec_per_psec = 1000LL;
struct ice_aq_desc desc;
int status;
@@ -5348,8 +5164,7 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
*phase_offset = le32_to_cpu(cmd->phase_offset_h);
*phase_offset <<= 32;
*phase_offset += le32_to_cpu(cmd->phase_offset_l);
- *phase_offset = div64_s64(sign_extend64(*phase_offset, 47),
- nsec_per_psec);
+ *phase_offset = sign_extend64(*phase_offset, 47);
*eec_mode = cmd->eec_mode;
}
@@ -5541,6 +5356,35 @@ ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
}
/**
+ * ice_aq_get_sensor_reading
+ * @hw: pointer to the HW struct
+ * @data: pointer to data to be read from the sensor
+ *
+ * Get sensor reading (0x0632)
+ */
+int ice_aq_get_sensor_reading(struct ice_hw *hw,
+ struct ice_aqc_get_sensor_reading_resp *data)
+{
+ struct ice_aqc_get_sensor_reading *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
+ cmd = &desc.params.get_sensor_reading;
+#define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0
+#define ICE_INTERNAL_TEMP_SENSOR 0
+ cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
+ cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ memcpy(data, &desc.params.get_sensor_reading_resp,
+ sizeof(*data));
+
+ return status;
+}
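/* Usage sketch (illustrative; the response field names are assumptions, not
 * taken from this patch): a caller such as a hwmon temperature read would
 * issue the command and pull the value out of the response.
 */
static int example_read_internal_temp(struct ice_hw *hw, long *temp)
{
	struct ice_aqc_get_sensor_reading_resp resp;
	int err;

	err = ice_aq_get_sensor_reading(hw, &resp);
	if (err)
		return err;

	*temp = resp.data.s0f0.temp;	/* assumed layout for sensor 0, format 0 */
	return 0;
}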
+
+/**
* ice_replay_pre_init - replay pre initialization
* @hw: pointer to the HW struct
*
@@ -5935,7 +5779,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
return status;
}
- ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
+ ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
ICE_LINK_OVERRIDE_PHY_CFG_S;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 31fdcac33986..ffb22c7ce28b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -6,6 +6,7 @@
#include <linux/bitfield.h>
+#include "ice.h"
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
@@ -52,9 +53,8 @@ int ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-int
-ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
- u32 rxq_index);
+int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index);
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
@@ -71,9 +71,8 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info);
+int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
@@ -111,6 +110,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
+bool ice_is_generic_mac(struct ice_hw *hw);
bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
@@ -199,7 +199,6 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
struct ice_sq_cd *cd);
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
-void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
@@ -241,6 +240,8 @@ ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
u8 *flags, u16 *node_handle);
+int ice_aq_get_sensor_reading(struct ice_hw *hw,
+ struct ice_aqc_get_sensor_reading_resp *data);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
@@ -249,6 +250,7 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
bool ice_is_e823(struct ice_hw *hw);
+bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index e7d2474c431c..ffe660f34992 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -666,7 +666,7 @@ bool ice_is_sbq_supported(struct ice_hw *hw)
/* The device sideband queue is only supported on devices with the
* generic MAC type.
*/
- return hw->mac_type == ICE_MAC_GENERIC;
+ return ice_is_generic_mac(hw);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 396e555023aa..74418c445cc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -35,8 +35,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M;
- cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) &
- ICE_AQ_LLDP_BRID_TYPE_M;
+ cmd->type |= FIELD_PREP(ICE_AQ_LLDP_BRID_TYPE_M, bridge_type);
desc.datalen = cpu_to_le16(buf_size);
@@ -147,8 +146,7 @@ static u8 ice_get_dcbx_status(struct ice_hw *hw)
u32 reg;
reg = rd32(hw, PRTDCB_GENS);
- return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >>
- PRTDCB_GENS_DCBX_STATUS_S);
+ return FIELD_GET(PRTDCB_GENS_DCBX_STATUS_M, reg);
}
/**
@@ -174,11 +172,9 @@ ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
*/
for (i = 0; i < 4; i++) {
ets_cfg->prio_table[i * 2] =
- ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >>
- ICE_IEEE_ETS_PRIO_1_S);
+ FIELD_GET(ICE_IEEE_ETS_PRIO_1_M, buf[offset]);
ets_cfg->prio_table[i * 2 + 1] =
- ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >>
- ICE_IEEE_ETS_PRIO_0_S);
+ FIELD_GET(ICE_IEEE_ETS_PRIO_0_M, buf[offset]);
offset++;
}
@@ -222,11 +218,9 @@ ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv,
* |1bit | 1bit|3 bits|3bits|
*/
etscfg = &dcbcfg->etscfg;
- etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >>
- ICE_IEEE_ETS_WILLING_S);
- etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S);
- etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >>
- ICE_IEEE_ETS_MAXTC_S);
+ etscfg->willing = FIELD_GET(ICE_IEEE_ETS_WILLING_M, buf[0]);
+ etscfg->cbs = FIELD_GET(ICE_IEEE_ETS_CBS_M, buf[0]);
+ etscfg->maxtcs = FIELD_GET(ICE_IEEE_ETS_MAXTC_M, buf[0]);
/* Begin parsing at Priority Assignment Table (offset 1 in buf) */
ice_parse_ieee_ets_common_tlv(&buf[1], etscfg);
@@ -268,11 +262,9 @@ ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv,
* -----------------------------------------
* |1bit | 1bit|2 bits|4bits| 1 octet |
*/
- dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >>
- ICE_IEEE_PFC_WILLING_S);
- dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S);
- dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >>
- ICE_IEEE_PFC_CAP_S);
+ dcbcfg->pfc.willing = FIELD_GET(ICE_IEEE_PFC_WILLING_M, buf[0]);
+ dcbcfg->pfc.mbc = FIELD_GET(ICE_IEEE_PFC_MBC_M, buf[0]);
+ dcbcfg->pfc.pfccap = FIELD_GET(ICE_IEEE_PFC_CAP_M, buf[0]);
dcbcfg->pfc.pfcena = buf[1];
}
@@ -294,7 +286,7 @@ ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv,
u8 *buf;
typelen = ntohs(tlv->typelen);
- len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
buf = tlv->tlvinfo;
/* Removing sizeof(ouisubtype) and reserved byte from len.
@@ -314,12 +306,10 @@ ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv,
* -----------------------------------------
*/
while (offset < len) {
- dcbcfg->app[i].priority = ((buf[offset] &
- ICE_IEEE_APP_PRIO_M) >>
- ICE_IEEE_APP_PRIO_S);
- dcbcfg->app[i].selector = ((buf[offset] &
- ICE_IEEE_APP_SEL_M) >>
- ICE_IEEE_APP_SEL_S);
+ dcbcfg->app[i].priority = FIELD_GET(ICE_IEEE_APP_PRIO_M,
+ buf[offset]);
+ dcbcfg->app[i].selector = FIELD_GET(ICE_IEEE_APP_SEL_M,
+ buf[offset]);
dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) |
buf[offset + 2];
/* Move to next app */
@@ -347,8 +337,7 @@ ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u8 subtype;
ouisubtype = ntohl(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
- ICE_LLDP_TLV_SUBTYPE_S);
+ subtype = FIELD_GET(ICE_LLDP_TLV_SUBTYPE_M, ouisubtype);
switch (subtype) {
case ICE_IEEE_SUBTYPE_ETS_CFG:
ice_parse_ieee_etscfg_tlv(tlv, dcbcfg);
@@ -399,11 +388,9 @@ ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv,
*/
for (i = 0; i < 4; i++) {
etscfg->prio_table[i * 2] =
- ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >>
- ICE_CEE_PGID_PRIO_1_S);
+ FIELD_GET(ICE_CEE_PGID_PRIO_1_M, buf[offset]);
etscfg->prio_table[i * 2 + 1] =
- ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >>
- ICE_CEE_PGID_PRIO_0_S);
+ FIELD_GET(ICE_CEE_PGID_PRIO_0_M, buf[offset]);
offset++;
}
@@ -466,7 +453,7 @@ ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u8 i;
typelen = ntohs(tlv->hdr.typelen);
- len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
dcbcfg->numapps = len / sizeof(*app);
if (!dcbcfg->numapps)
@@ -521,14 +508,13 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u32 ouisubtype;
ouisubtype = ntohl(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
- ICE_LLDP_TLV_SUBTYPE_S);
+ subtype = FIELD_GET(ICE_LLDP_TLV_SUBTYPE_M, ouisubtype);
/* Return if not CEE DCBX */
if (subtype != ICE_CEE_DCBX_TYPE)
return;
typelen = ntohs(tlv->typelen);
- tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ tlvlen = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
len = sizeof(tlv->typelen) + sizeof(ouisubtype) +
sizeof(struct ice_cee_ctrl_tlv);
/* Return if no CEE DCBX Feature TLVs */
@@ -540,9 +526,8 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u16 sublen;
typelen = ntohs(sub_tlv->hdr.typelen);
- sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
- subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >>
- ICE_LLDP_TLV_TYPE_S);
+ sublen = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
+ subtype = FIELD_GET(ICE_LLDP_TLV_TYPE_M, typelen);
switch (subtype) {
case ICE_CEE_SUBTYPE_PG_CFG:
ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
@@ -579,7 +564,7 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u32 oui;
ouisubtype = ntohl(tlv->ouisubtype);
- oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S);
+ oui = FIELD_GET(ICE_LLDP_TLV_OUI_M, ouisubtype);
switch (oui) {
case ICE_IEEE_8021QAZ_OUI:
ice_parse_ieee_tlv(tlv, dcbcfg);
@@ -616,8 +601,8 @@ static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
tlv = (struct ice_lldp_org_tlv *)lldpmib;
while (1) {
typelen = ntohs(tlv->typelen);
- type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S);
- len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ type = FIELD_GET(ICE_LLDP_TLV_TYPE_M, typelen);
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
offset += sizeof(typelen) + len;
/* END TLV or beyond LLDPDU size */
@@ -806,11 +791,11 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
*/
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
dcbcfg->etscfg.prio_table[i * 2] =
- ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >>
- ICE_CEE_PGID_PRIO_0_S);
+ FIELD_GET(ICE_CEE_PGID_PRIO_0_M,
+ cee_cfg->oper_prio_tc[i]);
dcbcfg->etscfg.prio_table[i * 2 + 1] =
- ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >>
- ICE_CEE_PGID_PRIO_1_S);
+ FIELD_GET(ICE_CEE_PGID_PRIO_1_M,
+ cee_cfg->oper_prio_tc[i]);
}
ice_for_each_traffic_class(i) {
@@ -982,7 +967,7 @@ void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
- change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
+ change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
@@ -1483,7 +1468,7 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
while (1) {
ice_add_dcb_tlv(tlv, dcbcfg, tlvid++);
typelen = ntohs(tlv->typelen);
- len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
if (len)
offset += len + 2;
/* END TLV or beyond LLDPDU size */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 850db8e0e6b0..a94e7072b570 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -3,7 +3,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
@@ -291,7 +291,6 @@ static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
switch (vsi->type) {
case ICE_VSI_CHNL:
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
if (ena)
ice_ena_vsi(vsi, locked);
@@ -776,8 +775,7 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
/* no need to proceed with remaining cfg if it is CHNL
* or switchdev VSI
*/
- if (vsi->type == ICE_VSI_CHNL ||
- vsi->type == ICE_VSI_SWITCHDEV_CTRL)
+ if (vsi->type == ICE_VSI_CHNL)
continue;
ice_vsi_map_rings_to_vectors(vsi);
@@ -934,7 +932,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
skb->priority != TC_PRIO_CONTROL) {
first->vid &= ~VLAN_PRIO_MASK;
/* Mask the lower 3 bits to set the 802.1p priority */
- first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
+ first->vid |= FIELD_PREP(VLAN_PRIO_MASK, skb->priority);
/* if this is not already set it means a VLAN 0 + priority needs
* to be offloaded
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index e1fbc6de452d..6d50b90a7359 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -227,7 +227,7 @@ static void ice_get_pfc_delay(struct ice_hw *hw, u16 *delay)
u32 val;
val = rd32(hw, PRTDCB_GENC);
- *delay = (u16)((val & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S);
+ *delay = FIELD_GET(PRTDCB_GENC_PFCLDA_M, val);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index cfb1580f5850..ce5034ed2b24 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -4,6 +4,7 @@
#include "ice_common.h"
#include "ice.h"
#include "ice_ddp.h"
+#include "ice_sched.h"
/* For supporting double VLAN mode, it is necessary to enable or disable certain
* boost tcam entries. The metadata labels names that match the following
@@ -721,6 +722,12 @@ static bool ice_is_gtp_c_profile(u16 prof_idx)
}
}
+static bool ice_is_pfcp_profile(u16 prof_idx)
+{
+ return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE &&
+ prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION;
+}
+
/**
* ice_get_sw_prof_type - determine switch profile type
* @hw: pointer to the HW structure
@@ -738,6 +745,9 @@ static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw,
if (ice_is_gtp_u_profile(prof_idx))
return ICE_PROF_TUN_GTPU;
+ if (ice_is_pfcp_profile(prof_idx))
+ return ICE_PROF_TUN_PFCP;
+
for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
@@ -1424,14 +1434,14 @@ ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
goto exit;
}
- conf_idx = le32_to_cpu(seg->signed_seg_idx);
- start = le32_to_cpu(seg->signed_buf_start);
count = le32_to_cpu(seg->signed_buf_count);
-
state = ice_download_pkg_sig_seg(hw, seg);
- if (state)
+ if (state || !count)
goto exit;
+ conf_idx = le32_to_cpu(seg->signed_seg_idx);
+ start = le32_to_cpu(seg->signed_buf_start);
+
state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
count);
@@ -1479,14 +1489,14 @@ ice_post_dwnld_pkg_actions(struct ice_hw *hw)
}
/**
- * ice_download_pkg
+ * ice_download_pkg_with_sig_seg
* @hw: pointer to the hardware structure
* @pkg_hdr: pointer to package header
*
* Handles the download of a complete package.
*/
static enum ice_ddp_state
-ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
enum ice_aq_err aq_err = hw->adminq.sq_last_status;
enum ice_ddp_state state = ICE_DDP_PKG_ERR;
@@ -1519,6 +1529,103 @@ ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
state = ice_post_dwnld_pkg_actions(hw);
ice_release_global_cfg_lock(hw);
+
+ return state;
+}
+
+/**
+ * ice_dwnld_cfg_bufs
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware.
+ */
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_ddp_state state;
+ struct ice_buf_hdr *bh;
+ int status;
+
+ if (!bufs || !count)
+ return ICE_DDP_PKG_ERR;
+
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)bufs;
+ if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return ICE_DDP_PKG_SUCCESS;
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == -EALREADY)
+ return ICE_DDP_PKG_ALREADY_LOADED;
+ return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
+ }
+
+ state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
+ if (!state)
+ state = ice_post_dwnld_pkg_actions(hw);
+
+ ice_release_global_cfg_lock(hw);
+
+ return state;
+}
+
+/**
+ * ice_download_pkg_without_sig_seg
+ * @hw: pointer to the hardware structure
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package without signature segment.
+ */
+static enum ice_ddp_state
+ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_buf_table *ice_buf_tbl;
+
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
+ le32_to_cpu(ice_seg->hdr.seg_type),
+ le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
+
+ ice_buf_tbl = ice_find_buf_table(ice_seg);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
+ le32_to_cpu(ice_buf_tbl->buf_count));
+
+ return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ le32_to_cpu(ice_buf_tbl->buf_count));
+}
+
+/**
+ * ice_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package.
+ */
+static enum ice_ddp_state
+ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ struct ice_seg *ice_seg)
+{
+ enum ice_ddp_state state;
+
+ if (hw->pkg_has_signing_seg)
+ state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
+ else
+ state = ice_download_pkg_without_sig_seg(hw, ice_seg);
+
ice_post_pkg_dwnld_vlan_mode_cfg(hw);
return state;
@@ -1728,6 +1835,7 @@ static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
seg_id = SEGMENT_TYPE_ICE_E830;
break;
case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
default:
seg_id = SEGMENT_TYPE_ICE_E810;
break;
@@ -1748,6 +1856,9 @@ static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
case ICE_MAC_E830:
sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825;
+ break;
case ICE_MAC_GENERIC:
default:
sign_type = SEGMENT_SIGN_TYPE_RSA2K;
@@ -1837,8 +1948,8 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
*/
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
- DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
- ICE_PKG_CNT);
+ DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
+ ICE_PKG_CNT);
u16 size = __struct_size(pkg_info);
u32 i;
@@ -1889,8 +2000,8 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
- DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
- ICE_PKG_CNT);
+ DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
+ ICE_PKG_CNT);
u16 size = __struct_size(pkg);
enum ice_ddp_state state;
u32 i;
@@ -2083,7 +2194,7 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
/* initialize package hints and then download package */
ice_init_pkg_hints(hw, seg);
- state = ice_download_pkg(hw, pkg);
+ state = ice_download_pkg(hw, pkg, seg);
if (state == ICE_DDP_PKG_ALREADY_LOADED) {
ice_debug(hw, ICE_DBG_INIT,
"package previously loaded - no work.\n");
@@ -2162,3 +2273,211 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
return state;
}
+
+/**
+ * ice_get_set_tx_topo - get or set Tx topology
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @buf_size: buffer size
+ * @cd: pointer to command details structure or NULL
+ * @flags: pointer to descriptor flags
+ * @set: 0-get, 1-set topology
+ *
+ * The function will get or set Tx topology
+ *
+ * Return: zero when set was successful, negative values otherwise.
+ */
+static int
+ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
+ struct ice_sq_cd *cd, u8 *flags, bool set)
+{
+ struct ice_aqc_get_set_tx_topo *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ cmd = &desc.params.get_set_tx_topo;
+ if (set) {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
+ cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
+ /* requested to update a new topology, not a default topology */
+ if (buf)
+ cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
+ ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
+
+ if (ice_is_e825c(hw))
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ } else {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
+ cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
+ }
+
+ if (!ice_is_e825c(hw))
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (status)
+ return status;
+ /* read the return flag values (first byte) for get operation */
+ if (!set && flags)
+ *flags = desc.params.get_set_tx_topo.set_flags;
+
+ return 0;
+}
+
+/**
+ * ice_cfg_tx_topo - Initialize new Tx topology if available
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @len: buffer size
+ *
+ * The function will apply the new Tx topology from the package buffer
+ * if available.
+ *
+ * Return: zero when update was successful, negative values otherwise.
+ */
+int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ u8 *current_topo, *new_topo = NULL;
+ struct ice_run_time_cfg_seg *seg;
+ struct ice_buf_hdr *section;
+ struct ice_pkg_hdr *pkg_hdr;
+ enum ice_ddp_state state;
+ u16 offset, size = 0;
+ u32 reg = 0;
+ int status;
+ u8 flags;
+
+ if (!buf || !len)
+ return -EINVAL;
+
+ /* Does FW support new Tx topology mode ? */
+ if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
+ ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ current_topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!current_topo)
+ return -ENOMEM;
+
+ /* Get the current Tx topology */
+ status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
+ &flags, false);
+
+ kfree(current_topo);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
+ return status;
+ }
+
+ /* Is default topology already applied ? */
+ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) {
+ ice_debug(hw, ICE_DBG_INIT, "Default topology already applied\n");
+ return -EEXIST;
+ }
+
+ /* Is new topology already applied ? */
+ if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
+ ice_debug(hw, ICE_DBG_INIT, "New topology already applied\n");
+ return -EEXIST;
+ }
+
+ /* Setting topology already issued? */
+ if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
+ ice_debug(hw, ICE_DBG_INIT, "Update Tx topology was done by another PF\n");
+ /* Add a small delay before exiting */
+ msleep(2000);
+ return -EEXIST;
+ }
+
+ /* Change the topology from new to default (5 to 9) */
+ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
+ ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
+ goto update_topo;
+ }
+
+ pkg_hdr = (struct ice_pkg_hdr *)buf;
+ state = ice_verify_pkg(pkg_hdr, len);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n",
+ state);
+ return -EIO;
+ }
+
+ /* Find runtime configuration segment */
+ seg = (struct ice_run_time_cfg_seg *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
+ if (!seg) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
+ return -EIO;
+ }
+
+ if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
+ seg->buf_table.buf_count);
+ return -EIO;
+ }
+
+ section = ice_pkg_val_buf(seg->buf_table.buf_array);
+ if (!section || le32_to_cpu(section->section_entry[0].type) !=
+ ICE_SID_TX_5_LAYER_TOPO) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
+ return -EIO;
+ }
+
+ size = le16_to_cpu(section->section_entry[0].size);
+ offset = le16_to_cpu(section->section_entry[0].offset);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
+ return -EIO;
+ }
+
+ /* Make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
+ return -EIO;
+ }
+
+ /* Get the new topology buffer */
+ new_topo = ((u8 *)section) + offset;
+
+update_topo:
+ /* Acquire the global lock to make sure the set-topology command is
+ * issued by only one PF.
+ */
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
+ return status;
+ }
+
+ /* Check if reset was triggered already. */
+ reg = rd32(hw, GLGEN_RSTAT);
+ if (reg & GLGEN_RSTAT_DEVSTATE_M) {
+ /* Reset is in progress, re-init the HW again */
+ ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
+ ice_check_reset(hw);
+ return 0;
+ }
+
+ /* Set new topology */
+ status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
+ return status;
+ }
+
+ /* New topology is updated, delay 1 second before issuing the CORER */
+ msleep(1000);
+ ice_reset(hw, ICE_RESET_CORER);
+ /* CORER will clear the global lock, so no explicit call
+ * required for release.
+ */
+
+ return 0;
+}
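A minimal caller sketch for ice_cfg_tx_topo(), assuming the DDP package was already obtained via request_firmware(); the wrapper name ice_init_tx_topology() and its placement are illustrative only and not part of this hunk. It shows the intended return-code handling: 0 after a successful switch (which ends in a CORER), -EEXIST when the requested layer count is already active.

static int ice_init_tx_topology(struct ice_hw *hw, const struct firmware *fw)
{
	struct ice_pf *pf = hw->back;
	u8 *buf_copy;
	int err;

	/* ice_cfg_tx_topo() takes a non-const buffer, so work on a copy */
	buf_copy = kmemdup(fw->data, fw->size, GFP_KERNEL);
	if (!buf_copy)
		return -ENOMEM;

	err = ice_cfg_tx_topo(hw, buf_copy, fw->size);
	kfree(buf_copy);

	if (!err)
		dev_info(ice_pf_to_dev(pf), "Tx scheduling layers switched, CORER issued\n");
	else if (err == -EEXIST)
		err = 0;	/* requested topology already in place */

	return err;
}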
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index ff66c2ffb1a2..622543f08b43 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -454,4 +454,6 @@ u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
+int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
+
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
new file mode 100644
index 000000000000..9fc0fd95a13d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -0,0 +1,674 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+#include "ice.h"
+
+static struct dentry *ice_debugfs_root;
+
+/* create a define that has an extra module that doesn't really exist. this
+ * is so we can add a module 'all' to easily enable/disable all the modules
+ */
+#define ICE_NR_FW_LOG_MODULES (ICE_AQC_FW_LOG_ID_MAX + 1)
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in ice_aqc_fw_logging_mod
+ */
+static const char * const ice_fwlog_module_string[] = {
+ "general",
+ "ctrl",
+ "link",
+ "link_topo",
+ "dnl",
+ "i2c",
+ "sdp",
+ "mdio",
+ "adminq",
+ "hdma",
+ "lldp",
+ "dcbx",
+ "dcb",
+ "xlr",
+ "nvm",
+ "auth",
+ "vpd",
+ "iosf",
+ "parser",
+ "sw",
+ "scheduler",
+ "txq",
+ "rsvd",
+ "post",
+ "watchdog",
+ "task_dispatch",
+ "mng",
+ "synce",
+ "health",
+ "tsdrv",
+ "pfreg",
+ "mdlver",
+ "all",
+};
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in ice_fwlog_level
+ */
+static const char * const ice_fwlog_level_string[] = {
+ "none",
+ "error",
+ "warning",
+ "normal",
+ "verbose",
+};
+
+static const char * const ice_fwlog_log_size[] = {
+ "128K",
+ "256K",
+ "512K",
+ "1M",
+ "2M",
+};
+
+/**
+ * ice_fwlog_print_module_cfg - print current FW logging module configuration
+ * @hw: pointer to the HW structure
+ * @module: module to print
+ * @s: the seq file to put data into
+ */
+static void
+ice_fwlog_print_module_cfg(struct ice_hw *hw, int module, struct seq_file *s)
+{
+ struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg;
+ struct ice_fwlog_module_entry *entry;
+
+ if (module != ICE_AQC_FW_LOG_ID_MAX) {
+ entry = &cfg->module_entries[module];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ ice_fwlog_module_string[entry->module_id],
+ ice_fwlog_level_string[entry->log_level]);
+ } else {
+ int i;
+
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
+ entry = &cfg->module_entries[i];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ ice_fwlog_module_string[entry->module_id],
+ ice_fwlog_level_string[entry->log_level]);
+ }
+ }
+}
+
+static int ice_find_module_by_dentry(struct ice_pf *pf, struct dentry *d)
+{
+ int i, module;
+
+ module = -1;
+ /* find the module based on the dentry */
+ for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
+ if (d == pf->ice_debugfs_pf_fwlog_modules[i]) {
+ module = i;
+ break;
+ }
+ }
+
+ return module;
+}
+
+/**
+ * ice_debugfs_module_show - read from 'module' file
+ * @s: the opened file
+ * @v: pointer to the offset
+ */
+static int ice_debugfs_module_show(struct seq_file *s, void *v)
+{
+ const struct file *filp = s->file;
+ struct dentry *dentry;
+ struct ice_pf *pf;
+ int module;
+
+ dentry = file_dentry(filp);
+ pf = s->private;
+
+ module = ice_find_module_by_dentry(pf, dentry);
+ if (module < 0) {
+ dev_info(ice_pf_to_dev(pf), "unknown module\n");
+ return -EINVAL;
+ }
+
+ ice_fwlog_print_module_cfg(&pf->hw, module, s);
+
+ return 0;
+}
+
+static int ice_debugfs_module_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, ice_debugfs_module_show, inode->i_private);
+}
+
+/**
+ * ice_debugfs_module_write - write into 'module' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_module_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = file_inode(filp)->i_private;
+ struct dentry *dentry = file_dentry(filp);
+ struct device *dev = ice_pf_to_dev(pf);
+ char user_val[16], *cmd_buf;
+ int module, log_level, cnt;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 8)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ module = ice_find_module_by_dentry(pf, dentry);
+ if (module < 0) {
+ dev_info(dev, "unknown module\n");
+ return -EINVAL;
+ }
+
+ cnt = sscanf(cmd_buf, "%s", user_val);
+ if (cnt != 1)
+ return -EINVAL;
+
+ log_level = sysfs_match_string(ice_fwlog_level_string, user_val);
+ if (log_level < 0) {
+ dev_info(dev, "unknown log level '%s'\n", user_val);
+ return -EINVAL;
+ }
+
+ if (module != ICE_AQC_FW_LOG_ID_MAX) {
+ ice_pf_fwlog_update_module(pf, log_level, module);
+ } else {
+ /* the module 'all' is a shortcut so that we can set
+ * all of the modules to the same level quickly
+ */
+ int i;
+
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
+ ice_pf_fwlog_update_module(pf, log_level, i);
+ }
+
+ return count;
+}
+
+static const struct file_operations ice_debugfs_module_fops = {
+ .owner = THIS_MODULE,
+ .open = ice_debugfs_module_open,
+ .read = seq_read,
+ .release = single_release,
+ .write = ice_debugfs_module_write,
+};
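Each per-module file created below accepts one of the level strings from ice_fwlog_level_string[], and the extra "all" entry updates every module in one write. A hypothetical userspace helper, assuming the debugfs layout built by ice_debugfs_fwlog_init() later in this file (the path components and example BDF are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int ice_fwlog_set_level(const char *bdf, const char *module,
			       const char *level)
{
	char path[256];
	int fd, ret = 0;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/ice/%s/fwlog/modules/%s", bdf, module);

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, level, strlen(level)) < 0)
		ret = -1;
	close(fd);
	return ret;
}

/* e.g. ice_fwlog_set_level("0000:18:00.0", "all", "normal"); */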
+
+/**
+ * ice_debugfs_nr_messages_read - read from 'nr_messages' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_nr_messages_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%d\n",
+ hw->fwlog_cfg.log_resolution);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * ice_debugfs_nr_messages_write - write into 'nr_messages' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ char user_val[8], *cmd_buf;
+ s16 nr_messages;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 4)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtos16(user_val, 0, &nr_messages);
+ if (ret)
+ return ret;
+
+ if (nr_messages < ICE_AQC_FW_LOG_MIN_RESOLUTION ||
+ nr_messages > ICE_AQC_FW_LOG_MAX_RESOLUTION) {
+ dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
+ nr_messages, ICE_AQC_FW_LOG_MIN_RESOLUTION,
+ ICE_AQC_FW_LOG_MAX_RESOLUTION);
+ return -EINVAL;
+ }
+
+ hw->fwlog_cfg.log_resolution = nr_messages;
+
+ return count;
+}
+
+static const struct file_operations ice_debugfs_nr_messages_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_nr_messages_read,
+ .write = ice_debugfs_nr_messages_write,
+};
+
+/**
+ * ice_debugfs_enable_read - read from 'enable' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_enable_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%u\n",
+ (u16)(hw->fwlog_cfg.options &
+ ICE_FWLOG_OPTION_IS_REGISTERED) >> 3);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * ice_debugfs_enable_write - write into 'enable' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_enable_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char user_val[8], *cmd_buf;
+ bool enable;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 2)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtobool(user_val, &enable);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA;
+ else
+ hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
+
+ ret = ice_fwlog_set(hw, &hw->fwlog_cfg);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ ret = ice_fwlog_register(hw);
+ else
+ ret = ice_fwlog_unregister(hw);
+
+ if (ret)
+ goto enable_write_error;
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+enable_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations ice_debugfs_enable_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_enable_read,
+ .write = ice_debugfs_enable_write,
+};
+
+/**
+ * ice_debugfs_log_size_read - read from 'log_size' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_log_size_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char buff[32] = {};
+ int index;
+
+ index = hw->fwlog_ring.index;
+ snprintf(buff, sizeof(buff), "%s\n", ice_fwlog_log_size[index]);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * ice_debugfs_log_size_write - write into 'log_size' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ char user_val[8], *cmd_buf;
+ ssize_t ret;
+ int index;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 5)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ if (ret != 1)
+ return -EINVAL;
+
+ index = sysfs_match_string(ice_fwlog_log_size, user_val);
+ if (index < 0) {
+ dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
+ user_val);
+ ret = -EINVAL;
+ goto log_size_write_error;
+ } else if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
+ dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
+ ret = -EINVAL;
+ goto log_size_write_error;
+ }
+
+ /* free all the buffers and the tracking info and resize */
+ ice_fwlog_realloc_rings(hw, index);
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+log_size_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations ice_debugfs_log_size_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_log_size_read,
+ .write = ice_debugfs_log_size_write,
+};
+
+/**
+ * ice_debugfs_data_read - read from 'data' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_data_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ int data_copied = 0;
+ bool done = false;
+
+ if (ice_fwlog_ring_empty(&hw->fwlog_ring))
+ return 0;
+
+ while (!ice_fwlog_ring_empty(&hw->fwlog_ring) && !done) {
+ struct ice_fwlog_data *log;
+ u16 cur_buf_len;
+
+ log = &hw->fwlog_ring.rings[hw->fwlog_ring.head];
+ cur_buf_len = log->data_size;
+ if (cur_buf_len >= count) {
+ done = true;
+ continue;
+ }
+
+ if (copy_to_user(buffer, log->data, cur_buf_len)) {
+ /* if there is an error then bail and return whatever
+ * the driver has copied so far
+ */
+ done = true;
+ continue;
+ }
+
+ data_copied += cur_buf_len;
+ buffer += cur_buf_len;
+ count -= cur_buf_len;
+ *ppos += cur_buf_len;
+ ice_fwlog_ring_increment(&hw->fwlog_ring.head,
+ hw->fwlog_ring.size);
+ }
+
+ return data_copied;
+}
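Reads of the 'data' file drain whole log entries from the ring until the next entry no longer fits in the user buffer, so a consumer simply reads in a loop until 0 is returned. A rough userspace sketch, with the device path and buffer size chosen arbitrarily; the 'enable' writes rely on the handler defined earlier:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define ICE_FWLOG_DIR "/sys/kernel/debug/ice/0000:18:00.0/fwlog"

static int fwlog_write(const char *file, const char *val)
{
	char path[256];
	int fd, ret = 0;

	snprintf(path, sizeof(path), ICE_FWLOG_DIR "/%s", file);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0)
		ret = -1;
	close(fd);
	return ret;
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	if (fwlog_write("enable", "1"))
		return 1;

	fd = open(ICE_FWLOG_DIR "/data", O_RDONLY);
	if (fd < 0)
		return 1;

	/* each read returns as many whole entries as fit; 0 means empty */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);

	return fwlog_write("enable", "0") ? 1 : 0;
}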
+
+/**
+ * ice_debugfs_data_write - write into 'data' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ ssize_t ret;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ /* any value is allowed to clear the buffer so no need to even look at
+ * what the value is
+ */
+ if (!(hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED)) {
+ hw->fwlog_ring.head = 0;
+ hw->fwlog_ring.tail = 0;
+ } else {
+ dev_info(dev, "Can't clear FW log data while FW log running\n");
+ ret = -EINVAL;
+ goto nr_buffs_write_error;
+ }
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+nr_buffs_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations ice_debugfs_data_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_data_read,
+ .write = ice_debugfs_data_write,
+};
+
+/**
+ * ice_debugfs_fwlog_init - setup the debugfs directory
+ * @pf: pointer to the PF struct that is starting up
+ */
+void ice_debugfs_fwlog_init(struct ice_pf *pf)
+{
+ const char *name = pci_name(pf->pdev);
+ struct dentry *fw_modules_dir;
+ struct dentry **fw_modules;
+ int i;
+
+ /* only support fw log commands on PF 0 */
+ if (pf->hw.bus.func)
+ return;
+
+ /* allocate space for this first because if it fails then we don't
+ * need to unwind
+ */
+ fw_modules = kcalloc(ICE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
+ GFP_KERNEL);
+ if (!fw_modules)
+ return;
+
+ pf->ice_debugfs_pf = debugfs_create_dir(name, ice_debugfs_root);
+ if (IS_ERR(pf->ice_debugfs_pf))
+ goto err_create_module_files;
+
+ pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
+ pf->ice_debugfs_pf);
+ if (IS_ERR(pf->ice_debugfs_pf_fwlog))
+ goto err_create_module_files;
+
+ fw_modules_dir = debugfs_create_dir("modules",
+ pf->ice_debugfs_pf_fwlog);
+ if (IS_ERR(fw_modules_dir))
+ goto err_create_module_files;
+
+ for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
+ fw_modules[i] = debugfs_create_file(ice_fwlog_module_string[i],
+ 0600, fw_modules_dir, pf,
+ &ice_debugfs_module_fops);
+ if (IS_ERR(fw_modules[i]))
+ goto err_create_module_files;
+ }
+
+ debugfs_create_file("nr_messages", 0600,
+ pf->ice_debugfs_pf_fwlog, pf,
+ &ice_debugfs_nr_messages_fops);
+
+ pf->ice_debugfs_pf_fwlog_modules = fw_modules;
+
+ debugfs_create_file("enable", 0600, pf->ice_debugfs_pf_fwlog,
+ pf, &ice_debugfs_enable_fops);
+
+ debugfs_create_file("log_size", 0600, pf->ice_debugfs_pf_fwlog,
+ pf, &ice_debugfs_log_size_fops);
+
+ debugfs_create_file("data", 0600, pf->ice_debugfs_pf_fwlog,
+ pf, &ice_debugfs_data_fops);
+
+ return;
+
+err_create_module_files:
+ debugfs_remove_recursive(pf->ice_debugfs_pf_fwlog);
+ kfree(fw_modules);
+}
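Given the file and directory names used above, the per-device tree for PF 0 of a device at, say, 0000:18:00.0 ends up roughly as follows (only PF 0 gets it, per the bus.func check at the top of the function):

/sys/kernel/debug/ice/0000:18:00.0/fwlog/
	nr_messages
	enable
	log_size
	data
	modules/general, modules/ctrl, ... modules/all   (one file per ice_fwlog_module_string[] entry)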
+
+/**
+ * ice_debugfs_pf_deinit - cleanup PF's debugfs
+ * @pf: pointer to the PF struct
+ */
+void ice_debugfs_pf_deinit(struct ice_pf *pf)
+{
+ debugfs_remove_recursive(pf->ice_debugfs_pf);
+ pf->ice_debugfs_pf = NULL;
+}
+
+/**
+ * ice_debugfs_init - create root directory for debugfs entries
+ */
+void ice_debugfs_init(void)
+{
+ ice_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (IS_ERR(ice_debugfs_root))
+ pr_info("init of debugfs failed\n");
+}
+
+/**
+ * ice_debugfs_exit - remove debugfs entries
+ */
+void ice_debugfs_exit(void)
+{
+ debugfs_remove_recursive(ice_debugfs_root);
+ ice_debugfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index a2d384dbfc76..34fd604132f5 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -16,14 +16,26 @@
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
+/* Intel(R) Ethernet Controller E830-CC for backplane */
+#define ICE_DEV_ID_E830CC_BACKPLANE 0x12D1
+/* Intel(R) Ethernet Controller E830-CC for QSFP */
+#define ICE_DEV_ID_E830CC_QSFP56 0x12D2
+/* Intel(R) Ethernet Controller E830-CC for SFP */
+#define ICE_DEV_ID_E830CC_SFP 0x12D3
+/* Intel(R) Ethernet Controller E830-CC for SFP-DD */
+#define ICE_DEV_ID_E830CC_SFP_DD 0x12D4
/* Intel(R) Ethernet Controller E830-C for backplane */
-#define ICE_DEV_ID_E830_BACKPLANE 0x12D1
+#define ICE_DEV_ID_E830C_BACKPLANE 0x12D5
/* Intel(R) Ethernet Controller E830-C for QSFP */
-#define ICE_DEV_ID_E830_QSFP56 0x12D2
+#define ICE_DEV_ID_E830C_QSFP 0x12D8
/* Intel(R) Ethernet Controller E830-C for SFP */
-#define ICE_DEV_ID_E830_SFP 0x12D3
-/* Intel(R) Ethernet Controller E830-C for SFP-DD */
-#define ICE_DEV_ID_E830_SFP_DD 0x12D4
+#define ICE_DEV_ID_E830C_SFP 0x12DA
+/* Intel(R) Ethernet Controller E830-XXV for backplane */
+#define ICE_DEV_ID_E830_XXV_BACKPLANE 0x12DC
+/* Intel(R) Ethernet Controller E830-XXV for QSFP */
+#define ICE_DEV_ID_E830_XXV_QSFP 0x12DD
+/* Intel(R) Ethernet Controller E830-XXV for SFP */
+#define ICE_DEV_ID_E830_XXV_SFP 0x12DE
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
@@ -71,5 +83,13 @@
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
+/* Intel(R) Ethernet Connection E825-C for backplane */
+#define ICE_DEV_ID_E825C_BACKPLANE 0x579c
+/* Intel(R) Ethernet Connection E825-C for QSFP */
+#define ICE_DEV_ID_E825C_QSFP 0x579d
+/* Intel(R) Ethernet Connection E825-C for SFP */
+#define ICE_DEV_ID_E825C_SFP 0x579e
+/* Intel(R) Ethernet Connection E825-C 1GbE */
+#define ICE_DEV_ID_E825C_SGMII 0x579f
#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 835c419ccc74..e92be6f130a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -31,6 +31,26 @@ static const char * const pin_type_name[] = {
};
/**
+ * ice_dpll_is_reset - check if reset is in progress
+ * @pf: private board structure
+ * @extack: error reporting
+ *
+ * If reset is in progress, fill extack with error.
+ *
+ * Return:
+ * * false - no reset in progress
+ * * true - reset in progress
+ */
+static bool ice_dpll_is_reset(struct ice_pf *pf, struct netlink_ext_ack *extack)
+{
+ if (ice_is_reset_in_progress(pf->state)) {
+ NL_SET_ERR_MSG(extack, "PF reset in progress");
+ return true;
+ }
+ return false;
+}
+
+/**
* ice_dpll_pin_freq_set - set pin's frequency
* @pf: private board structure
* @pin: pointer to a pin
@@ -109,6 +129,9 @@ ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack);
mutex_unlock(&pf->dplls.lock);
@@ -254,6 +277,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
* ice_dpll_pin_enable - enable a pin on dplls
* @hw: board private hw structure
* @pin: pointer to a pin
+ * @dpll_idx: dpll index to connect to output pin
* @pin_type: type of pin being enabled
* @extack: error reporting
*
@@ -266,7 +290,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
*/
static int
ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
- enum ice_dpll_pin_type pin_type,
+ u8 dpll_idx, enum ice_dpll_pin_type pin_type,
struct netlink_ext_ack *extack)
{
u8 flags = 0;
@@ -280,10 +304,12 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
+ flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL;
if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
- ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+ ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, dpll_idx,
+ 0, 0);
break;
default:
return -EINVAL;
@@ -370,7 +396,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
case ICE_DPLL_PIN_TYPE_INPUT:
ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
NULL, &pin->flags[0],
- &pin->freq, NULL);
+ &pin->freq, &pin->phase_adjust);
if (ret)
goto err;
if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) {
@@ -398,14 +424,27 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx,
- &pin->flags[0], NULL,
+ &pin->flags[0], &parent,
&pin->freq, NULL);
if (ret)
goto err;
- if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0])
- pin->state[0] = DPLL_PIN_STATE_CONNECTED;
- else
- pin->state[0] = DPLL_PIN_STATE_DISCONNECTED;
+
+ parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL;
+ if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ parent == pf->dplls.eec.dpll_idx ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ parent == pf->dplls.pps.dpll_idx ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+ } else {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ }
break;
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
for (parent = 0; parent < pf->dplls.rclk.num_parents;
@@ -488,6 +527,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
* @dpll: registered dpll pointer
* @dpll_priv: private data pointer passed on dpll registration
* @status: on success holds dpll's lock status
+ * @status_error: status error value
* @extack: error reporting
*
* Dpll subsystem callback, provides dpll's lock status.
@@ -500,6 +540,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
static int
ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
struct netlink_ext_ack *extack)
{
struct ice_dpll *d = dpll_priv;
@@ -513,31 +554,6 @@ ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
}
/**
- * ice_dpll_mode_supported - check if dpll's working mode is supported
- * @dpll: registered dpll pointer
- * @dpll_priv: private data pointer passed on dpll registration
- * @mode: mode to be checked for support
- * @extack: error reporting
- *
- * Dpll subsystem callback. Provides information if working mode is supported
- * by dpll.
- *
- * Return:
- * * true - mode is supported
- * * false - mode is not supported
- */
-static bool ice_dpll_mode_supported(const struct dpll_device *dpll,
- void *dpll_priv,
- enum dpll_mode mode,
- struct netlink_ext_ack *extack)
-{
- if (mode == DPLL_MODE_AUTOMATIC)
- return true;
-
- return false;
-}
-
-/**
* ice_dpll_mode_get - get dpll's working mode
* @dpll: registered dpll pointer
* @dpll_priv: private data pointer passed on dpll registration
@@ -593,9 +609,13 @@ ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
if (enable)
- ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack);
+ ret = ice_dpll_pin_enable(&pf->hw, p, d->dpll_idx, pin_type,
+ extack);
else
ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack);
if (!ret)
@@ -628,6 +648,11 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
struct netlink_ext_ack *extack)
{
bool enable = state == DPLL_PIN_STATE_CONNECTED;
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+
+ if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED)
+ return 0;
return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
extack, ICE_DPLL_PIN_TYPE_OUTPUT);
@@ -690,14 +715,16 @@ ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
if (ret)
goto unlock;
- if (pin_type == ICE_DPLL_PIN_TYPE_INPUT)
+ if (pin_type == ICE_DPLL_PIN_TYPE_INPUT ||
+ pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
*state = p->state[d->dpll_idx];
- else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
- *state = p->state[0];
ret = 0;
unlock:
mutex_unlock(&pf->dplls.lock);
@@ -815,11 +842,8 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
- if (prio > ICE_DPLL_PRIO_MAX) {
- NL_SET_ERR_MSG_FMT(extack, "prio out of supported range 0-%d",
- ICE_DPLL_PRIO_MAX);
- return -EINVAL;
- }
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
@@ -941,6 +965,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
u8 flag, flags_en = 0;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
switch (type) {
case ICE_DPLL_PIN_TYPE_INPUT:
@@ -1100,6 +1127,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
int ret = -EINVAL;
u32 hw_idx;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
hw_idx = parent->idx - pf->dplls.base_rclk_idx;
if (hw_idx >= pf->dplls.num_inputs)
@@ -1154,6 +1184,9 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
int ret = -EINVAL;
u32 hw_idx;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
hw_idx = parent->idx - pf->dplls.base_rclk_idx;
if (hw_idx >= pf->dplls.num_inputs)
@@ -1203,7 +1236,6 @@ static const struct dpll_pin_ops ice_dpll_output_ops = {
static const struct dpll_device_ops ice_dpll_ops = {
.lock_status_get = ice_dpll_lock_status_get,
- .mode_supported = ice_dpll_mode_supported,
.mode_get = ice_dpll_mode_get,
};
@@ -1337,8 +1369,10 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
struct ice_dpll *de = &pf->dplls.eec;
struct ice_dpll *dp = &pf->dplls.pps;
- int ret;
+ int ret = 0;
+ if (ice_is_reset_in_progress(pf->state))
+ goto resched;
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_update_state(pf, de, false);
if (!ret)
@@ -1358,6 +1392,7 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
ice_dpll_notify_changes(de);
ice_dpll_notify_changes(dp);
+resched:
/* Run twice a second or reschedule if update failed */
kthread_queue_delayed_work(d->kworker, &d->work,
ret ? msecs_to_jiffies(10) :
@@ -1564,7 +1599,7 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
}
if (WARN_ON_ONCE(!vsi || !vsi->netdev))
return;
- netdev_dpll_pin_clear(vsi->netdev);
+ dpll_netdev_pin_clear(vsi->netdev);
dpll_pin_put(rclk->pin);
}
@@ -1608,7 +1643,7 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (WARN_ON((!vsi || !vsi->netdev)))
return -EINVAL;
- netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+ dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
return 0;
@@ -1756,6 +1791,7 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
}
d->pf = pf;
if (cgu) {
+ ice_dpll_update_state(pf, d, true);
ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d);
if (ret) {
dpll_device_put(d->dpll);
@@ -1796,8 +1832,6 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
struct ice_dplls *d = &pf->dplls;
struct kthread_worker *kworker;
- ice_dpll_update_state(pf, &d->eec, true);
- ice_dpll_update_state(pf, &d->pps, true);
kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
kworker = kthread_create_worker(0, "ice-dplls-%s",
dev_name(ice_pf_to_dev(pf)));
@@ -1830,6 +1864,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
int num_pins, i, ret = -EINVAL;
struct ice_hw *hw = &pf->hw;
struct ice_dpll_pin *pins;
+ unsigned long caps;
u8 freq_supp_num;
bool input;
@@ -1849,6 +1884,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
}
for (i = 0; i < num_pins; i++) {
+ caps = 0;
pins[i].idx = i;
pins[i].prop.board_label = ice_cgu_get_pin_name(hw, i, input);
pins[i].prop.type = ice_cgu_get_pin_type(hw, i, input);
@@ -1861,8 +1897,8 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
&dp->input_prio[i]);
if (ret)
return ret;
- pins[i].prop.capabilities |=
- DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE;
+ caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
pins[i].prop.phase_range.min =
pf->dplls.input_phase_adj_max;
pins[i].prop.phase_range.max =
@@ -1872,9 +1908,11 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
pf->dplls.output_phase_adj_max;
pins[i].prop.phase_range.max =
-pf->dplls.output_phase_adj_max;
+ ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
+ if (ret)
+ return ret;
}
- pins[i].prop.capabilities |=
- DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ pins[i].prop.capabilities = caps;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
if (ret)
return ret;
@@ -2084,6 +2122,7 @@ void ice_dpll_init(struct ice_pf *pf)
struct ice_dplls *d = &pf->dplls;
int err = 0;
+ mutex_init(&d->lock);
err = ice_dpll_init_info(pf, cgu);
if (err)
goto err_exit;
@@ -2096,7 +2135,6 @@ void ice_dpll_init(struct ice_pf *pf)
err = ice_dpll_init_pins(pf, cgu);
if (err)
goto deinit_pps;
- mutex_init(&d->lock);
if (cgu) {
err = ice_dpll_init_worker(pf);
if (err)
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
index bb32b6d88373..93172e93995b 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.h
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -6,7 +6,6 @@
#include "ice.h"
-#define ICE_DPLL_PRIO_MAX 0xF
#define ICE_DPLL_RCLK_NUM_MAX 4
/** ice_dpll_pin - store info about pins
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index a655d499abfa..b102db8b829a 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -7,70 +7,11 @@
#include "ice_eswitch_br.h"
#include "ice_fltr.h"
#include "ice_repr.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
#include "ice_tc_lib.h"
/**
- * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index
- * @pf: pointer to PF struct
- * @vf: pointer to VF struct
- *
- * This function adds advanced rule that forwards packets with
- * VF's VSI index to the corresponding switchdev ctrl VSI queue.
- */
-static int
-ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
-{
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
- struct ice_adv_rule_info rule_info = { 0 };
- struct ice_adv_lkup_elem *list;
- struct ice_hw *hw = &pf->hw;
- const u16 lkups_cnt = 1;
- int err;
-
- list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
- if (!list)
- return -ENOMEM;
-
- ice_rule_add_src_vsi_metadata(list);
-
- rule_info.sw_act.flag = ICE_FLTR_TX;
- rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
- rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
- rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
- ctrl_vsi->rxq_map[vf->vf_id];
- rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
- rule_info.flags_info.act_valid = true;
- rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
- rule_info.src_vsi = vf->lan_vsi_idx;
-
- err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
- &vf->repr->sp_rule);
- if (err)
- dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d",
- vf->vf_id);
-
- kfree(list);
- return err;
-}
-
-/**
- * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index
- * @vf: pointer to the VF struct
- *
- * Delete the advanced rule that was used to forward packets with the VF's VSI
- * index to the corresponding switchdev ctrl VSI queue.
- */
-static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
-{
- if (!vf->repr)
- return;
-
- ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule);
-}
-
-/**
- * ice_eswitch_setup_env - configure switchdev HW filters
+ * ice_eswitch_setup_env - configure eswitch HW filters
* @pf: pointer to PF struct
*
* This function adds HW filters configuration specific for switchdev
@@ -78,239 +19,153 @@ static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
*/
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
- struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
- struct net_device *uplink_netdev = uplink_vsi->netdev;
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
+ struct net_device *netdev = uplink_vsi->netdev;
+ bool if_running = netif_running(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
- bool rule_added = false;
+
+ if (if_running && !test_and_set_bit(ICE_VSI_DOWN, uplink_vsi->state))
+ if (ice_down(uplink_vsi))
+ return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
- netif_addr_lock_bh(uplink_netdev);
- __dev_uc_unsync(uplink_netdev, NULL);
- __dev_mc_unsync(uplink_netdev, NULL);
- netif_addr_unlock_bh(uplink_netdev);
+ netif_addr_lock_bh(netdev);
+ __dev_uc_unsync(netdev, NULL);
+ __dev_mc_unsync(netdev, NULL);
+ netif_addr_unlock_bh(netdev);
if (ice_vsi_add_vlan_zero(uplink_vsi))
+ goto err_vlan_zero;
+
+ if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+ ICE_FLTR_RX))
goto err_def_rx;
- if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
- if (ice_set_dflt_vsi(uplink_vsi))
- goto err_def_rx;
- rule_added = true;
- }
+ if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+ ICE_FLTR_TX))
+ goto err_def_tx;
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
if (vlan_ops->dis_rx_filtering(uplink_vsi))
- goto err_dis_rx;
+ goto err_vlan_filtering;
if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_uplink;
- if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
- goto err_override_control;
-
if (ice_vsi_update_local_lb(uplink_vsi, true))
goto err_override_local_lb;
+ if (if_running && ice_up(uplink_vsi))
+ goto err_up;
+
return 0;
+err_up:
+ ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
- ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
-err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
vlan_ops->ena_rx_filtering(uplink_vsi);
-err_dis_rx:
- if (rule_added)
- ice_clear_dflt_vsi(uplink_vsi);
+err_vlan_filtering:
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_TX);
+err_def_tx:
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_RX);
err_def_rx:
+ ice_vsi_del_vlan_zero(uplink_vsi);
+err_vlan_zero:
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
- return -ENODEV;
-}
-
-/**
- * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
- * @pf: pointer to PF struct
- *
- * In switchdev number of allocated Tx/Rx rings is equal.
- *
- * This function fills q_vectors structures associated with representor and
- * move each ring pairs to port representor netdevs. Each port representor
- * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to
- * number of VFs.
- */
-static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = pf->switchdev.control_vsi;
- int q_id;
-
- ice_for_each_txq(vsi, q_id) {
- struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- struct ice_repr *repr;
- struct ice_vf *vf;
-
- vf = ice_get_vf_by_id(pf, q_id);
- if (WARN_ON(!vf))
- continue;
-
- repr = vf->repr;
- q_vector = repr->q_vector;
- tx_ring = vsi->tx_rings[q_id];
- rx_ring = vsi->rx_rings[q_id];
-
- q_vector->vsi = vsi;
- q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
-
- q_vector->num_ring_tx = 1;
- q_vector->tx.tx_ring = tx_ring;
- tx_ring->q_vector = q_vector;
- tx_ring->next = NULL;
- tx_ring->netdev = repr->netdev;
- /* In switchdev mode, from OS stack perspective, there is only
- * one queue for given netdev, so it needs to be indexed as 0.
- */
- tx_ring->q_index = 0;
-
- q_vector->num_ring_rx = 1;
- q_vector->rx.rx_ring = rx_ring;
- rx_ring->q_vector = q_vector;
- rx_ring->next = NULL;
- rx_ring->netdev = repr->netdev;
+ if (if_running)
+ ice_up(uplink_vsi);
- ice_put_vf(vf);
- }
+ return -ENODEV;
}
/**
- * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * ice_eswitch_release_repr - clear PR VSI configuration
* @pf: pointer to PF struct
- * @ctrl_vsi: pointer to switchdev control VSI
+ * @repr: pointer to PR
*/
static void
-ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
{
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf) {
- struct ice_vsi *vsi = vf->repr->src_vsi;
-
- /* Skip VFs that aren't configured */
- if (!vf->repr->dst)
- continue;
+ struct ice_vsi *vsi = repr->src_vsi;
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- metadata_dst_free(vf->repr->dst);
- vf->repr->dst = NULL;
- ice_eswitch_del_vf_sp_rule(vf);
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
+ /* Skip representors that aren't configured */
+ if (!repr->dst)
+ return;
- netif_napi_del(&vf->repr->q_vector->napi);
- }
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(repr->dst);
+ repr->dst = NULL;
+ ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
+ ICE_FWD_TO_VSI);
}
/**
- * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
+ * ice_eswitch_setup_repr - configure PR to run in switchdev mode
* @pf: pointer to PF struct
+ * @repr: pointer to PR struct
*/
-static int ice_eswitch_setup_reprs(struct ice_pf *pf)
+static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
{
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
- int max_vsi_num = 0;
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf) {
- struct ice_vsi *vsi = vf->repr->src_vsi;
-
- ice_remove_vsi_fltr(&pf->hw, vsi->idx);
- vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
- GFP_KERNEL);
- if (!vf->repr->dst) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- goto err;
- }
-
- if (ice_eswitch_add_vf_sp_rule(pf, vf)) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- goto err;
- }
-
- if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- ice_eswitch_del_vf_sp_rule(vf);
- metadata_dst_free(vf->repr->dst);
- vf->repr->dst = NULL;
- goto err;
- }
-
- if (ice_vsi_add_vlan_zero(vsi)) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- ice_eswitch_del_vf_sp_rule(vf);
- metadata_dst_free(vf->repr->dst);
- vf->repr->dst = NULL;
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- goto err;
- }
+ struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
+ struct ice_vsi *vsi = repr->src_vsi;
+ struct metadata_dst *dst;
- if (max_vsi_num < vsi->vsi_num)
- max_vsi_num = vsi->vsi_num;
+ ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!repr->dst)
+ goto err_add_mac_fltr;
- netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
- ice_napi_poll);
+ if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof))
+ goto err_dst_free;
- netif_keep_dst(vf->repr->netdev);
- }
+ if (ice_vsi_add_vlan_zero(vsi))
+ goto err_update_security;
- ice_for_each_vf(pf, bkt, vf) {
- struct ice_repr *repr = vf->repr;
- struct ice_vsi *vsi = repr->src_vsi;
- struct metadata_dst *dst;
+ netif_keep_dst(uplink_vsi->netdev);
- dst = repr->dst;
- dst->u.port_info.port_id = vsi->vsi_num;
- dst->u.port_info.lower_dev = repr->netdev;
- ice_repr_set_traffic_vsi(repr, ctrl_vsi);
- }
+ dst = repr->dst;
+ dst->u.port_info.port_id = vsi->vsi_num;
+ dst->u.port_info.lower_dev = uplink_vsi->netdev;
return 0;
-err:
- ice_eswitch_release_reprs(pf, ctrl_vsi);
+err_update_security:
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+err_dst_free:
+ metadata_dst_free(repr->dst);
+ repr->dst = NULL;
+err_add_mac_fltr:
+ ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI);
return -ENODEV;
}
/**
- * ice_eswitch_update_repr - reconfigure VF port representor
- * @vsi: VF VSI for which port representor is configured
+ * ice_eswitch_update_repr - reconfigure port representor
+ * @repr_id: representor ID
+ * @vsi: VSI for which port representor is configured
*/
-void ice_eswitch_update_repr(struct ice_vsi *vsi)
+void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_repr *repr;
- struct ice_vf *vf;
int ret;
if (!ice_is_switchdev_running(pf))
return;
- vf = vsi->vf;
- repr = vf->repr;
+ repr = xa_load(&pf->eswitch.reprs, repr_id);
+ if (!repr)
+ return;
+
repr->src_vsi = vsi;
repr->dst->u.port_info.port_id = vsi->vsi_num;
@@ -319,9 +174,10 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
- dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
- vsi->vf->vf_id);
+ ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
+ ICE_FWD_TO_VSI);
+ dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
+ repr->id);
}
}
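With the new signature, callers pass the representor ID that was stored on the VF during attach. A hypothetical VF VSI rebuild snippet (the real call sites are outside this hunk) could look like:

static void ice_vf_post_vsi_rebuild_sketch(struct ice_vf *vf,
					   struct ice_vsi *new_vsi)
{
	/* re-point the representor at the freshly rebuilt VSI */
	ice_eswitch_update_repr(vf->repr_id, new_vsi);
}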
@@ -335,31 +191,23 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
- struct ice_netdev_priv *np;
- struct ice_repr *repr;
- struct ice_vsi *vsi;
-
- np = netdev_priv(netdev);
- vsi = np->vsi;
-
- if (!vsi || !ice_is_switchdev_running(vsi->back))
- return NETDEV_TX_BUSY;
-
- if (ice_is_reset_in_progress(vsi->back->state) ||
- test_bit(ICE_VF_DIS, vsi->back->state))
- return NETDEV_TX_BUSY;
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ unsigned int len = skb->len;
+ int ret;
- repr = ice_netdev_to_repr(netdev);
skb_dst_drop(skb);
dst_hold((struct dst_entry *)repr->dst);
skb_dst_set(skb, (struct dst_entry *)repr->dst);
- skb->queue_mapping = repr->vf->vf_id;
+ skb->dev = repr->dst->u.port_info.lower_dev;
+
+ ret = dev_queue_xmit(skb);
+ ice_repr_inc_tx_stats(repr, len, ret);
- return ice_start_xmit(skb, netdev);
+ return ret;
}
/**
- * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
+ * ice_eswitch_set_target_vsi - set eswitch context in Tx context descriptor
* @skb: pointer to send buffer
* @off: pointer to offload struct
*/
@@ -375,14 +223,14 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
} else {
cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
- dst_vsi = ((u64)dst->u.port_info.port_id <<
- ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
+ dst_vsi = FIELD_PREP(ICE_TXD_CTX_QW1_VSI_M,
+ dst->u.port_info.port_id);
off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
}
}
/**
- * ice_eswitch_release_env - clear switchdev HW filters
+ * ice_eswitch_release_env - clear eswitch HW filters
* @pf: pointer to PF struct
*
* This function removes HW filters configuration specific for switchdev
@@ -390,91 +238,30 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
*/
static void ice_eswitch_release_env(struct ice_pf *pf)
{
- struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
struct ice_vsi_vlan_ops *vlan_ops;
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
ice_vsi_update_local_lb(uplink_vsi, false);
- ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
vlan_ops->ena_rx_filtering(uplink_vsi);
- ice_clear_dflt_vsi(uplink_vsi);
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_TX);
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_RX);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
}
/**
- * ice_eswitch_vsi_setup - configure switchdev control VSI
- * @pf: pointer to PF structure
- * @pi: pointer to port_info structure
- */
-static struct ice_vsi *
-ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
-{
- struct ice_vsi_cfg_params params = {};
-
- params.type = ICE_VSI_SWITCHDEV_CTRL;
- params.pi = pi;
- params.flags = ICE_VSI_FLAG_INIT;
-
- return ice_vsi_setup(pf, &params);
-}
-
-/**
- * ice_eswitch_napi_del - remove NAPI handle for all port representors
- * @pf: pointer to PF structure
- */
-static void ice_eswitch_napi_del(struct ice_pf *pf)
-{
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf)
- netif_napi_del(&vf->repr->q_vector->napi);
-}
-
-/**
- * ice_eswitch_napi_enable - enable NAPI for all port representors
- * @pf: pointer to PF structure
- */
-static void ice_eswitch_napi_enable(struct ice_pf *pf)
-{
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf)
- napi_enable(&vf->repr->q_vector->napi);
-}
-
-/**
- * ice_eswitch_napi_disable - disable NAPI for all port representors
- * @pf: pointer to PF structure
- */
-static void ice_eswitch_napi_disable(struct ice_pf *pf)
-{
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf)
- napi_disable(&vf->repr->q_vector->napi);
-}
-
-/**
* ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
* @pf: pointer to PF structure
*/
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi, *uplink_vsi;
+ struct ice_vsi *uplink_vsi;
uplink_vsi = ice_get_main_vsi(pf);
if (!uplink_vsi)
@@ -486,59 +273,33 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
return -EINVAL;
}
- pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
- if (!pf->switchdev.control_vsi)
- return -ENODEV;
-
- ctrl_vsi = pf->switchdev.control_vsi;
- pf->switchdev.uplink_vsi = uplink_vsi;
+ pf->eswitch.uplink_vsi = uplink_vsi;
if (ice_eswitch_setup_env(pf))
- goto err_vsi;
-
- if (ice_repr_add_for_all_vfs(pf))
- goto err_repr_add;
-
- if (ice_eswitch_setup_reprs(pf))
- goto err_setup_reprs;
-
- ice_eswitch_remap_rings_to_vectors(pf);
-
- if (ice_vsi_open(ctrl_vsi))
- goto err_setup_reprs;
+ return -ENODEV;
if (ice_eswitch_br_offloads_init(pf))
goto err_br_offloads;
- ice_eswitch_napi_enable(pf);
+ pf->eswitch.is_running = true;
return 0;
err_br_offloads:
- ice_vsi_close(ctrl_vsi);
-err_setup_reprs:
- ice_repr_rem_from_all_vfs(pf);
-err_repr_add:
ice_eswitch_release_env(pf);
-err_vsi:
- ice_vsi_release(ctrl_vsi);
return -ENODEV;
}
/**
- * ice_eswitch_disable_switchdev - disable switchdev resources
+ * ice_eswitch_disable_switchdev - disable eswitch resources
* @pf: pointer to PF structure
*/
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
-
- ice_eswitch_napi_disable(pf);
ice_eswitch_br_offloads_deinit(pf);
ice_eswitch_release_env(pf);
- ice_eswitch_release_reprs(pf, ctrl_vsi);
- ice_vsi_release(ctrl_vsi);
- ice_repr_rem_from_all_vfs(pf);
+
+ pf->eswitch.is_running = false;
}
/**
@@ -566,6 +327,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
case DEVLINK_ESWITCH_MODE_LEGACY:
dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
pf->hw.pf_id);
+ xa_destroy(&pf->eswitch.reprs);
NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
@@ -578,6 +340,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
pf->hw.pf_id);
+ xa_init(&pf->eswitch.reprs);
NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
break;
}
@@ -616,74 +379,123 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
}
/**
- * ice_eswitch_release - cleanup eswitch
+ * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
* @pf: pointer to PF structure
*/
-void ice_eswitch_release(struct ice_pf *pf)
+static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
- if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ struct ice_repr *repr;
+ unsigned long id;
+
+ if (test_bit(ICE_DOWN, pf->state))
return;
- ice_eswitch_disable_switchdev(pf);
- pf->switchdev.is_running = false;
+ xa_for_each(&pf->eswitch.reprs, id, repr)
+ ice_repr_start_tx_queues(repr);
}
/**
- * ice_eswitch_configure - configure eswitch
+ * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
* @pf: pointer to PF structure
*/
-int ice_eswitch_configure(struct ice_pf *pf)
+void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
- int status;
+ struct ice_repr *repr;
+ unsigned long id;
- if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
- return 0;
+ if (test_bit(ICE_DOWN, pf->state))
+ return;
- status = ice_eswitch_enable_switchdev(pf);
- if (status)
- return status;
+ xa_for_each(&pf->eswitch.reprs, id, repr)
+ ice_repr_stop_tx_queues(repr);
+}
- pf->switchdev.is_running = true;
- return 0;
+static void ice_eswitch_stop_reprs(struct ice_pf *pf)
+{
+ ice_eswitch_stop_all_tx_queues(pf);
}
-/**
- * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
- * @pf: pointer to PF structure
- */
-static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
+static void ice_eswitch_start_reprs(struct ice_pf *pf)
{
- struct ice_vf *vf;
- unsigned int bkt;
+ ice_eswitch_start_all_tx_queues(pf);
+}
- lockdep_assert_held(&pf->vfs.table_lock);
+int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+{
+ struct ice_repr *repr;
+ int err;
- if (test_bit(ICE_DOWN, pf->state))
- return;
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ return 0;
- ice_for_each_vf(pf, bkt, vf) {
- if (vf->repr)
- ice_repr_start_tx_queues(vf->repr);
+ if (xa_empty(&pf->eswitch.reprs)) {
+ err = ice_eswitch_enable_switchdev(pf);
+ if (err)
+ return err;
}
+
+ ice_eswitch_stop_reprs(pf);
+
+ repr = ice_repr_add_vf(vf);
+ if (IS_ERR(repr)) {
+ err = PTR_ERR(repr);
+ goto err_create_repr;
+ }
+
+ err = ice_eswitch_setup_repr(pf, repr);
+ if (err)
+ goto err_setup_repr;
+
+ err = xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL);
+ if (err)
+ goto err_xa_alloc;
+
+ vf->repr_id = repr->id;
+
+ ice_eswitch_start_reprs(pf);
+
+ return 0;
+
+err_xa_alloc:
+ ice_eswitch_release_repr(pf, repr);
+err_setup_repr:
+ ice_repr_rem_vf(repr);
+err_create_repr:
+ if (xa_empty(&pf->eswitch.reprs))
+ ice_eswitch_disable_switchdev(pf);
+ ice_eswitch_start_reprs(pf);
+
+ return err;
}
-/**
- * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
- * @pf: pointer to PF structure
- */
-void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
+void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+ struct devlink *devlink = priv_to_devlink(pf);
- if (test_bit(ICE_DOWN, pf->state))
+ if (!repr)
return;
- ice_for_each_vf(pf, bkt, vf) {
- if (vf->repr)
- ice_repr_stop_tx_queues(vf->repr);
+ ice_eswitch_stop_reprs(pf);
+ xa_erase(&pf->eswitch.reprs, repr->id);
+
+ if (xa_empty(&pf->eswitch.reprs))
+ ice_eswitch_disable_switchdev(pf);
+
+ ice_eswitch_release_repr(pf, repr);
+ ice_repr_rem_vf(repr);
+
+ if (xa_empty(&pf->eswitch.reprs)) {
+ /* since all port representors are destroyed, there is
+ * no point in keeping the nodes
+ */
+ ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
+ devl_lock(devlink);
+ devl_rate_nodes_destroy(devlink);
+ devl_unlock(devlink);
+ } else {
+ ice_eswitch_start_reprs(pf);
}
}
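Attach and detach are now strictly per-VF: the first successful attach brings up the switchdev environment and the last detach tears it down. A hypothetical teardown loop over all VFs, modeled on the ice_for_each_vf() usage elsewhere in the driver (the wrapper name and locking context are illustrative):

static void ice_free_vf_reprs_sketch(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_eswitch_detach(pf, vf);
	mutex_unlock(&pf->vfs.table_lock);
}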
@@ -691,32 +503,37 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
* ice_eswitch_rebuild - rebuild eswitch
* @pf: pointer to PF structure
*/
-int ice_eswitch_rebuild(struct ice_pf *pf)
+void ice_eswitch_rebuild(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
- int status;
-
- ice_eswitch_napi_disable(pf);
- ice_eswitch_napi_del(pf);
-
- status = ice_eswitch_setup_env(pf);
- if (status)
- return status;
-
- status = ice_eswitch_setup_reprs(pf);
- if (status)
- return status;
+ struct ice_repr *repr;
+ unsigned long id;
- ice_eswitch_remap_rings_to_vectors(pf);
+ if (!ice_is_switchdev_running(pf))
+ return;
- ice_replay_tc_fltrs(pf);
+ xa_for_each(&pf->eswitch.reprs, id, repr)
+ ice_eswitch_detach(pf, repr->vf);
+}
- status = ice_vsi_open(ctrl_vsi);
- if (status)
- return status;
+/**
+ * ice_eswitch_get_target - get netdev based on src_vsi from descriptor
+ * @rx_ring: ring used to receive the packet
+ * @rx_desc: descriptor used to get src_vsi value
+ *
+ * Get the src_vsi value from the descriptor and look up the corresponding
+ * representor. If it isn't found, return rx_ring->netdev.
+ */
+struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ struct ice_eswitch *eswitch = &rx_ring->vsi->back->eswitch;
+ struct ice_32b_rx_flex_desc_nic_2 *desc;
+ struct ice_repr *repr;
- ice_eswitch_napi_enable(pf);
- ice_eswitch_start_all_tx_queues(pf);
+ desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
+ repr = xa_load(&eswitch->reprs, le16_to_cpu(desc->src_vsi));
+ if (!repr)
+ return rx_ring->netdev;
- return 0;
+ return repr->netdev;
}
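One way the Rx hot path could consume this helper; the real call site lives in the Rx processing code outside this diff, so the function below is only a sketch:

static void ice_repr_set_rx_netdev(struct ice_rx_ring *rx_ring,
				   union ice_32b_rx_flex_desc *rx_desc,
				   struct sk_buff *skb)
{
	struct net_device *dev = ice_eswitch_get_target(rx_ring, rx_desc);

	/* falls back to rx_ring->netdev when no representor matches src_vsi */
	skb->protocol = eth_type_trans(skb, dev);
}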
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index b18bf83a2f5b..e2e5c0c75e7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -7,9 +7,10 @@
#include <net/devlink.h>
#ifdef CONFIG_ICE_SWITCHDEV
-void ice_eswitch_release(struct ice_pf *pf);
-int ice_eswitch_configure(struct ice_pf *pf);
-int ice_eswitch_rebuild(struct ice_pf *pf);
+void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
+int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
+void ice_eswitch_rebuild(struct ice_pf *pf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int
@@ -17,7 +18,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack);
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
-void ice_eswitch_update_repr(struct ice_vsi *vsi);
+void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
@@ -25,8 +26,16 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off);
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc);
#else /* CONFIG_ICE_SWITCHDEV */
-static inline void ice_eswitch_release(struct ice_pf *pf) { }
+static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
+
+static inline int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
@@ -34,7 +43,8 @@ static inline void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off) { }
-static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
+static inline void
+ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { }
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
@@ -68,5 +78,12 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
return NETDEV_TX_BUSY;
}
+
+static inline struct net_device *
+ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ return rx_ring->netdev;
+}
#endif /* CONFIG_ICE_SWITCHDEV */
#endif /* _ICE_ESWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
index 6ae0269bdf73..ac5beecd028b 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -893,10 +893,14 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
}
- if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back)
+ if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
vsi->back->br_port = NULL;
- else if (vsi->vf && vsi->vf->repr)
- vsi->vf->repr->br_port = NULL;
+ } else {
+ struct ice_repr *repr = ice_repr_get_by_vsi(vsi);
+
+ if (repr)
+ repr->br_port = NULL;
+ }
xa_erase(&bridge->ports, br_port->vsi_idx);
ice_eswitch_br_port_vlans_flush(br_port);
@@ -947,7 +951,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
static int
ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
{
- struct ice_vsi *vsi = pf->switchdev.uplink_vsi;
+ struct ice_vsi *vsi = pf->eswitch.uplink_vsi;
struct ice_esw_br_port *br_port;
int err;
@@ -1185,7 +1189,7 @@ ice_eswitch_br_port_event(struct notifier_block *nb,
static void
ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
{
- struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads;
+ struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads;
ASSERT_RTNL();
@@ -1194,7 +1198,7 @@ ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);
- pf->switchdev.br_offloads = NULL;
+ pf->eswitch.br_offloads = NULL;
kfree(br_offloads);
}
@@ -1205,14 +1209,14 @@ ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
ASSERT_RTNL();
- if (pf->switchdev.br_offloads)
+ if (pf->eswitch.br_offloads)
return ERR_PTR(-EEXIST);
br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
if (!br_offloads)
return ERR_PTR(-ENOMEM);
- pf->switchdev.br_offloads = br_offloads;
+ pf->eswitch.br_offloads = br_offloads;
br_offloads->pf = pf;
return br_offloads;
@@ -1223,7 +1227,7 @@ ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
{
struct ice_esw_br_offloads *br_offloads;
- br_offloads = pf->switchdev.br_offloads;
+ br_offloads = pf->eswitch.br_offloads;
if (!br_offloads)
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a34083567e6f..78b833b3e1d7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -129,7 +129,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
- ICE_PF_STAT("rx_length_errors.nic", stats.rx_len_errors),
+ ICE_PF_STAT("rx_eipe_error.nic", hw_rx_eipe_error),
ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
@@ -802,7 +802,7 @@ static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
if (!pf)
return -EINVAL;
- data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL);
+ data = kzalloc(size, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -941,15 +941,13 @@ static u64 ice_loopback_test(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
struct ice_pf *pf = orig_vsi->back;
+ u8 *tx_frame __free(kfree) = NULL;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- struct device *dev;
- u8 *tx_frame;
int i;
- dev = ice_pf_to_dev(pf);
netdev_info(netdev, "loopback test\n");
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
@@ -994,7 +992,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
for (i = 0; i < num_frames; i++) {
if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
ret = 8;
- goto lbtest_free_frame;
+ goto remove_mac_filters;
}
}
@@ -1004,8 +1002,6 @@ static u64 ice_loopback_test(struct net_device *netdev)
else if (valid_frames != num_frames)
ret = 10;
-lbtest_free_frame:
- devm_kfree(dev, tx_frame);
remove_mac_filters:
if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
@@ -1142,8 +1138,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ICE_VSI_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- ice_gstrings_vsi_stats[i].stat_string);
+ ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string);
if (ice_is_port_repr_netdev(netdev))
return;
@@ -1162,8 +1157,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
return;
for (i = 0; i < ICE_PF_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- ice_gstrings_pf_stats[i].stat_string);
+ ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);
@@ -1179,8 +1173,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
- ethtool_sprintf(&p, "%s",
- ice_gstrings_priv_flags[i].name);
+ ethtool_puts(&p, ice_gstrings_priv_flags[i].name);
break;
default:
break;
@@ -1850,14 +1843,14 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
linkmode_zero(ks->link_modes.supported);
linkmode_zero(ks->link_modes.advertising);
- for (i = 0; i < BITS_PER_TYPE(u64); i++) {
+ for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) {
if (phy_types_low & BIT_ULL(i))
ice_linkmode_set_bit(&phy_type_low_lkup[i], ks,
req_speeds, advert_phy_type_lo,
i);
}
- for (i = 0; i < BITS_PER_TYPE(u64); i++) {
+ for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) {
if (phy_types_high & BIT_ULL(i))
ice_linkmode_set_bit(&phy_type_high_lkup[i], ks,
req_speeds, advert_phy_type_hi,
@@ -2490,6 +2483,24 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
case SCTP_V4_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
break;
+ case GTPU_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_TEID_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_EH_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_UL_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_DL_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4;
+ break;
case TCP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
break;
@@ -2499,33 +2510,39 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
case SCTP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
break;
+ case GTPU_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_TEID_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_EH_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_UL_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_DL_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6;
+ break;
default:
break;
}
return hdrs;
}
-#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
-#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
-#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
-#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
-#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
-#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
-#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
-#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
-#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
-#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
-
/**
* ice_parse_hash_flds - parses hash fields from RSS hash input
* @nfc: ethtool rxnfc command
+ * @symm: true if symmetric Toeplitz is set
*
* This function parses the rxnfc command and returns intended
* hash fields for RSS configuration
*/
-static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
+static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
{
u64 hfld = ICE_HASH_INVALID;
@@ -2534,6 +2551,12 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
+ case GTPU_V4_FLOW:
+ case GTPC_V4_FLOW:
+ case GTPC_TEID_V4_FLOW:
+ case GTPU_EH_V4_FLOW:
+ case GTPU_UL_V4_FLOW:
+ case GTPU_DL_V4_FLOW:
if (nfc->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
if (nfc->data & RXH_IP_DST)
@@ -2542,6 +2565,12 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
+ case GTPU_V6_FLOW:
+ case GTPC_V6_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ case GTPU_EH_V6_FLOW:
+ case GTPU_UL_V6_FLOW:
+ case GTPU_DL_V6_FLOW:
if (nfc->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
if (nfc->data & RXH_IP_DST)
@@ -2580,6 +2609,33 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
}
}
+ if (nfc->data & RXH_GTP_TEID) {
+ switch (nfc->flow_type) {
+ case GTPC_TEID_V4_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPC_TEID;
+ break;
+ case GTPU_V4_FLOW:
+ case GTPU_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_IP_TEID;
+ break;
+ case GTPU_EH_V4_FLOW:
+ case GTPU_EH_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_EH_TEID;
+ break;
+ case GTPU_UL_V4_FLOW:
+ case GTPU_UL_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_UP_TEID;
+ break;
+ case GTPU_DL_V4_FLOW:
+ case GTPU_DL_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_DWN_TEID;
+ break;
+ default:
+ break;
+ }
+ }
+
return hfld;
}
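The new GTP flow types plug into the standard ETHTOOL_SRXFH path, so hashing on the tunnel TEID can be requested with the classic ioctl. A self-contained userspace sketch follows; it assumes uapi headers that already carry GTPU_V4_FLOW and RXH_GTP_TEID (as used above), and error handling is omitted.

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Hash GTP-U over IPv4 flows on source/destination IP plus the TEID.
 * fd is any open AF_INET datagram socket.
 */
static int demo_set_gtpu4_hash(int fd, const char *ifname)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXFH,
		.flow_type = GTPU_V4_FLOW,
		.data = RXH_IP_SRC | RXH_IP_DST | RXH_GTP_TEID,
	};
	struct ifreq ifr = {};

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}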
@@ -2594,9 +2650,11 @@ static int
ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
struct ice_pf *pf = vsi->back;
+ struct ice_rss_hash_cfg cfg;
struct device *dev;
u64 hashed_flds;
int status;
+ bool symm;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@@ -2606,7 +2664,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- hashed_flds = ice_parse_hash_flds(nfc);
+ symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
+ hashed_flds = ice_parse_hash_flds(nfc, symm);
if (hashed_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
vsi->vsi_num);
@@ -2620,7 +2679,12 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
+ cfg.hash_flds = hashed_flds;
+ cfg.addl_hdrs = hdrs;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
+ cfg.symm = symm;
+
+ status = ice_add_rss_cfg(&pf->hw, vsi, &cfg);
if (status) {
dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
vsi->vsi_num, status);
@@ -2641,6 +2705,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
struct ice_pf *pf = vsi->back;
struct device *dev;
u64 hash_flds;
+ bool symm;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@@ -2659,7 +2724,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return;
}
- hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs);
+ hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
if (hash_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
vsi->vsi_num);
@@ -2683,6 +2748,13 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
nfc->data |= (u64)RXH_L4_B_2_3;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_GTPC_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_IP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_EH_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
+ nfc->data |= (u64)RXH_GTP_TEID;
}
/**
@@ -3198,11 +3270,18 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
return np->vsi->rss_table_size;
}
+/**
+ * ice_get_rxfh - get the Rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ *
+ * Reads the indirection table directly from the hardware.
+ */
static int
-ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ u32 rss_context = rxfh->rss_context;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u16 qcount, offset;
@@ -3233,17 +3312,18 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
vsi = vsi->tc_map_vsi[rss_context];
}
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
+ rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
- if (!indir)
+ if (!rxfh->indir)
return 0;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
- err = ice_get_rss_key(vsi, key);
+ err = ice_get_rss_key(vsi, rxfh->key);
if (err)
goto out;
@@ -3253,12 +3333,12 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
if (ice_is_adq_active(pf)) {
for (i = 0; i < vsi->rss_table_size; i++)
- indir[i] = offset + lut[i] % qcount;
+ rxfh->indir[i] = offset + lut[i] % qcount;
goto out;
}
for (i = 0; i < vsi->rss_table_size; i++)
- indir[i] = lut[i];
+ rxfh->indir[i] = lut[i];
out:
kfree(lut);
@@ -3266,42 +3346,31 @@ out:
}
/**
- * ice_get_rxfh - get the Rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
- *
- * Reads the indirection table directly from the hardware.
- */
-static int
-ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
-{
- return ice_get_rxfh_context(netdev, indir, key, hfunc, 0);
-}
-
-/**
* ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue ID, otherwise
* returns 0 after programming the table.
*/
static int
-ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
- const u8 hfunc)
+ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct device *dev;
int err;
dev = ice_pf_to_dev(pf);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (rxfh->rss_context)
return -EOPNOTSUPP;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
@@ -3315,7 +3384,15 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return -EOPNOTSUPP;
}
- if (key) {
+ /* Update the VSI's hash function */
+ if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ err = ice_set_rss_hfunc(vsi, hfunc);
+ if (err)
+ return err;
+
+ if (rxfh->key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
@@ -3323,7 +3400,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
if (!vsi->rss_hkey_user)
return -ENOMEM;
}
- memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
+ memcpy(vsi->rss_hkey_user, rxfh->key,
+ ICE_VSIQF_HKEY_ARRAY_SIZE);
err = ice_set_rss_key(vsi, vsi->rss_hkey_user);
if (err)
@@ -3338,11 +3416,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- if (indir) {
+ if (rxfh->indir) {
int i;
for (i = 0; i < vsi->rss_table_size; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
+ vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
} else {
ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
vsi->rss_size);
@@ -3361,7 +3439,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
struct ice_pf *pf = ice_netdev_to_pf(dev);
/* only report timestamping if PTP is enabled */
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -4220,9 +4298,11 @@ ice_get_module_eeprom(struct net_device *netdev,
}
static const struct ethtool_ops ice_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
+ .cap_rss_sym_xor_supported = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
@@ -4253,7 +4333,6 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_pauseparam = ice_set_pauseparam,
.get_rxfh_key_size = ice_get_rxfh_key_size,
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
- .get_rxfh_context = ice_get_rxfh_context,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
.get_channels = ice_get_channels,
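The RXH_XFRM_SYM_XOR handling above selects the symmetric Toeplitz VSI option: the usual description is that source and destination fields are XOR-folded before hashing, so both directions of a connection land on the same queue. Recent ethtool exposes this as an RSS transform (ethtool -X <dev> xfrm symmetric-xor, if the installed version supports it). A purely conceptual sketch of the folding idea, not the device datapath:

#include <stdint.h>

/* Conceptual only: folding src/dst with XOR makes (src, dst) and (dst, src)
 * hash identically, which is the property symmetric RSS provides.
 */
static uint64_t demo_sym_fold(uint32_t sip, uint32_t dip,
			      uint16_t sport, uint16_t dport)
{
	return ((uint64_t)(sip ^ dip) << 32) | (uint16_t)(sport ^ dport);
}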
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index d151e5bacfec..e3cab8e98f52 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -41,6 +41,8 @@ static struct in6_addr zero_ipv6_addr_mask = {
static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
{
switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_ETH:
+ return ETHER_FLOW;
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
return TCP_V4_FLOW;
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
@@ -72,6 +74,8 @@ static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
{
switch (eth) {
+ case ETHER_FLOW:
+ return ICE_FLTR_PTYPE_NONF_ETH;
case TCP_V4_FLOW:
return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
case UDP_V4_FLOW:
@@ -137,6 +141,10 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
switch (fsp->flow_type) {
+ case ETHER_FLOW:
+ fsp->h_u.ether_spec = rule->eth;
+ fsp->m_u.ether_spec = rule->eth_mask;
+ break;
case IPV4_USER_FLOW:
fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
fsp->h_u.usr_ip4_spec.proto = 0;
@@ -302,9 +310,7 @@ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
continue;
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
- u64 prof_id;
-
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ u64 prof_id = prof->prof_id[tun];
for (i = 0; i < prof->cnt; i++) {
if (prof->vsi_h[i] != vsi_idx)
@@ -362,10 +368,9 @@ ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
return;
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
- u64 prof_id;
+ u64 prof_id = prof->prof_id[tun];
int j;
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
for (j = 0; j < prof->cnt; j++) {
u16 vsi_num;
@@ -439,14 +444,12 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
struct ice_flow_prof *hw_prof;
struct ice_fd_hw_prof *prof;
- u64 prof_id;
int j;
prof = hw->fdir_prof[flow];
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
+ ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX,
prof->fdir_seg[tun], TNL_SEG_CNT(tun),
- &hw_prof);
+ false, &hw_prof);
for (j = 0; j < prof->cnt; j++) {
enum ice_flow_priority prio;
u64 entry_h = 0;
@@ -454,7 +457,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
prio = ICE_FLOW_PRIO_NORMAL;
err = ice_flow_add_entry(hw, ICE_BLK_FD,
- prof_id,
+ hw_prof->id,
prof->vsi_h[0],
prof->vsi_h[j],
prio, prof->fdir_seg,
@@ -464,6 +467,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
flow);
continue;
}
+ prof->prof_id[tun] = hw_prof->id;
prof->entry_h[j][tun] = entry_h;
}
}
@@ -507,8 +511,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
return -EINVAL;
data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
- data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
- ICE_USERDEF_FLEX_OFFS_S;
+ data->flex_offset = FIELD_GET(ICE_USERDEF_FLEX_OFFS_M, value);
if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
return -EINVAL;
@@ -638,7 +641,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
u64 entry1_h = 0;
u64 entry2_h = 0;
bool del_last;
- u64 prof_id;
int err;
int idx;
@@ -668,7 +670,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* then return error.
*/
if (hw->fdir_fltr_cnt[flow]) {
- dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
return -EINVAL;
}
@@ -686,23 +688,23 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* That is the final parameters are 1 header (segment), no
* actions (NULL) and zero actions 0.
*/
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- TNL_SEG_CNT(tun), &prof);
+ err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
+ TNL_SEG_CNT(tun), false, &prof);
if (err)
return err;
- err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
if (err)
goto err_prof;
- err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
if (err)
goto err_entry;
hw_prof->fdir_seg[tun] = seg;
+ hw_prof->prof_id[tun] = prof->id;
hw_prof->entry_h[0][tun] = entry1_h;
hw_prof->entry_h[1][tun] = entry2_h;
hw_prof->vsi_h[0] = main_vsi->idx;
@@ -719,7 +721,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
entry1_h = 0;
vsi_h = main_vsi->tc_map_vsi[idx]->idx;
- err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id,
main_vsi->idx, vsi_h,
ICE_FLOW_PRIO_NORMAL, seg,
&entry1_h);
@@ -756,7 +758,7 @@ err_unroll:
if (!hw_prof->entry_h[idx][tun])
continue;
- ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
hw_prof->entry_h[idx][tun] = 0;
if (del_last)
@@ -766,11 +768,11 @@ err_unroll:
hw_prof->cnt = 0;
err_entry:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
- ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
+ ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
- ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
- dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
return err;
}
@@ -1200,6 +1202,122 @@ ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
}
/**
+ * ice_fdir_vlan_valid - validate VLAN data for Flow Director rule
+ * @dev: network interface device structure
+ * @fsp: pointer to ethtool Rx flow specification
+ *
+ * Return: true if VLAN data is valid, false otherwise
+ */
+static bool ice_fdir_vlan_valid(struct device *dev,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->m_ext.vlan_etype && !eth_type_vlan(fsp->h_ext.vlan_etype))
+ return false;
+
+ if (fsp->m_ext.vlan_tci && ntohs(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
+ return false;
+
+ /* proto and vlan must have vlan-etype defined */
+ if (fsp->m_u.ether_spec.h_proto && fsp->m_ext.vlan_tci &&
+ !fsp->m_ext.vlan_etype) {
+ dev_warn(dev, "Filter with proto and vlan require also vlan-etype");
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_set_ether_flow_seg - set address and protocol segments for ether flow
+ * @dev: network interface device structure
+ * @seg: flow segment for programming
+ * @eth_spec: mask data from ethtool
+ *
+ * Return: 0 on success and errno in case of error.
+ */
+static int ice_set_ether_flow_seg(struct device *dev,
+ struct ice_flow_seg_info *seg,
+ struct ethhdr *eth_spec)
+{
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH);
+
+ /* empty rules are not valid */
+ if (is_zero_ether_addr(eth_spec->h_source) &&
+ is_zero_ether_addr(eth_spec->h_dest) &&
+ !eth_spec->h_proto)
+ return -EINVAL;
+
+ /* Ethertype */
+ if (eth_spec->h_proto == htons(0xFFFF)) {
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_TYPE,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ } else if (eth_spec->h_proto) {
+ dev_warn(dev, "Only 0x0000 or 0xffff proto mask is allowed for flow-type ether");
+ return -EOPNOTSUPP;
+ }
+
+ /* Source MAC address */
+ if (is_broadcast_ether_addr(eth_spec->h_source))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!is_zero_ether_addr(eth_spec->h_source))
+ goto err_mask;
+
+ /* Destination MAC address */
+ if (is_broadcast_ether_addr(eth_spec->h_dest))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!is_zero_ether_addr(eth_spec->h_dest))
+ goto err_mask;
+
+ return 0;
+
+err_mask:
+ dev_warn(dev, "Only 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff MAC address mask is allowed for flow-type ether");
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ice_set_fdir_vlan_seg - set vlan segments for ether flow
+ * @seg: flow segment for programming
+ * @ext_masks: masks for additional RX flow fields
+ *
+ * Return: 0 on success and errno in case of error.
+ */
+static int
+ice_set_fdir_vlan_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_flow_ext *ext_masks)
+{
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_VLAN);
+
+ if (ext_masks->vlan_etype) {
+ if (ext_masks->vlan_etype != htons(0xFFFF))
+ return -EOPNOTSUPP;
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_S_VLAN,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ if (ext_masks->vlan_tci) {
+ if (ext_masks->vlan_tci != htons(0xFFFF))
+ return -EOPNOTSUPP;
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_C_VLAN,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ return 0;
+}
+
+/**
* ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
* @pf: PF structure
* @fsp: pointer to ethtool Rx flow specification
@@ -1215,7 +1333,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
struct device *dev = ice_pf_to_dev(pf);
enum ice_fltr_ptype fltr_idx;
struct ice_hw *hw = &pf->hw;
- bool perfect_filter;
+ bool perfect_filter = false;
int ret;
seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
@@ -1268,6 +1386,16 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
&perfect_filter);
break;
+ case ETHER_FLOW:
+ ret = ice_set_ether_flow_seg(dev, seg, &fsp->m_u.ether_spec);
+ if (!ret && (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci)) {
+ if (!ice_fdir_vlan_valid(dev, fsp)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = ice_set_fdir_vlan_seg(seg, &fsp->m_ext);
+ }
+ break;
default:
ret = -EINVAL;
}
@@ -1829,6 +1957,10 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
break;
+ case ETHER_FLOW:
+ input->eth = fsp->h_u.ether_spec;
+ input->eth_mask = fsp->m_u.ether_spec;
+ break;
default:
/* not doing un-parsed flow types */
return -EINVAL;
@@ -1853,6 +1985,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
struct ice_pf *pf;
struct ice_hw *hw;
int fltrs_needed;
+ u32 max_location;
u16 tunnel_port;
int ret;
@@ -1884,8 +2017,10 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
if (ret)
return ret;
- if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
- dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
+ max_location = ice_get_fdir_cnt_all(hw);
+ if (fsp->location >= max_location) {
+ dev_err(dev, "Failed to add filter. The number of ntuple filters or provided location exceed max %d.\n",
+ max_location);
return -ENOSPC;
}
@@ -1893,7 +2028,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
- dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
+ dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
return -ENOSPC;
}
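With ETHER_FLOW support in place, an ntuple rule keyed on the ethertype can be installed through the regular ETHTOOL_SRXCLSRLINS ioctl. The sketch below steers PTP-over-Ethernet frames (ethertype 0x88F7) to Rx queue 4; per the validation above, only 0x0000/0xffff ethertype masks and zero/broadcast MAC masks are accepted. The queue and location values are illustrative.

#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int demo_add_ether_rule(int fd, const char *ifname)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXCLSRLINS,
		.fs = {
			.flow_type = ETHER_FLOW,
			.ring_cookie = 4,	/* destination Rx queue */
			.location = 1,		/* rule slot */
		},
	};
	struct ifreq ifr = {};

	nfc.fs.h_u.ether_spec.h_proto = htons(0x88F7);
	nfc.fs.m_u.ether_spec.h_proto = htons(0xFFFF);

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}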
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index ae089d32ee9d..26b357c0ae15 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -4,6 +4,8 @@
#include "ice_common.h"
/* These are training packet headers used to program flow director filters. */
+static const u8 ice_fdir_eth_pkt[22];
+
static const u8 ice_fdir_tcpv4_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
@@ -417,6 +419,11 @@ static const u8 ice_fdir_ip6_tun_pkt[] = {
/* Flow Director no-op training packet table */
static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
{
+ ICE_FLTR_PTYPE_NONF_ETH,
+ sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt,
+ sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt,
+ },
+ {
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt,
sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt,
@@ -604,55 +611,32 @@ ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *ctx,
u64 qword;
/* prep QW0 of FD filter programming desc */
- qword = ((u64)ctx->qindex << ICE_FXD_FLTR_QW0_QINDEX_S) &
- ICE_FXD_FLTR_QW0_QINDEX_M;
- qword |= ((u64)ctx->comp_q << ICE_FXD_FLTR_QW0_COMP_Q_S) &
- ICE_FXD_FLTR_QW0_COMP_Q_M;
- qword |= ((u64)ctx->comp_report << ICE_FXD_FLTR_QW0_COMP_REPORT_S) &
- ICE_FXD_FLTR_QW0_COMP_REPORT_M;
- qword |= ((u64)ctx->fd_space << ICE_FXD_FLTR_QW0_FD_SPACE_S) &
- ICE_FXD_FLTR_QW0_FD_SPACE_M;
- qword |= ((u64)ctx->cnt_index << ICE_FXD_FLTR_QW0_STAT_CNT_S) &
- ICE_FXD_FLTR_QW0_STAT_CNT_M;
- qword |= ((u64)ctx->cnt_ena << ICE_FXD_FLTR_QW0_STAT_ENA_S) &
- ICE_FXD_FLTR_QW0_STAT_ENA_M;
- qword |= ((u64)ctx->evict_ena << ICE_FXD_FLTR_QW0_EVICT_ENA_S) &
- ICE_FXD_FLTR_QW0_EVICT_ENA_M;
- qword |= ((u64)ctx->toq << ICE_FXD_FLTR_QW0_TO_Q_S) &
- ICE_FXD_FLTR_QW0_TO_Q_M;
- qword |= ((u64)ctx->toq_prio << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) &
- ICE_FXD_FLTR_QW0_TO_Q_PRI_M;
- qword |= ((u64)ctx->dpu_recipe << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) &
- ICE_FXD_FLTR_QW0_DPU_RECIPE_M;
- qword |= ((u64)ctx->drop << ICE_FXD_FLTR_QW0_DROP_S) &
- ICE_FXD_FLTR_QW0_DROP_M;
- qword |= ((u64)ctx->flex_prio << ICE_FXD_FLTR_QW0_FLEX_PRI_S) &
- ICE_FXD_FLTR_QW0_FLEX_PRI_M;
- qword |= ((u64)ctx->flex_mdid << ICE_FXD_FLTR_QW0_FLEX_MDID_S) &
- ICE_FXD_FLTR_QW0_FLEX_MDID_M;
- qword |= ((u64)ctx->flex_val << ICE_FXD_FLTR_QW0_FLEX_VAL_S) &
- ICE_FXD_FLTR_QW0_FLEX_VAL_M;
+ qword = FIELD_PREP(ICE_FXD_FLTR_QW0_QINDEX_M, ctx->qindex);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_COMP_Q_M, ctx->comp_q);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_COMP_REPORT_M, ctx->comp_report);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FD_SPACE_M, ctx->fd_space);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_STAT_CNT_M, ctx->cnt_index);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_STAT_ENA_M, ctx->cnt_ena);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_EVICT_ENA_M, ctx->evict_ena);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_TO_Q_M, ctx->toq);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_TO_Q_PRI_M, ctx->toq_prio);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_DPU_RECIPE_M, ctx->dpu_recipe);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_DROP_M, ctx->drop);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_PRI_M, ctx->flex_prio);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_MDID_M, ctx->flex_mdid);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_VAL_M, ctx->flex_val);
fdir_desc->qidx_compq_space_stat = cpu_to_le64(qword);
/* prep QW1 of FD filter programming desc */
- qword = ((u64)ctx->dtype << ICE_FXD_FLTR_QW1_DTYPE_S) &
- ICE_FXD_FLTR_QW1_DTYPE_M;
- qword |= ((u64)ctx->pcmd << ICE_FXD_FLTR_QW1_PCMD_S) &
- ICE_FXD_FLTR_QW1_PCMD_M;
- qword |= ((u64)ctx->desc_prof_prio << ICE_FXD_FLTR_QW1_PROF_PRI_S) &
- ICE_FXD_FLTR_QW1_PROF_PRI_M;
- qword |= ((u64)ctx->desc_prof << ICE_FXD_FLTR_QW1_PROF_S) &
- ICE_FXD_FLTR_QW1_PROF_M;
- qword |= ((u64)ctx->fd_vsi << ICE_FXD_FLTR_QW1_FD_VSI_S) &
- ICE_FXD_FLTR_QW1_FD_VSI_M;
- qword |= ((u64)ctx->swap << ICE_FXD_FLTR_QW1_SWAP_S) &
- ICE_FXD_FLTR_QW1_SWAP_M;
- qword |= ((u64)ctx->fdid_prio << ICE_FXD_FLTR_QW1_FDID_PRI_S) &
- ICE_FXD_FLTR_QW1_FDID_PRI_M;
- qword |= ((u64)ctx->fdid_mdid << ICE_FXD_FLTR_QW1_FDID_MDID_S) &
- ICE_FXD_FLTR_QW1_FDID_MDID_M;
- qword |= ((u64)ctx->fdid << ICE_FXD_FLTR_QW1_FDID_S) &
- ICE_FXD_FLTR_QW1_FDID_M;
+ qword = FIELD_PREP(ICE_FXD_FLTR_QW1_DTYPE_M, ctx->dtype);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PCMD_M, ctx->pcmd);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PROF_PRI_M, ctx->desc_prof_prio);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PROF_M, ctx->desc_prof);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FD_VSI_M, ctx->fd_vsi);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_SWAP_M, ctx->swap);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_PRI_M, ctx->fdid_prio);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_MDID_M, ctx->fdid_mdid);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_M, ctx->fdid);
fdir_desc->dtype_cmd_vsi_fdid = cpu_to_le64(qword);
}
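The conversion above is mechanical: FIELD_PREP() from <linux/bitfield.h> derives the shift from the mask at compile time, so the per-field *_S shift defines become redundant. A small self-contained illustration with made-up masks (the real ones are the ICE_FXD_FLTR_QW0/QW1 definitions used above):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_QW0_QINDEX_M	GENMASK_ULL(10, 0)	/* bits 10:0 */
#define DEMO_QW0_DROP_M		GENMASK_ULL(11, 11)	/* bit 11 */

static u64 demo_pack_qw0(u16 qindex, u8 drop)
{
	/* value is shifted into place and clamped to the mask */
	return FIELD_PREP(DEMO_QW0_QINDEX_M, qindex) |
	       FIELD_PREP(DEMO_QW0_DROP_M, drop);
}

static u16 demo_unpack_qindex(u64 qw0)
{
	/* inverse operation: mask out and shift back down */
	return FIELD_GET(DEMO_QW0_QINDEX_M, qw0);
}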
@@ -937,6 +921,21 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
* perspective. The input from user is from Rx filter perspective.
*/
switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_ETH:
+ ice_pkt_insert_mac_addr(loc, input->eth.h_dest);
+ ice_pkt_insert_mac_addr(loc + ETH_ALEN, input->eth.h_source);
+ if (input->ext_data.vlan_tag || input->ext_data.vlan_type) {
+ ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET,
+ input->ext_data.vlan_type);
+ ice_pkt_insert_u16(loc, ICE_ETH_VLAN_TCI_OFFSET,
+ input->ext_data.vlan_tag);
+ ice_pkt_insert_u16(loc, ICE_ETH_TYPE_VLAN_OFFSET,
+ input->eth.h_proto);
+ } else {
+ ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET,
+ input->eth.h_proto);
+ }
+ break;
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
input->ip.v4.src_ip);
@@ -1212,52 +1211,58 @@ static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b)
* ice_fdir_comp_rules - compare 2 filters
* @a: a Flow Director filter data structure
* @b: a Flow Director filter data structure
- * @v6: bool true if v6 filter
*
* Returns true if the filters match
*/
static bool
-ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6)
+ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b)
{
enum ice_fltr_ptype flow_type = a->flow_type;
/* The calling function already checks that the two filters have the
* same flow_type.
*/
- if (!v6) {
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) {
- if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
- a->ip.v4.src_ip == b->ip.v4.src_ip &&
- a->ip.v4.dst_port == b->ip.v4.dst_port &&
- a->ip.v4.src_port == b->ip.v4.src_port)
- return true;
- } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
- if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
- a->ip.v4.src_ip == b->ip.v4.src_ip &&
- a->ip.v4.l4_header == b->ip.v4.l4_header &&
- a->ip.v4.proto == b->ip.v4.proto &&
- a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
- a->ip.v4.tos == b->ip.v4.tos)
- return true;
- }
- } else {
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) {
- if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
- a->ip.v6.src_port == b->ip.v6.src_port &&
- !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
- b->ip.v6.dst_ip) &&
- !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
- b->ip.v6.src_ip))
- return true;
- } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
- if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
- a->ip.v6.src_port == b->ip.v6.src_port)
- return true;
- }
+ switch (flow_type) {
+ case ICE_FLTR_PTYPE_NONF_ETH:
+ if (!memcmp(&a->eth, &b->eth, sizeof(a->eth)))
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.dst_port == b->ip.v4.dst_port &&
+ a->ip.v4.src_port == b->ip.v4.src_port)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.l4_header == b->ip.v4.l4_header &&
+ a->ip.v4.proto == b->ip.v4.proto &&
+ a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
+ a->ip.v4.tos == b->ip.v4.tos)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port &&
+ !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
+ b->ip.v6.dst_ip) &&
+ !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
+ b->ip.v6.src_ip))
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port)
+ return true;
+ break;
+ default:
+ break;
}
return false;
@@ -1276,19 +1281,10 @@ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input)
bool ret = false;
list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
- enum ice_fltr_ptype flow_type;
-
if (rule->flow_type != input->flow_type)
continue;
- flow_type = input->flow_type;
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
- ret = ice_fdir_comp_rules(rule, input, false);
- else
- ret = ice_fdir_comp_rules(rule, input, true);
+ ret = ice_fdir_comp_rules(rule, input);
if (ret) {
if (rule->fltr_id == input->fltr_id &&
rule->q_index != input->q_index)
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index 1b9b84490689..021ecbac7848 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -8,6 +8,9 @@
#define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF)
/* macros for offsets into packets for flow director programming */
+#define ICE_ETH_TYPE_F_OFFSET 12
+#define ICE_ETH_VLAN_TCI_OFFSET 14
+#define ICE_ETH_TYPE_VLAN_OFFSET 16
#define ICE_IPV4_SRC_ADDR_OFFSET 26
#define ICE_IPV4_DST_ADDR_OFFSET 30
#define ICE_IPV4_TCP_SRC_PORT_OFFSET 34
@@ -159,6 +162,8 @@ struct ice_fdir_fltr {
struct list_head fltr_node;
enum ice_fltr_ptype flow_type;
+ struct ethhdr eth, eth_mask;
+
union {
struct ice_fdir_v4 v4;
struct ice_fdir_v6 v6;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 5ce413965930..20d5db88c99f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1218,11 +1218,13 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
* @blk: HW block
* @fv: field vector to search for
* @masks: masks for FV
+ * @symm: symmetric setting for RSS flows
* @prof_id: receives the profile ID
*/
static int
ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
- struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
+ struct ice_fv_word *fv, u16 *masks, bool symm,
+ u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
u8 i;
@@ -1236,6 +1238,9 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
for (i = 0; i < (u8)es->count; i++) {
u16 off = i * es->fvw;
+ if (blk == ICE_BLK_RSS && es->symm[i] != symm)
+ continue;
+
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
continue;
@@ -1409,13 +1414,13 @@ ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
switch (blk) {
case ICE_BLK_RSS:
offset = GLQF_HMASK(mask_idx);
- val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
- val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
+ val = FIELD_PREP(GLQF_HMASK_MSK_INDEX_M, idx);
+ val |= FIELD_PREP(GLQF_HMASK_MASK_M, mask);
break;
case ICE_BLK_FD:
offset = GLQF_FDMASK(mask_idx);
- val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
- val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
+ val = FIELD_PREP(GLQF_FDMASK_MSK_INDEX_M, idx);
+ val |= FIELD_PREP(GLQF_FDMASK_MASK_M, mask);
break;
default:
ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
@@ -1716,15 +1721,16 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
}
/**
- * ice_write_es - write an extraction sequence to hardware
+ * ice_write_es - write an extraction sequence and symmetric setting to hardware
* @hw: pointer to the HW struct
* @blk: the block in which to write the extraction sequence
* @prof_id: the profile ID to write
* @fv: pointer to the extraction sequence to write - NULL to clear extraction
+ * @symm: symmetric setting for RSS profiles
*/
static void
ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
- struct ice_fv_word *fv)
+ struct ice_fv_word *fv, bool symm)
{
u16 off;
@@ -1737,6 +1743,9 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
memcpy(&hw->blk[blk].es.t[off], fv,
hw->blk[blk].es.fvw * sizeof(*fv));
}
+
+ if (blk == ICE_BLK_RSS)
+ hw->blk[blk].es.symm[prof_id] = symm;
}
/**
@@ -1753,7 +1762,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) {
- ice_write_es(hw, blk, prof_id, NULL);
+ ice_write_es(hw, blk, prof_id, NULL, false);
ice_free_prof_masks(hw, blk, prof_id);
return ice_free_prof_id(hw, blk, prof_id);
}
@@ -2116,8 +2125,10 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.symm);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_id.id);
}
list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
@@ -2150,6 +2161,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
@@ -2178,8 +2190,11 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
+ memset(es->symm, 0, es->count * sizeof(*es->symm));
memset(es->written, 0, es->count * sizeof(*es->written));
memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
+
+ memset(prof_id->id, 0, prof_id->count * sizeof(*prof_id->id));
}
}
@@ -2196,6 +2211,7 @@ int ice_init_hw_tbls(struct ice_hw *hw)
ice_init_all_prof_masks(hw);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
@@ -2292,6 +2308,11 @@ int ice_init_hw_tbls(struct ice_hw *hw)
if (!es->ref_count)
goto err;
+ es->symm = devm_kcalloc(ice_hw_to_dev(hw), es->count,
+ sizeof(*es->symm), GFP_KERNEL);
+ if (!es->symm)
+ goto err;
+
es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->written), GFP_KERNEL);
if (!es->written)
@@ -2301,6 +2322,12 @@ int ice_init_hw_tbls(struct ice_hw *hw)
sizeof(*es->mask_ena), GFP_KERNEL);
if (!es->mask_ena)
goto err;
+
+ prof_id->count = blk_sizes[i].prof_id;
+ prof_id->id = devm_kcalloc(ice_hw_to_dev(hw), prof_id->count,
+ sizeof(*prof_id->id), GFP_KERNEL);
+ if (!prof_id->id)
+ goto err;
}
return 0;
@@ -2963,6 +2990,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* @attr_cnt: number of elements in attr array
* @es: extraction sequence (length of array is determined by the block)
* @masks: mask for extraction sequence
+ * @symm: symmetric setting for RSS profiles
*
* This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
@@ -2972,7 +3000,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks)
+ struct ice_fv_word *es, u16 *masks, bool symm)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
@@ -2986,7 +3014,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* search for existing profile */
- status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
+ status = ice_find_prof_id_with_mask(hw, blk, es, masks, symm, &prof_id);
if (status) {
/* allocate profile ID */
status = ice_alloc_prof_id(hw, blk, &prof_id);
@@ -3009,7 +3037,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
goto err_ice_add_prof;
/* and write new es */
- ice_write_es(hw, blk, prof_id, es);
+ ice_write_es(hw, blk, prof_id, es, symm);
}
ice_prof_inc_ref(hw, blk, prof_id);
@@ -3097,7 +3125,7 @@ err_ice_add_prof:
* This will search for a profile tracking ID which was previously added.
* The profile map lock should be held before calling this function.
*/
-static struct ice_prof_map *
+struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *entry = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 7af7c8e9aa4e..b39d7cdc381f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -42,7 +42,9 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks);
+ struct ice_fv_word *es, u16 *masks, bool symm);
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 4f42e14ed3ae..817beca591e0 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -93,6 +93,7 @@ enum ice_tunnel_type {
TNL_GRETAP,
TNL_GTPC,
TNL_GTPU,
+ TNL_PFCP,
__TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
@@ -146,6 +147,7 @@ struct ice_es {
u32 *mask_ena;
struct list_head prof_map;
struct ice_fv_word *t;
+ u8 *symm; /* symmetric setting per profile (RSS blk) */
struct mutex prof_map_lock; /* protect access to profiles list */
u8 *written;
u8 reverse; /* set to true to reverse FV order */
@@ -304,10 +306,16 @@ struct ice_masks {
struct ice_mask masks[ICE_PROF_MASK_COUNT];
};
+struct ice_prof_id {
+ unsigned long *id;
+ int count;
+};
+
/* Tables per block */
struct ice_blk_info {
struct ice_xlt1 xlt1;
struct ice_xlt2 xlt2;
+ struct ice_prof_id prof_id;
struct ice_prof_tcam prof;
struct ice_prof_redir prof_redir;
struct ice_es es;
@@ -351,7 +359,8 @@ enum ice_prof_type {
ICE_PROF_TUN_GRE = 0x4,
ICE_PROF_TUN_GTPU = 0x8,
ICE_PROF_TUN_GTPC = 0x10,
- ICE_PROF_TUN_ALL = 0x1E,
+ ICE_PROF_TUN_PFCP = 0x20,
+ ICE_PROF_TUN_ALL = 0x3E,
ICE_PROF_ALL = 0xFF,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index fb8b925aaf8b..fc2b58f56279 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1235,6 +1235,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
+#define ICE_FLOW_FIND_PROF_CHK_SYMM 0x00000008
/**
* ice_flow_find_prof_conds - Find a profile matching headers and conditions
@@ -1243,13 +1244,14 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
* @dir: flow direction
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
+ * @symm: symmetric setting for RSS profiles
* @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
* @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
*/
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
- u8 segs_cnt, u16 vsi_handle, u32 conds)
+ u8 segs_cnt, bool symm, u16 vsi_handle, u32 conds)
{
struct ice_flow_prof *p, *prof = NULL;
@@ -1265,6 +1267,11 @@ ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
!test_bit(vsi_handle, p->vsis))
continue;
+ /* Check for symmetric settings */
+ if ((conds & ICE_FLOW_FIND_PROF_CHK_SYMM) &&
+ p->symm != symm)
+ continue;
+
/* Protocol headers must be checked. Matched fields are
* checked if specified.
*/
@@ -1328,26 +1335,33 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
* @hw: pointer to the HW struct
* @blk: classification stage
* @dir: flow direction
- * @prof_id: unique ID to identify this flow profile
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
+ * @symm: symmetric setting for RSS profiles
* @prof: stores the returned flow profile added
*
* Assumption: the caller has acquired the lock to the profile list
*/
static int
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
- enum ice_flow_dir dir, u64 prof_id,
+ enum ice_flow_dir dir,
struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_prof **prof)
+ bool symm, struct ice_flow_prof **prof)
{
struct ice_flow_prof_params *params;
+ struct ice_prof_id *ids;
int status;
+ u64 prof_id;
u8 i;
if (!prof)
return -EINVAL;
+ ids = &hw->blk[blk].prof_id;
+ prof_id = find_first_zero_bit(ids->id, ids->count);
+ if (prof_id >= ids->count)
+ return -ENOSPC;
+
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
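Profile IDs are no longer derived arithmetically from the flow type; the hunk above draws them from a per-block bitmap instead. A stripped-down sketch of that allocator pattern follows; the demo_* wrappers are illustrative, while in the driver the bitmap is hw->blk[blk].prof_id, callers are serialized by the flow-profile mutex, and the bit is set only once the profile is committed.

#include <linux/bitmap.h>
#include <linux/errno.h>

static int demo_prof_id_alloc(struct ice_prof_id *ids, unsigned long *ret_id)
{
	unsigned long id = find_first_zero_bit(ids->id, ids->count);

	if (id >= ids->count)
		return -ENOSPC;

	set_bit(id, ids->id);	/* reserve the slot */
	*ret_id = id;
	return 0;
}

static void demo_prof_id_free(struct ice_prof_id *ids, unsigned long id)
{
	clear_bit(id, ids->id);	/* e.g. from ice_flow_rem_prof_sync() */
}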
@@ -1369,6 +1383,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
params->prof->id = prof_id;
params->prof->dir = dir;
params->prof->segs_cnt = segs_cnt;
+ params->prof->symm = symm;
/* Make a copy of the segments that need to be persistent in the flow
* profile instance
@@ -1385,7 +1400,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
params->attr, params->attr_cnt, params->es,
- params->mask);
+ params->mask, symm);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@@ -1393,6 +1408,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
INIT_LIST_HEAD(&params->prof->entries);
mutex_init(&params->prof->entries_lock);
+ set_bit(prof_id, ids->id);
*prof = params->prof;
out:
@@ -1436,6 +1452,7 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id);
if (!status) {
+ clear_bit(prof->id, hw->blk[blk].prof_id.id);
list_del(&prof->l_entry);
mutex_destroy(&prof->entries_lock);
devm_kfree(ice_hw_to_dev(hw), prof);
@@ -1511,15 +1528,15 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @hw: pointer to the HW struct
* @blk: classification stage
* @dir: flow direction
- * @prof_id: unique ID to identify this flow profile
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
+ * @symm: symmetric setting for RSS profiles
* @prof: stores the returned flow profile added
*/
int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
- u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_prof **prof)
+ struct ice_flow_seg_info *segs, u8 segs_cnt,
+ bool symm, struct ice_flow_prof **prof)
{
int status;
@@ -1538,8 +1555,8 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
mutex_lock(&hw->fl_profs_locks[blk]);
- status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
- prof);
+ status = ice_flow_add_prof_sync(hw, blk, dir, segs, segs_cnt,
+ symm, prof);
if (!status)
list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
@@ -1855,37 +1872,49 @@ int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id)
/**
* ice_flow_set_rss_seg_info - setup packet segments for RSS
* @segs: pointer to the flow field segment(s)
- * @hash_fields: fields to be hashed on for the segment(s)
- * @flow_hdr: protocol header fields within a packet segment
+ * @seg_cnt: segment count
+ * @cfg: configure parameters
*
* Helper function to extract fields from hash bitmap and use flow
* header value to set flow field segment for further use in flow
* profile entry or removal.
*/
static int
-ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
- u32 flow_hdr)
+ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
+ const struct ice_rss_hash_cfg *cfg)
{
+ struct ice_flow_seg_info *seg;
u64 val;
- u8 i;
+ u16 i;
- for_each_set_bit(i, (unsigned long *)&hash_fields,
- ICE_FLOW_FIELD_IDX_MAX)
- ice_flow_set_fld(segs, (enum ice_flow_field)i,
+ /* set inner most segment */
+ seg = &segs[seg_cnt - 1];
+
+ for_each_set_bit(i, (const unsigned long *)&cfg->hash_flds,
+ (u16)ICE_FLOW_FIELD_IDX_MAX)
+ ice_flow_set_fld(seg, (enum ice_flow_field)i,
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
- ICE_FLOW_SET_HDRS(segs, flow_hdr);
+ ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
+
+ /* set outer most header */
+ if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
+ segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER;
+ else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
+ segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER;
- if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
+ if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
return -EINVAL;
- val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
+ val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
if (val && !is_power_of_2(val))
return -EIO;
- val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
+ val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
if (val && !is_power_of_2(val))
return -EIO;
@@ -1956,6 +1985,39 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
}
/**
+ * ice_get_rss_hdr_type - get a RSS profile's header type
+ * @prof: RSS flow profile
+ */
+static enum ice_rss_cfg_hdr_type
+ice_get_rss_hdr_type(struct ice_flow_prof *prof)
+{
+ if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
+ return ICE_RSS_OUTER_HEADERS;
+ } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
+ const struct ice_flow_seg_info *s;
+
+ s = &prof->segs[ICE_RSS_OUTER_HEADERS];
+ if (s->hdrs == ICE_FLOW_SEG_HDR_NONE)
+ return ICE_RSS_INNER_HEADERS;
+ if (s->hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ return ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
+ if (s->hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ return ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
+ }
+
+ return ICE_RSS_ANY_HEADERS;
+}
+
+static bool
+ice_rss_match_prof(struct ice_rss_cfg *r, struct ice_flow_prof *prof,
+ enum ice_rss_cfg_hdr_type hdr_type)
+{
+ return (r->hash.hdr_type == hdr_type &&
+ r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
+ r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs);
+}
+
+/**
* ice_rem_rss_list - remove RSS configuration from list
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
@@ -1966,15 +2028,16 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
+ enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *tmp;
/* Search for RSS hash fields associated to the VSI that match the
* hash configurations associated to the flow profile. If found
* remove from the RSS entry list of the VSI context and delete entry.
*/
+ hdr_type = ice_get_rss_hdr_type(prof);
list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
- if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
- r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ if (ice_rss_match_prof(r, prof, hdr_type)) {
clear_bit(vsi_handle, r->vsis);
if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
list_del(&r->l_entry);
@@ -1995,11 +2058,12 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
static int
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
+ enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *rss_cfg;
+ hdr_type = ice_get_rss_hdr_type(prof);
list_for_each_entry(r, &hw->rss_list_head, l_entry)
- if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
- r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ if (ice_rss_match_prof(r, prof, hdr_type)) {
set_bit(vsi_handle, r->vsis);
return 0;
}
@@ -2009,8 +2073,10 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
if (!rss_cfg)
return -ENOMEM;
- rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
- rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
+ rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
+ rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
+ rss_cfg->hash.hdr_type = hdr_type;
+ rss_cfg->hash.symm = prof->symm;
set_bit(vsi_handle, rss_cfg->vsis);
list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
@@ -2018,65 +2084,177 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
return 0;
}
-#define ICE_FLOW_PROF_HASH_S 0
-#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
-#define ICE_FLOW_PROF_HDR_S 32
-#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
-#define ICE_FLOW_PROF_ENCAP_S 63
-#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
+/**
+ * ice_rss_config_xor_word - set the HSYMM registers for one input set word
+ * @hw: pointer to the hardware structure
+ * @prof_id: RSS hardware profile id
+ * @src: the FV index used by the protocol's source field
+ * @dst: the FV index used by the protocol's destination field
+ *
+ * Write to the HSYMM register with the index of @src FV the value of the @dst
+ * FV index. This will tell the hardware to XOR HSYMM[src] with INSET[dst]
+ * while calculating the RSS input set.
+ */
+static void
+ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
+{
+ u32 val, reg, bits_shift;
+ u8 reg_idx;
+
+ reg_idx = src / GLQF_HSYMM_REG_SIZE;
+ bits_shift = ((src % GLQF_HSYMM_REG_SIZE) << 3);
+ val = dst | GLQF_HSYMM_ENABLE_BIT;
+
+ reg = rd32(hw, GLQF_HSYMM(prof_id, reg_idx));
+ reg = (reg & ~(0xff << bits_shift)) | (val << bits_shift);
+ wr32(hw, GLQF_HSYMM(prof_id, reg_idx), reg);
+}
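For reference, a minimal user-space sketch (not part of the patch) of the byte-lane math above: each source word owns one 8-bit lane in a GLQF_HSYMM register, so src = 5 lands in register 1 at bit offset 8, and the stored value is the destination index with the enable bit set:

	#include <stdio.h>

	#define GLQF_HSYMM_REG_SIZE	4
	#define GLQF_HSYMM_ENABLE_BIT	(1u << 7)

	int main(void)
	{
		unsigned int src = 5, dst = 9;
		unsigned int reg_idx = src / GLQF_HSYMM_REG_SIZE;            /* 1 */
		unsigned int bits_shift = (src % GLQF_HSYMM_REG_SIZE) << 3;  /* 8 */
		unsigned int val = dst | GLQF_HSYMM_ENABLE_BIT;              /* 0x89 */

		printf("reg_idx=%u shift=%u val=0x%02x\n", reg_idx, bits_shift, val);
		return 0;
	}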
-#define ICE_RSS_OUTER_HEADERS 1
-#define ICE_RSS_INNER_HEADERS 2
+/**
+ * ice_rss_config_xor - set the symmetric registers for a profile's protocol
+ * @hw: pointer to the hardware structure
+ * @prof_id: RSS hardware profile id
+ * @src: the FV index used by the protocol's source field
+ * @dst: the FV index used by the protocol's destination field
+ * @len: length of the source/destination fields in words
+ */
+static void
+ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
+{
+ int fv_last_word =
+ ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ ice_rss_config_xor_word(hw, prof_id,
+ /* Yes, the field vector order in GLQF_HSYMM and
+ * GLQF_HINSET is reversed!
+ */
+ fv_last_word - (src + i),
+ fv_last_word - (dst + i));
+ ice_rss_config_xor_word(hw, prof_id,
+ fv_last_word - (dst + i),
+ fv_last_word - (src + i));
+ }
+}
-/* Flow profile ID format:
- * [0:31] - Packet match fields
- * [32:62] - Protocol header
- * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
+/**
+ * ice_rss_set_symm - set the symmetric settings for an RSS profile
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ *
+ * The symmetric hash will result from XORing the protocol's fields with
+ * indexes in GLQF_HSYMM and GLQF_HINSET. This function configures the profile's
+ * GLQF_HSYMM registers.
*/
-#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
- ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
- (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
- ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0)))
+static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof)
+{
+ struct ice_prof_map *map;
+ u8 prof_id, m;
+
+ mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+ map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
+ if (map)
+ prof_id = map->prof_id;
+ mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+
+ if (!map)
+ return;
+
+ /* clear to default */
+ for (m = 0; m < GLQF_HSYMM_REG_PER_PROF; m++)
+ wr32(hw, GLQF_HSYMM(prof_id, m), 0);
+
+ if (prof->symm) {
+ struct ice_flow_seg_xtrct *ipv4_src, *ipv4_dst;
+ struct ice_flow_seg_xtrct *ipv6_src, *ipv6_dst;
+ struct ice_flow_seg_xtrct *sctp_src, *sctp_dst;
+ struct ice_flow_seg_xtrct *tcp_src, *tcp_dst;
+ struct ice_flow_seg_xtrct *udp_src, *udp_dst;
+ struct ice_flow_seg_info *seg;
+
+ seg = &prof->segs[prof->segs_cnt - 1];
+
+ ipv4_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
+ ipv4_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
+
+ ipv6_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
+ ipv6_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
+
+ tcp_src = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
+ tcp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
+
+ udp_src = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
+ udp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
+
+ sctp_src = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
+ sctp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
+
+ /* xor IPv4 */
+ if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ ipv4_src->idx, ipv4_dst->idx, 2);
+
+ /* xor IPv6 */
+ if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ ipv6_src->idx, ipv6_dst->idx, 8);
+
+ /* xor TCP */
+ if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ tcp_src->idx, tcp_dst->idx, 1);
+
+ /* xor UDP */
+ if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ udp_src->idx, udp_dst->idx, 1);
+
+ /* xor SCTP */
+ if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ sctp_src->idx, sctp_dst->idx, 1);
+ }
+}
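The point of the pairwise XOR configuration above is order independence: once source and destination words are folded together, swapping them cannot change the RSS input. A toy sketch with assumed values, not driver code:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fold(uint32_t src, uint32_t dst)
	{
		return src ^ dst;	/* identical for (src, dst) and (dst, src) */
	}

	int main(void)
	{
		uint32_t a = 0xc0a80001u, b = 0xc0a80002u;	/* 192.168.0.1 / 192.168.0.2 */

		printf("%08x %08x\n", fold(a, b), fold(b, a));	/* both 00000003 */
		return 0;
	}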
/**
* ice_add_rss_cfg_sync - add an RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
- * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
- * @addl_hdrs: protocol header fields
- * @segs_cnt: packet segment count
+ * @cfg: configuration parameters
*
* Assumption: lock has already been acquired for RSS list
*/
static int
-ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs, u8 segs_cnt)
+ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
+ u8 segs_cnt;
int status;
- if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
- return -EINVAL;
+ segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
+ ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
- status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
- addl_hdrs);
+ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
if (status)
goto exit;
- /* Search for a flow profile that has matching headers, hash fields
- * and has the input VSI associated to it. If found, no further
+ /* Search for a flow profile that has matching headers, hash fields,
+ * symm and has the input VSI associated to it. If found, no further
* operations required and exit.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle,
+ cfg->symm, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS |
+ ICE_FLOW_FIND_PROF_CHK_SYMM |
ICE_FLOW_FIND_PROF_CHK_VSI);
if (prof)
goto exit;
@@ -2087,7 +2265,8 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* the protocol header and new hash field configuration.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
+ cfg->symm, vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_VSI);
if (prof) {
status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
if (!status)
@@ -2103,11 +2282,12 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
}
}
- /* Search for a profile that has same match fields only. If this
- * exists then associate the VSI to this profile.
+ /* Search for a profile that has the same match fields and symmetric
+ * setting. If this exists then associate the VSI to this profile.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle,
+ cfg->symm, vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_SYMM |
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (prof) {
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
@@ -2116,17 +2296,14 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
goto exit;
}
- /* Create a new flow profile with generated profile and packet
- * segment information.
- */
+ /* Create a new flow profile with packet segment information. */
status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
- ICE_FLOW_GEN_PROFID(hashed_flds,
- segs[segs_cnt - 1].hdrs,
- segs_cnt),
- segs, segs_cnt, &prof);
+ segs, segs_cnt, cfg->symm, &prof);
if (status)
goto exit;
+ prof->symm = cfg->symm;
+ ice_rss_set_symm(hw, prof);
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
/* If association to a new flow profile failed then this profile can
* be removed.
@@ -2146,30 +2323,43 @@ exit:
/**
* ice_add_rss_cfg - add an RSS configuration with specified hashed fields
* @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
- * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
- * @addl_hdrs: protocol header fields
+ * @vsi: VSI to add the RSS configuration to
+ * @cfg: configuration parameters
*
* This function will generate a flow profile based on fields associated with
* the input fields to hash on, the flow type and use the VSI number to add
* a flow entry to the profile.
*/
int
-ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs)
+ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
+ const struct ice_rss_hash_cfg *cfg)
{
+ struct ice_rss_hash_cfg local_cfg;
+ u16 vsi_handle;
int status;
- if (hashed_flds == ICE_HASH_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!vsi)
+ return -EINVAL;
+
+ vsi_handle = vsi->idx;
+ if (!ice_is_vsi_valid(hw, vsi_handle) ||
+ !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
+ cfg->hash_flds == ICE_HASH_INVALID)
return -EINVAL;
mutex_lock(&hw->rss_locks);
- status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
- ICE_RSS_OUTER_HEADERS);
- if (!status)
- status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
- addl_hdrs, ICE_RSS_INNER_HEADERS);
+ local_cfg = *cfg;
+ if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ } else {
+ local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ if (!status) {
+ local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ &local_cfg);
+ }
+ }
mutex_unlock(&hw->rss_locks);
return status;
@@ -2179,33 +2369,33 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* ice_rem_rss_cfg_sync - remove an existing RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
- * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
- * @addl_hdrs: Protocol header fields within a packet segment
- * @segs_cnt: packet segment count
+ * @cfg: configuration parameters
*
* Assumption: lock has already been acquired for RSS list
*/
static int
-ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs, u8 segs_cnt)
+ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
+ u8 segs_cnt;
int status;
+ segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
+ ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
- status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
- addl_hdrs);
+ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
if (status)
goto out;
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle,
+ cfg->symm, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) {
status = -ENOENT;
@@ -2233,31 +2423,39 @@ out:
* ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
- * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
- * @addl_hdrs: Protocol header fields within a packet segment
+ * @cfg: configuration parameters
*
* This function will lookup the flow profile based on the input
* hash field bitmap, iterate through the profile entry list of
* that profile and find entry associated with input VSI to be
- * removed. Calls are made to underlying flow s which will APIs
+ * removed. Calls are made to underlying flow apis which will in
* turn build or update buffers for RSS XLT1 section.
*/
-int __maybe_unused
-ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs)
+int
+ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg)
{
+ struct ice_rss_hash_cfg local_cfg;
int status;
- if (hashed_flds == ICE_HASH_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!ice_is_vsi_valid(hw, vsi_handle) ||
+ !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
+ cfg->hash_flds == ICE_HASH_INVALID)
return -EINVAL;
mutex_lock(&hw->rss_locks);
- status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
- ICE_RSS_OUTER_HEADERS);
- if (!status)
- status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
- addl_hdrs, ICE_RSS_INNER_HEADERS);
+ local_cfg = *cfg;
+ if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ } else {
+ local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ if (!status) {
+ local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle,
+ &local_cfg);
+ }
+ }
mutex_unlock(&hw->rss_locks);
return status;
@@ -2298,18 +2496,24 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
/**
* ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
* @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
+ * @vsi: VF's VSI
* @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
*
* This function will take the hash bitmap provided by the AVF driver via a
* message, convert it to ICE-compatible values, and configure RSS flow
* profiles.
*/
-int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
{
+ struct ice_rss_hash_cfg hcfg;
+ u16 vsi_handle;
int status = 0;
u64 hash_flds;
+ if (!vsi)
+ return -EINVAL;
+
+ vsi_handle = vsi->idx;
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
@@ -2379,8 +2583,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
if (rss_hash == ICE_HASH_INVALID)
return -EIO;
- status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
- ICE_FLOW_SEG_HDR_NONE);
+ hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ hcfg.hash_flds = rss_hash;
+ hcfg.hdr_type = ICE_RSS_ANY_HEADERS;
+ hcfg.symm = false;
+ status = ice_add_rss_cfg(hw, vsi, &hcfg);
if (status)
break;
}
@@ -2388,6 +2595,54 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
return status;
}
+static bool rss_cfg_symm_valid(u64 hfld)
+{
+ return !((!!(hfld & ICE_FLOW_HASH_FLD_IPV4_SA) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_IPV4_DA)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_IPV6_SA) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_IPV6_DA)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_TCP_SRC_PORT) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_TCP_DST_PORT)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_UDP_SRC_PORT) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_UDP_DST_PORT)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)));
+}
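Reading the check above: symmetric hashing only makes sense if, for each protocol, the source and destination fields are hashed as a pair. One pair of that test written long-hand (helper name hypothetical):

	#include <stdbool.h>
	#include <stdint.h>

	static bool pair_ok(uint64_t hfld, uint64_t src_bit, uint64_t dst_bit)
	{
		bool has_src = hfld & src_bit;
		bool has_dst = hfld & dst_bit;

		return has_src == has_dst;	/* both hashed, or neither */
	}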
+
+/**
+ * ice_set_rss_cfg_symm - set symmetry for all of a VSI's RSS configurations
+ * @hw: pointer to the hardware structure
+ * @vsi: VSI to set/unset Symmetric RSS
+ * @symm: TRUE to set Symmetric RSS hashing
+ */
+int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm)
+{
+ struct ice_rss_hash_cfg local;
+ struct ice_rss_cfg *r, *tmp;
+ u16 vsi_handle = vsi->idx;
+ int status = 0;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return -EINVAL;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) {
+ if (test_bit(vsi_handle, r->vsis) && r->hash.symm != symm) {
+ local = r->hash;
+ local.symm = symm;
+ if (symm && !rss_cfg_symm_valid(r->hash.hash_flds))
+ continue;
+
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &local);
+ if (status)
+ break;
+ }
+ }
+ mutex_unlock(&hw->rss_locks);
+
+ return status;
+}
+
/**
* ice_replay_rss_cfg - replay RSS configurations associated with VSI
* @hw: pointer to the hardware structure
@@ -2404,16 +2659,7 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry) {
if (test_bit(vsi_handle, r->vsis)) {
- status = ice_add_rss_cfg_sync(hw, vsi_handle,
- r->hashed_flds,
- r->packet_hdr,
- ICE_RSS_OUTER_HEADERS);
- if (status)
- break;
- status = ice_add_rss_cfg_sync(hw, vsi_handle,
- r->hashed_flds,
- r->packet_hdr,
- ICE_RSS_INNER_HEADERS);
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
if (status)
break;
}
@@ -2428,11 +2674,12 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hdrs: protocol header type
+ * @symm: whether the RSS is symmetric (bool, output)
*
* This function will return the match fields of the first instance of flow
* profile having the given header types and containing input VSI
*/
-u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm)
{
u64 rss_hash = ICE_HASH_INVALID;
struct ice_rss_cfg *r;
@@ -2444,8 +2691,9 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry)
if (test_bit(vsi_handle, r->vsis) &&
- r->packet_hdr == hdrs) {
- rss_hash = r->hashed_flds;
+ r->hash.addl_hdrs == hdrs) {
+ rss_hash = r->hash.hash_flds;
+ *symm = r->hash.symm;
break;
}
mutex_unlock(&hw->rss_locks);
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 96923ef0a5a8..2fd2e0cb483d 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -34,14 +34,16 @@
#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
-#define ICE_FLOW_HASH_GTP_TEID \
+#define ICE_FLOW_HASH_GTP_C_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
-#define ICE_FLOW_HASH_GTP_IPV4_TEID \
- (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
-#define ICE_FLOW_HASH_GTP_IPV6_TEID \
- (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
+#define ICE_FLOW_HASH_GTP_C_IPV4_TEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_C_TEID)
+#define ICE_FLOW_HASH_GTP_C_IPV6_TEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_C_TEID)
#define ICE_FLOW_HASH_GTP_U_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))
@@ -64,6 +66,20 @@
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
ICE_FLOW_HASH_GTP_U_EH_QFI)
+#define ICE_FLOW_HASH_GTP_U_UP \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID))
+#define ICE_FLOW_HASH_GTP_U_DWN \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID))
+
+#define ICE_FLOW_HASH_GTP_U_IPV4_UP \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_UP)
+#define ICE_FLOW_HASH_GTP_U_IPV6_UP \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_UP)
+#define ICE_FLOW_HASH_GTP_U_IPV4_DWN \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_DWN)
+#define ICE_FLOW_HASH_GTP_U_IPV6_DWN \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_DWN)
+
#define ICE_FLOW_HASH_PPPOE_SESS_ID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))
@@ -227,6 +243,26 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_MAX
};
+#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
+#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
+#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
+#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
+#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
+
+#define ICE_FLOW_HASH_FLD_GTPC_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_IP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_EH_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_UP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)
+
/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
/* Values 0 - 28 are reserved for future use */
@@ -279,6 +315,24 @@ enum ice_flow_avf_hdr_field {
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+enum ice_rss_cfg_hdr_type {
+ ICE_RSS_OUTER_HEADERS, /* take outer headers as input set. */
+ ICE_RSS_INNER_HEADERS, /* take inner headers as input set. */
+ /* take inner headers as input set for packet with outer IPv4. */
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4,
+ /* take inner headers as input set for packet with outer IPv6. */
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
+ /* take outer headers first, then inner headers, as input set */
+ ICE_RSS_ANY_HEADERS
+};
+
+struct ice_rss_hash_cfg {
+ u32 addl_hdrs; /* protocol header fields */
+ u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
+ enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */
+ bool symm; /* symmetric or asymmetric hash */
+};
+
enum ice_flow_dir {
ICE_FLOW_RX = 0x02,
};
@@ -289,8 +343,10 @@ enum ice_flow_priority {
ICE_FLOW_PRIO_HIGH
};
+#define ICE_FLOW_SEG_SINGLE 1
#define ICE_FLOW_SEG_MAX 2
#define ICE_FLOW_SEG_RAW_FLD_MAX 2
+#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48
#define ICE_FLOW_FV_EXTRACT_SZ 2
#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
@@ -372,20 +428,21 @@ struct ice_flow_prof {
/* software VSI handles referenced by this flow profile */
DECLARE_BITMAP(vsis, ICE_MAX_VSI);
+
+ bool symm; /* Symmetric Hash for RSS */
};
struct ice_rss_cfg {
struct list_head l_entry;
/* bitmap of VSIs added to the RSS entry */
DECLARE_BITMAP(vsis, ICE_MAX_VSI);
- u64 hashed_flds;
- u32 packet_hdr;
+ struct ice_rss_hash_cfg hash;
};
int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
- u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_prof **prof);
+ struct ice_flow_seg_info *segs, u8 segs_cnt,
+ bool symm, struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
@@ -401,13 +458,13 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm);
+int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
+ u64 hashed_flds);
int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-int
-ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs);
-int
-ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs);
-u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
+int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
+ const struct ice_rss_hash_cfg *cfg);
+int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg);
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm);
#endif /* _ICE_FLOW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 319a2d6fe26c..f81db6c107c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -286,10 +286,9 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
*
* Returns: zero on success, or a negative error code on failure.
*/
-static int
-ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
- u16 block_size, u8 *block, bool last_cmd,
- u8 *reset_level, struct netlink_ext_ack *extack)
+int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ u8 *reset_level, struct netlink_ext_ack *extack)
{
u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
index 750574885716..04b200462757 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.h
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -9,5 +9,8 @@ int ice_devlink_flash_update(struct devlink *devlink,
struct netlink_ext_ack *extack);
int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
struct netlink_ext_ack *extack);
+int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ u8 *reset_level, struct netlink_ext_ack *extack);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
new file mode 100644
index 000000000000..4fd15387a7e5
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include <linux/vmalloc.h>
+#include "ice.h"
+#include "ice_common.h"
+#include "ice_fwlog.h"
+
+bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings)
+{
+ u16 head, tail;
+
+ head = rings->head;
+ tail = rings->tail;
+
+ if (head < tail && (tail - head == (rings->size - 1)))
+ return true;
+ else if (head > tail && (tail == (head - 1)))
+ return true;
+
+ return false;
+}
+
+bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings)
+{
+ return rings->head == rings->tail;
+}
+
+void ice_fwlog_ring_increment(u16 *item, u16 size)
+{
+ *item = (*item + 1) & (size - 1);
+}
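The masked increment above only wraps correctly when the ring size is a power of two. A small stand-alone sketch of the wrap behaviour (types simplified):

	#include <stdio.h>

	static void ring_increment(unsigned short *item, unsigned short size)
	{
		*item = (*item + 1) & (size - 1);	/* requires power-of-two size */
	}

	int main(void)
	{
		unsigned short tail = 255;

		ring_increment(&tail, 256);
		printf("%u\n", tail);	/* 0 */
		return 0;
	}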
+
+static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings)
+{
+ int i, nr_bytes;
+ u8 *mem;
+
+ nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN;
+ mem = vzalloc(nr_bytes);
+ if (!mem)
+ return -ENOMEM;
+
+ for (i = 0; i < rings->size; i++) {
+ struct ice_fwlog_data *ring = &rings->rings[i];
+
+ ring->data_size = ICE_AQ_MAX_BUF_LEN;
+ ring->data = mem;
+ mem += ICE_AQ_MAX_BUF_LEN;
+ }
+
+ return 0;
+}
+
+static void ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings)
+{
+ int i;
+
+ for (i = 0; i < rings->size; i++) {
+ struct ice_fwlog_data *ring = &rings->rings[i];
+
+ /* the first ring is the base memory for the whole range so
+ * free it
+ */
+ if (!i)
+ vfree(ring->data);
+
+ ring->data = NULL;
+ ring->data_size = 0;
+ }
+}
+
+#define ICE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
+/**
+ * ice_fwlog_realloc_rings - reallocate the FW log rings
+ * @hw: pointer to the HW structure
+ * @index: the new index to use to allocate memory for the log data
+ *
+ */
+void ice_fwlog_realloc_rings(struct ice_hw *hw, int index)
+{
+ struct ice_fwlog_ring ring;
+ int status, ring_size;
+
+ /* Convert the number of bytes into a number of 4K buffers. Externally,
+ * the driver presents the FW log data interface as a number of bytes
+ * because that's easy for users to understand. Internally, the driver
+ * uses a ring of buffers because it doesn't know where the beginning
+ * and end of any line of log data is, so it has to overwrite data as
+ * complete blocks. When the data is returned to the user, the driver
+ * knows the data is correct and the FW log can be parsed correctly by
+ * the tools.
+ */
+ ring_size = ICE_FWLOG_INDEX_TO_BYTES(index) / ICE_AQ_MAX_BUF_LEN;
+ if (ring_size == hw->fwlog_ring.size)
+ return;
+
+ /* Allocate space for the new rings and buffers, then release the
+ * old rings and buffers. That way, if we don't have enough
+ * memory, we at least keep what we had before.
+ */
+ ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
+ if (!ring.rings)
+ return;
+
+ ring.size = ring_size;
+
+ status = ice_fwlog_alloc_ring_buffs(&ring);
+ if (status) {
+ dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
+ ice_fwlog_free_ring_buffs(&ring);
+ kfree(ring.rings);
+ return;
+ }
+
+ ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
+ kfree(hw->fwlog_ring.rings);
+
+ hw->fwlog_ring.rings = ring.rings;
+ hw->fwlog_ring.size = ring.size;
+ hw->fwlog_ring.index = index;
+ hw->fwlog_ring.head = 0;
+ hw->fwlog_ring.tail = 0;
+}
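To make the sizing concrete: with the macro above, and assuming ICE_AQ_MAX_BUF_LEN is 4 KiB, index 3 maps to 128 KiB << 3 = 1 MiB, which becomes a 256-entry ring, i.e. the ICE_FWLOG_RING_SIZE_DFLT default. A throwaway sketch:

	#include <stdio.h>

	#define INDEX_TO_BYTES(n)	((128 * 1024) << (n))	/* mirrors ICE_FWLOG_INDEX_TO_BYTES */
	#define AQ_BUF_LEN		4096			/* assumed ICE_AQ_MAX_BUF_LEN */

	int main(void)
	{
		printf("%d entries\n", INDEX_TO_BYTES(3) / AQ_BUF_LEN);	/* 256 */
		return 0;
	}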
+
+/**
+ * ice_fwlog_init - Initialize FW logging configuration
+ * @hw: pointer to the HW structure
+ *
+ * This function should be called on driver initialization during
+ * ice_init_hw().
+ */
+int ice_fwlog_init(struct ice_hw *hw)
+{
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return -EINVAL;
+
+ ice_fwlog_set_supported(hw);
+
+ if (ice_fwlog_supported(hw)) {
+ int status;
+
+ /* read the current config from the FW and store it */
+ status = ice_fwlog_get(hw, &hw->fwlog_cfg);
+ if (status)
+ return status;
+
+ hw->fwlog_ring.rings = kcalloc(ICE_FWLOG_RING_SIZE_DFLT,
+ sizeof(*hw->fwlog_ring.rings),
+ GFP_KERNEL);
+ if (!hw->fwlog_ring.rings) {
+ dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log rings\n");
+ return -ENOMEM;
+ }
+
+ hw->fwlog_ring.size = ICE_FWLOG_RING_SIZE_DFLT;
+ hw->fwlog_ring.index = ICE_FWLOG_RING_SIZE_INDEX_DFLT;
+
+ status = ice_fwlog_alloc_ring_buffs(&hw->fwlog_ring);
+ if (status) {
+ dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
+ ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
+ kfree(hw->fwlog_ring.rings);
+ return status;
+ }
+
+ ice_debugfs_fwlog_init(hw->back);
+ } else {
+ dev_warn(ice_hw_to_dev(hw), "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fwlog_deinit - unroll FW logging configuration
+ * @hw: pointer to the HW structure
+ *
+ * This function should be called in ice_deinit_hw().
+ */
+void ice_fwlog_deinit(struct ice_hw *hw)
+{
+ struct ice_pf *pf = hw->back;
+ int status;
+
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return;
+
+ ice_debugfs_pf_deinit(hw->back);
+
+ /* make sure FW logging is disabled to not put the FW in a weird state
+ * for the next driver load
+ */
+ hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
+ status = ice_fwlog_set(hw, &hw->fwlog_cfg);
+ if (status)
+ dev_warn(ice_hw_to_dev(hw), "Unable to turn off FW logging, status: %d\n",
+ status);
+
+ kfree(pf->ice_debugfs_pf_fwlog_modules);
+
+ pf->ice_debugfs_pf_fwlog_modules = NULL;
+
+ status = ice_fwlog_unregister(hw);
+ if (status)
+ dev_warn(ice_hw_to_dev(hw), "Unable to unregister FW logging, status: %d\n",
+ status);
+
+ if (hw->fwlog_ring.rings) {
+ ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
+ kfree(hw->fwlog_ring.rings);
+ }
+}
+
+/**
+ * ice_fwlog_supported - Report the cached value of whether FW supports FW logging
+ * @hw: pointer to the HW structure
+ *
+ * This will always return false if called before ice_init_hw(), so it must be
+ * called after ice_init_hw().
+ */
+bool ice_fwlog_supported(struct ice_hw *hw)
+{
+ return hw->fwlog_supported;
+}
+
+/**
+ * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
+ * @hw: pointer to the HW structure
+ * @entries: entries to configure
+ * @num_entries: number of @entries
+ * @options: options from ice_fwlog_cfg->options structure
+ * @log_resolution: logging resolution
+ */
+static int
+ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
+ u16 num_entries, u16 options, u16 log_resolution)
+{
+ struct ice_aqc_fw_log_cfg_resp *fw_modules;
+ struct ice_aqc_fw_log *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ int i;
+
+ fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
+ if (!fw_modules)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; i++) {
+ fw_modules[i].module_identifier =
+ cpu_to_le16(entries[i].module_id);
+ fw_modules[i].log_level = entries[i].log_level;
+ }
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd = &desc.params.fw_log;
+
+ cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID;
+ cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
+ cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
+
+ if (options & ICE_FWLOG_OPTION_ARQ_ENA)
+ cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN;
+ if (options & ICE_FWLOG_OPTION_UART_ENA)
+ cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN;
+
+ status = ice_aq_send_cmd(hw, &desc, fw_modules,
+ sizeof(*fw_modules) * num_entries,
+ NULL);
+
+ kfree(fw_modules);
+
+ return status;
+}
+
+/**
+ * ice_fwlog_set - Set the firmware logging settings
+ * @hw: pointer to the HW structure
+ * @cfg: config used to set firmware logging
+ *
+ * This function should be called whenever the driver needs to set the firmware
+ * logging configuration. It can be called on initialization, reset, or during
+ * runtime.
+ *
+ * If the PF wishes to receive FW logging then it must register via
+ * ice_fwlog_register. Note, that ice_fwlog_register does not need to be called
+ * for init.
+ */
+int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
+{
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ return ice_aq_fwlog_set(hw, cfg->module_entries,
+ ICE_AQC_FW_LOG_ID_MAX, cfg->options,
+ cfg->log_resolution);
+}
+
+/**
+ * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
+ * @hw: pointer to the HW structure
+ * @cfg: firmware logging configuration to populate
+ */
+static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
+{
+ struct ice_aqc_fw_log_cfg_resp *fw_modules;
+ struct ice_aqc_fw_log *cmd;
+ struct ice_aq_desc desc;
+ u16 module_id_cnt;
+ int status;
+ void *buf;
+ int i;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query);
+ cmd = &desc.params.fw_log;
+
+ cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY;
+
+ status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n");
+ goto status_out;
+ }
+
+ module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
+ if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) {
+ ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n");
+ } else if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) {
+ ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
+ ICE_AQC_FW_LOG_ID_MAX);
+ module_id_cnt = ICE_AQC_FW_LOG_ID_MAX;
+ }
+
+ cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
+ if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN)
+ cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
+ if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN)
+ cfg->options |= ICE_FWLOG_OPTION_UART_ENA;
+ if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED)
+ cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED;
+
+ fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf;
+
+ for (i = 0; i < module_id_cnt; i++) {
+ struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
+
+ cfg->module_entries[i].module_id =
+ le16_to_cpu(fw_module->module_identifier);
+ cfg->module_entries[i].log_level = fw_module->log_level;
+ }
+
+status_out:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * ice_fwlog_get - Get the firmware logging settings
+ * @hw: pointer to the HW structure
+ * @cfg: config to populate based on current firmware logging settings
+ */
+int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
+{
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ return ice_aq_fwlog_get(hw, cfg);
+}
+
+/**
+ * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
+ * @hw: pointer to the HW structure
+ * @reg: true to register and false to unregister
+ */
+static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register);
+
+ if (reg)
+ desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_fwlog_register - Register the PF for firmware logging
+ * @hw: pointer to the HW structure
+ *
+ * After this call the PF will start to receive firmware logging based on the
+ * configuration set in ice_fwlog_set.
+ */
+int ice_fwlog_register(struct ice_hw *hw)
+{
+ int status;
+
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ status = ice_aq_fwlog_register(hw, true);
+ if (status)
+ ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n");
+ else
+ hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * ice_fwlog_unregister - Unregister the PF from firmware logging
+ * @hw: pointer to the HW structure
+ */
+int ice_fwlog_unregister(struct ice_hw *hw)
+{
+ int status;
+
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ status = ice_aq_fwlog_register(hw, false);
+ if (status)
+ ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n");
+ else
+ hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * ice_fwlog_set_supported - Set if FW logging is supported by FW
+ * @hw: pointer to the HW struct
+ *
+ * If FW returns success to the ice_aq_fwlog_get call then it supports FW
+ * logging, else it doesn't. Set the fwlog_supported flag accordingly.
+ *
+ * This function is only meant to be called during driver init to determine if
+ * the FW support FW logging.
+ */
+void ice_fwlog_set_supported(struct ice_hw *hw)
+{
+ struct ice_fwlog_cfg *cfg;
+ int status;
+
+ hw->fwlog_supported = false;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return;
+
+ /* don't call ice_fwlog_get() because that would check to see if FW
+ * logging is supported which is what the driver is determining now
+ */
+ status = ice_aq_fwlog_get(hw, cfg);
+ if (status)
+ ice_debug(hw, ICE_DBG_FW_LOG, "ice_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
+ status);
+ else
+ hw->fwlog_supported = true;
+
+ kfree(cfg);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.h b/drivers/net/ethernet/intel/ice/ice_fwlog.h
new file mode 100644
index 000000000000..287e71fa4b86
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_FWLOG_H_
+#define _ICE_FWLOG_H_
+#include "ice_adminq_cmd.h"
+
+struct ice_hw;
+
+/* Only a single log level should be set and all log levels under the set value
+ * are enabled, e.g. if log level is set to ICE_FW_LOG_LEVEL_VERBOSE, then all
+ * other log levels are included (except ICE_FW_LOG_LEVEL_NONE)
+ */
+enum ice_fwlog_level {
+ ICE_FWLOG_LEVEL_NONE = 0,
+ ICE_FWLOG_LEVEL_ERROR = 1,
+ ICE_FWLOG_LEVEL_WARNING = 2,
+ ICE_FWLOG_LEVEL_NORMAL = 3,
+ ICE_FWLOG_LEVEL_VERBOSE = 4,
+ ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
+};
+
+struct ice_fwlog_module_entry {
+ /* module ID for the corresponding firmware logging event */
+ u16 module_id;
+ /* verbosity level for the module_id */
+ u8 log_level;
+};
+
+struct ice_fwlog_cfg {
+ /* list of modules for configuring log level */
+ struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX];
+ /* options used to configure firmware logging */
+ u16 options;
+#define ICE_FWLOG_OPTION_ARQ_ENA BIT(0)
+#define ICE_FWLOG_OPTION_UART_ENA BIT(1)
+ /* set before calling ice_fwlog_init() so the PF registers for firmware
+ * logging on initialization
+ */
+#define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
+ /* set in the ice_fwlog_get() response if the PF is registered for FW
+ * logging events over ARQ
+ */
+#define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3)
+
+ /* minimum number of log events sent per Admin Receive Queue event */
+ u16 log_resolution;
+};
+
+struct ice_fwlog_data {
+ u16 data_size;
+ u8 *data;
+};
+
+struct ice_fwlog_ring {
+ struct ice_fwlog_data *rings;
+ u16 index;
+ u16 size;
+ u16 head;
+ u16 tail;
+};
+
+#define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3
+#define ICE_FWLOG_RING_SIZE_DFLT 256
+#define ICE_FWLOG_RING_SIZE_MAX 512
+
+bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings);
+bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings);
+void ice_fwlog_ring_increment(u16 *item, u16 size);
+void ice_fwlog_set_supported(struct ice_hw *hw);
+bool ice_fwlog_supported(struct ice_hw *hw);
+int ice_fwlog_init(struct ice_hw *hw);
+void ice_fwlog_deinit(struct ice_hw *hw);
+int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int ice_fwlog_register(struct ice_hw *hw);
+int ice_fwlog_unregister(struct ice_hw *hw);
+void ice_fwlog_realloc_rings(struct ice_hw *hw, int index);
+#endif /* _ICE_FWLOG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 86936b758ade..cfac1d432c15 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -200,6 +200,8 @@
#define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12)
#define GLINT_VECT2FUNC_IS_PF_S 16
#define GLINT_VECT2FUNC_IS_PF_M BIT(16)
+#define PFINT_ALLOC 0x001D2600
+#define PFINT_ALLOC_FIRST ICE_M(0x7FF, 0)
#define PFINT_FW_CTL 0x0016C800
#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_FW_CTL_ITR_INDX_S 11
@@ -404,6 +406,10 @@
#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4))
#define GLQF_HMASK_SEL_MAX_INDEX 127
#define GLQF_HMASK_SEL_MASK_SEL_S 0
+#define GLQF_HSYMM(_i, _j) (0x0040F000 + ((_i) * 4 + (_j) * 512))
+#define GLQF_HSYMM_REG_SIZE 4
+#define GLQF_HSYMM_REG_PER_PROF 6
+#define GLQF_HSYMM_ENABLE_BIT BIT(7)
#define E800_PFQF_FD_CNT_FD_GCNT_M GENMASK(14, 0)
#define E830_PFQF_FD_CNT_FD_GCNT_M GENMASK(15, 0)
#define E800_PFQF_FD_CNT_FD_BCNT_M GENMASK(30, 16)
diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c
new file mode 100644
index 000000000000..e4c2c1bff6c0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_hwmon.h"
+#include "ice_adminq_cmd.h"
+
+#include <linux/hwmon.h>
+
+#define TEMP_FROM_REG(reg) ((reg) * 1000)
+
+static const struct hwmon_channel_info *ice_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_EMERGENCY),
+ NULL
+};
+
+static int ice_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct ice_aqc_get_sensor_reading_resp resp;
+ struct ice_pf *pf = dev_get_drvdata(dev);
+ int ret;
+
+ if (type != hwmon_temp)
+ return -EOPNOTSUPP;
+
+ ret = ice_aq_get_sensor_reading(&pf->hw, &resp);
+ if (ret) {
+ dev_warn_ratelimited(dev,
+ "%s HW read failure (%d)\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp);
+ break;
+ case hwmon_temp_max:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp_warning_threshold);
+ break;
+ case hwmon_temp_crit:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp_critical_threshold);
+ break;
+ case hwmon_temp_emergency:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp_fatal_threshold);
+ break;
+ default:
+ dev_dbg(dev, "%s unsupported attribute (%d)\n",
+ __func__, attr);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
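hwmon expects temperatures in millidegrees Celsius, which is all TEMP_FROM_REG provides; e.g. a raw sensor value of 57 is exposed as 57000. A trivial sketch of the scaling (raw value assumed):

	#include <stdio.h>

	#define TEMP_FROM_REG(reg)	((reg) * 1000)	/* degrees C -> millidegrees C */

	int main(void)
	{
		printf("%d\n", TEMP_FROM_REG(57));	/* 57000 */
		return 0;
	}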
+
+static umode_t ice_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_crit:
+ case hwmon_temp_max:
+ case hwmon_temp_emergency:
+ return 0444;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops ice_hwmon_ops = {
+ .is_visible = ice_hwmon_is_visible,
+ .read = ice_hwmon_read
+};
+
+static const struct hwmon_chip_info ice_chip_info = {
+ .ops = &ice_hwmon_ops,
+ .info = ice_hwmon_info
+};
+
+static bool ice_is_internal_reading_supported(struct ice_pf *pf)
+{
+ /* Only the first PF will report temperature for a chip.
+ * Note that internal temp reading is not supported
+ * for older FW (< v4.30).
+ */
+ if (pf->hw.pf_id)
+ return false;
+
+ unsigned long sensors = pf->hw.dev_caps.supported_sensors;
+
+ return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
+}
+
+void ice_hwmon_init(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct device *hdev;
+
+ if (!ice_is_internal_reading_supported(pf))
+ return;
+
+ hdev = hwmon_device_register_with_info(dev, "ice", pf, &ice_chip_info,
+ NULL);
+ if (IS_ERR(hdev)) {
+ dev_warn(dev,
+ "hwmon_device_register_with_info returns error (%ld)",
+ PTR_ERR(hdev));
+ return;
+ }
+ pf->hwmon_dev = hdev;
+}
+
+void ice_hwmon_exit(struct ice_pf *pf)
+{
+ if (!pf->hwmon_dev)
+ return;
+ hwmon_device_unregister(pf->hwmon_dev);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.h b/drivers/net/ethernet/intel/ice/ice_hwmon.h
new file mode 100644
index 000000000000..d66d40354f5a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_hwmon.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023, Intel Corporation. */
+
+#ifndef _ICE_HWMON_H_
+#define _ICE_HWMON_H_
+
+#ifdef CONFIG_ICE_HWMON
+void ice_hwmon_init(struct ice_pf *pf);
+void ice_hwmon_exit(struct ice_pf *pf);
+#else /* CONFIG_ICE_HWMON */
+static inline void ice_hwmon_init(struct ice_pf *pf) { }
+static inline void ice_hwmon_exit(struct ice_pf *pf) { }
+#endif /* CONFIG_ICE_HWMON */
+
+#endif /* _ICE_HWMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index b980f89dc892..1ccb572ce285 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -152,6 +152,27 @@ ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport)
}
/**
+ * ice_pkg_has_lport_extract - check if lport extraction supported
+ * @hw: HW struct
+ */
+static bool ice_pkg_has_lport_extract(struct ice_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) {
+ u16 offset;
+ u8 fv_prot;
+
+ ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i,
+ &fv_prot, &offset);
+ if (fv_prot == ICE_FV_PROT_MDID &&
+ offset == ICE_LP_EXT_BUF_OFFSET)
+ return true;
+ }
+ return false;
+}
+
+/**
* ice_lag_find_primary - returns pointer to primary interfaces lag struct
* @lag: local interfaces lag struct
*/
@@ -181,11 +202,12 @@ static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag)
* @act: rule action
* @recipe_id: recipe id for the new rule
* @rule_idx: pointer to rule index
+ * @direction: ICE_FLTR_RX or ICE_FLTR_TX
* @add: boolean on whether we are adding filters
*/
static int
ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
- bool add)
+ u8 direction, bool add)
{
struct ice_sw_rule_lkup_rx_tx *s_rule;
u16 s_rule_sz, vsi_num;
@@ -208,12 +230,18 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
eth_hdr = s_rule->hdr_data;
ice_fill_eth_hdr(eth_hdr);
- act |= (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) &
- ICE_SINGLE_ACT_VSI_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num);
- s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
s_rule->recipe_id = cpu_to_le16(recipe_id);
- s_rule->src = cpu_to_le16(hw->port_info->lport);
+ if (direction == ICE_FLTR_RX) {
+ s_rule->hdr.type =
+ cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->src = cpu_to_le16(hw->port_info->lport);
+ } else {
+ s_rule->hdr.type =
+ cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ s_rule->src = cpu_to_le16(vsi_num);
+ }
s_rule->act = cpu_to_le32(act);
s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN);
opc = ice_aqc_opc_add_sw_rules;
@@ -246,9 +274,27 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
{
u32 act = ICE_SINGLE_ACT_VSI_FORWARDING |
ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE;
+ int err;
+
+ err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id,
+ ICE_FLTR_RX, add);
+ if (err)
+ goto err_rx;
+
+ act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT |
+ ICE_SINGLE_ACT_LB_ENABLE;
+ err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_tx_rule_id,
+ ICE_FLTR_TX, add);
+ if (err)
+ goto err_tx;
+
+ return 0;
- return ice_lag_cfg_fltr(lag, act, lag->pf_recipe,
- &lag->pf_rule_id, add);
+err_tx:
+ ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id,
+ ICE_FLTR_RX, !add);
+err_rx:
+ return err;
}
/**
@@ -264,7 +310,7 @@ ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add)
ICE_SINGLE_ACT_DROP;
return ice_lag_cfg_fltr(lag, act, lag->lport_recipe,
- &lag->lport_rule_idx, add);
+ &lag->lport_rule_idx, ICE_FLTR_RX, add);
}
/**
@@ -290,7 +336,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
dev = ice_pf_to_dev(lag->pf);
/* interface not active - remove old default VSI rule */
- if (bonding_info->slave.state && lag->pf_rule_id) {
+ if (bonding_info->slave.state && lag->pf_rx_rule_id) {
if (ice_lag_cfg_dflt_fltr(lag, false))
dev_err(dev, "Error removing old default VSI filter\n");
if (ice_lag_cfg_drop_fltr(lag, true))
@@ -299,7 +345,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
}
/* interface becoming active - add new default VSI rule */
- if (!bonding_info->slave.state && !lag->pf_rule_id) {
+ if (!bonding_info->slave.state && !lag->pf_rx_rule_id) {
if (ice_lag_cfg_dflt_fltr(lag, true))
dev_err(dev, "Error adding new default VSI filter\n");
if (lag->lport_rule_idx && ice_lag_cfg_drop_fltr(lag, false))
@@ -471,7 +517,7 @@ static void
ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
u16 vsi_num, u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
@@ -570,6 +616,50 @@ resume_traffic:
}
/**
+ * ice_lag_build_netdev_list - populate the lag struct's netdev list
+ * @lag: local lag struct
+ * @ndlist: pointer to netdev list to populate
+ */
+static void ice_lag_build_netdev_list(struct ice_lag *lag,
+ struct ice_lag_netdev_list *ndlist)
+{
+ struct ice_lag_netdev_list *nl;
+ struct net_device *tmp_nd;
+
+ INIT_LIST_HEAD(&ndlist->node);
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+ nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
+ if (!nl)
+ break;
+
+ nl->netdev = tmp_nd;
+ list_add(&nl->node, &ndlist->node);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = &ndlist->node;
+}
+
+/**
+ * ice_lag_destroy_netdev_list - free lag struct's netdev list
+ * @lag: pointer to local lag struct
+ * @ndlist: pointer to lag struct netdev list
+ */
+static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
+ struct ice_lag_netdev_list *ndlist)
+{
+ struct ice_lag_netdev_list *entry, *n;
+
+ rcu_read_lock();
+ list_for_each_entry_safe(entry, n, &ndlist->node, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = NULL;
+}
+
+/**
* ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
* @lag: primary interface LAG struct
* @oldport: lport of previous interface
@@ -597,7 +687,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
{
struct ice_lag_netdev_list ndlist;
- struct list_head *tmp, *n;
u8 pri_port, act_port;
struct ice_lag *lag;
struct ice_vsi *vsi;
@@ -621,38 +710,15 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
pri_port = pf->hw.port_info->lport;
act_port = lag->active_port;
- if (lag->upper_netdev) {
- struct ice_lag_netdev_list *nl;
- struct net_device *tmp_nd;
-
- INIT_LIST_HEAD(&ndlist.node);
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
- nl = kzalloc(sizeof(*nl), GFP_KERNEL);
- if (!nl)
- break;
-
- nl->netdev = tmp_nd;
- list_add(&nl->node, &ndlist.node);
- }
- rcu_read_unlock();
- }
-
- lag->netdev_head = &ndlist.node;
+ if (lag->upper_netdev)
+ ice_lag_build_netdev_list(lag, &ndlist);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
lag->bonded && lag->primary && pri_port != act_port &&
!list_empty(lag->netdev_head))
ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
- list_for_each_safe(tmp, n, &ndlist.node) {
- struct ice_lag_netdev_list *entry;
-
- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
- list_del(&entry->node);
- kfree(entry);
- }
- lag->netdev_head = NULL;
+ ice_lag_destroy_netdev_list(lag, &ndlist);
new_vf_unlock:
mutex_unlock(&pf->lag_mutex);
@@ -674,11 +740,33 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
pf = lag->pf;
ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
- pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF)
ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
}
+/**
+ * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
+ * @lag: local lag struct
+ * @src_prt: lport value for source port
+ * @dst_prt: lport value for destination port
+ *
+ * This function is used to move nodes during an out-of-netdev-event situation,
+ * primarily when the driver needs to reconfigure or recreate resources.
+ *
+ * Must be called while holding the lag_mutex to prevent lag events from
+ * processing while out-of-sync moves are happening. Also, paired moves,
+ * such as those used in a reset flow, should both be made under the same
+ * mutex lock to avoid changes between the start and end of the reset.
+ */
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
+{
+ struct ice_lag_netdev_list ndlist;
+
+ ice_lag_build_netdev_list(lag, &ndlist);
+ ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+}
+
#define ICE_LAG_SRIOV_CP_RECIPE 10
#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
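The kernel-doc for ice_lag_move_vf_nodes_cfg() above requires the caller to hold lag_mutex and to issue paired moves under a single lock acquisition. A minimal caller sketch under those assumptions (the function name and the reset steps are illustrative, not part of this patch):

        static void example_lag_reset_flow(struct ice_pf *pf, u8 prim_lport, u8 act_lport)
        {
                mutex_lock(&pf->lag_mutex);

                /* pull VF scheduling nodes back to the primary port ... */
                ice_lag_move_vf_nodes_cfg(pf->lag, act_lport, prim_lport);

                /* ... recreate/reconfigure resources here ... */

                /* ... then return the nodes to the active port, still under
                 * the same lag_mutex hold so no netdev event can interleave.
                 */
                ice_lag_move_vf_nodes_cfg(pf->lag, prim_lport, act_lport);

                mutex_unlock(&pf->lag_mutex);
        }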
@@ -711,9 +799,7 @@ ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
ICE_SINGLE_ACT_LAN_ENABLE |
ICE_SINGLE_ACT_VALID_BIT |
- ((vsi->vsi_num <<
- ICE_SINGLE_ACT_VSI_ID_S) &
- ICE_SINGLE_ACT_VSI_ID_M));
+ FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi->vsi_num));
s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN);
memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN);
opc = ice_aqc_opc_add_sw_rules;
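Several hunks in this series replace open-coded shift-and-mask expressions with FIELD_PREP() from <linux/bitfield.h>. A small equivalence sketch using the VSI ID action field from the hunk above (the helper and its locals are illustrative only):

        #include <linux/bitfield.h>

        static u32 example_vsi_act(u16 vsi_num)
        {
                /* Both forms place vsi_num into the bits selected by
                 * ICE_SINGLE_ACT_VSI_ID_M; FIELD_PREP() derives the shift
                 * from the mask at compile time, so the separate _S shift
                 * define is no longer needed at the call site.
                 */
                u32 old_style = (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) &
                                ICE_SINGLE_ACT_VSI_ID_M;
                u32 new_style = FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num);

                WARN_ON(old_style != new_style); /* equivalent encodings */
                return new_style;
        }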
@@ -788,7 +874,7 @@ static void
ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
@@ -892,8 +978,7 @@ ice_lag_reclaim_vf_nodes(struct ice_lag *lag, struct ice_hw *src_hw)
pf = lag->pf;
ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
- pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF)
ice_for_each_traffic_class(tc)
ice_lag_reclaim_vf_tc(lag, src_hw, i, tc);
}
@@ -1166,7 +1251,7 @@ static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
}
/**
- * ice_lag_init_feature_support_flag - Check for NVM support for LAG
+ * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG
* @pf: PF struct
*/
static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
@@ -1179,7 +1264,7 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
else
ice_clear_feature_support(pf, ICE_F_ROCE_LAG);
- if (caps->sriov_lag)
+ if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw))
ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
else
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
@@ -1555,18 +1640,12 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
*/
static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
{
- struct ice_lag_netdev_list *entry;
struct ice_netdev_priv *np;
- struct net_device *netdev;
struct ice_pf *pf;
- list_for_each_entry(entry, lag->netdev_head, node) {
- netdev = entry->netdev;
- np = netdev_priv(netdev);
- pf = np->vsi->back;
-
- ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
- }
+ np = netdev_priv(lag->netdev);
+ pf = np->vsi->back;
+ ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
}
/**
@@ -1698,7 +1777,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
- nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL);
+ nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC);
if (!nd_list)
break;
@@ -1818,7 +1897,7 @@ static void
ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 vsi_num, u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
@@ -1921,8 +2000,7 @@ ice_lag_move_vf_nodes_sync(struct ice_lag *lag, struct ice_hw *dest_hw)
pf = lag->pf;
ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
- pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF)
ice_for_each_traffic_class(tc)
ice_lag_move_vf_nodes_tc_sync(lag, dest_hw, i,
tc);
@@ -1944,6 +2022,8 @@ int ice_init_lag(struct ice_pf *pf)
int n, err;
ice_lag_init_feature_support_flag(pf);
+ if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
+ return 0;
pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
if (!pf->lag)
@@ -1984,7 +2064,7 @@ int ice_init_lag(struct ice_pf *pf)
/* associate recipes to profiles */
for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
err = ice_aq_get_recipe_to_profile(&pf->hw, n,
- (u8 *)&recipe_bits, NULL);
+ &recipe_bits, NULL);
if (err)
continue;
@@ -1992,7 +2072,7 @@ int ice_init_lag(struct ice_pf *pf)
recipe_bits |= BIT(lag->pf_recipe) |
BIT(lag->lport_recipe);
ice_aq_map_recipe_to_profile(&pf->hw, n,
- (u8 *)&recipe_bits, NULL);
+ recipe_bits, NULL);
}
}
@@ -2057,7 +2137,6 @@ void ice_lag_rebuild(struct ice_pf *pf)
{
struct ice_lag_netdev_list ndlist;
struct ice_lag *lag, *prim_lag;
- struct list_head *tmp, *n;
u8 act_port, loc_port;
if (!pf->lag || !pf->lag->bonded)
@@ -2069,21 +2148,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
if (lag->primary) {
prim_lag = lag;
} else {
- struct ice_lag_netdev_list *nl;
- struct net_device *tmp_nd;
-
- INIT_LIST_HEAD(&ndlist.node);
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
- nl = kzalloc(sizeof(*nl), GFP_KERNEL);
- if (!nl)
- break;
-
- nl->netdev = tmp_nd;
- list_add(&nl->node, &ndlist.node);
- }
- rcu_read_unlock();
- lag->netdev_head = &ndlist.node;
+ ice_lag_build_netdev_list(lag, &ndlist);
prim_lag = ice_lag_find_primary(lag);
}
@@ -2107,19 +2172,13 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_lag_cfg_cp_fltr(lag, true);
- if (lag->pf_rule_id)
+ if (lag->pf_rx_rule_id)
if (ice_lag_cfg_dflt_fltr(lag, true))
dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
ice_clear_rdma_cap(pf);
lag_rebuild_out:
- list_for_each_safe(tmp, n, &ndlist.node) {
- struct ice_lag_netdev_list *entry;
-
- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
- list_del(&entry->node);
- kfree(entry);
- }
+ ice_lag_destroy_netdev_list(lag, &ndlist);
mutex_unlock(&pf->lag_mutex);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 9557e8605a07..bab2c83142a1 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -17,6 +17,9 @@ enum ice_lag_role {
#define ICE_LAG_INVALID_PORT 0xFF
#define ICE_LAG_RESET_RETRIES 5
+#define ICE_SW_DEFAULT_PROFILE 0
+#define ICE_FV_PROT_MDID 255
+#define ICE_LP_EXT_BUF_OFFSET 32
struct ice_pf;
struct ice_vf;
@@ -40,7 +43,8 @@ struct ice_lag {
u8 primary:1; /* this is primary */
u16 pf_recipe;
u16 lport_recipe;
- u16 pf_rule_id;
+ u16 pf_rx_rule_id;
+ u16 pf_tx_rule_id;
u16 cp_rule_idx;
u16 lport_rule_idx;
u8 role;
@@ -65,4 +69,5 @@ int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
bool ice_lag_is_switchdev_running(struct ice_pf *pf);
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
#endif /* _ICE_LAG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 89f986a75cc8..611577ebc29d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -160,64 +160,6 @@ struct ice_fltr_desc {
(0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S)
#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL
-struct ice_rx_ptype_decoded {
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:2;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum ice_rx_ptype_outer_ip {
- ICE_RX_PTYPE_OUTER_L2 = 0,
- ICE_RX_PTYPE_OUTER_IP = 1,
-};
-
-enum ice_rx_ptype_outer_ip_ver {
- ICE_RX_PTYPE_OUTER_NONE = 0,
- ICE_RX_PTYPE_OUTER_IPV4 = 1,
- ICE_RX_PTYPE_OUTER_IPV6 = 2,
-};
-
-enum ice_rx_ptype_outer_fragmented {
- ICE_RX_PTYPE_NOT_FRAG = 0,
- ICE_RX_PTYPE_FRAG = 1,
-};
-
-enum ice_rx_ptype_tunnel_type {
- ICE_RX_PTYPE_TUNNEL_NONE = 0,
- ICE_RX_PTYPE_TUNNEL_IP_IP = 1,
- ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum ice_rx_ptype_tunnel_end_prot {
- ICE_RX_PTYPE_TUNNEL_END_NONE = 0,
- ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum ice_rx_ptype_inner_prot {
- ICE_RX_PTYPE_INNER_PROT_NONE = 0,
- ICE_RX_PTYPE_INNER_PROT_UDP = 1,
- ICE_RX_PTYPE_INNER_PROT_TCP = 2,
- ICE_RX_PTYPE_INNER_PROT_SCTP = 3,
- ICE_RX_PTYPE_INNER_PROT_ICMP = 4,
- ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
-};
-
-enum ice_rx_ptype_payload_layer {
- ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
/* Rx Flex Descriptor
* This descriptor is used instead of the legacy version descriptor when
* ice_rlan_ctx.adv_desc is set
@@ -651,262 +593,4 @@ struct ice_tlan_ctx {
u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
-/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT ice_ptype_lkup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum ice_rx_l2_ptype to decode the packet type
- * ENDIF
- */
-
-/* macro to make the table lines short, use explicit indexing with [PTYPE] */
-#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = { \
- 1, \
- ICE_RX_PTYPE_OUTER_##OUTER_IP, \
- ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- ICE_RX_PTYPE_##OUTER_FRAG, \
- ICE_RX_PTYPE_TUNNEL_##T, \
- ICE_RX_PTYPE_TUNNEL_END_##TE, \
- ICE_RX_PTYPE_##TEF, \
- ICE_RX_PTYPE_INNER_PROT_##I, \
- ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
-#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG
-
-/* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */
-static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = {
- /* L2 Packet types */
- ICE_PTT_UNUSED_ENTRY(0),
- ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- ICE_PTT_UNUSED_ENTRY(2),
- ICE_PTT_UNUSED_ENTRY(3),
- ICE_PTT_UNUSED_ENTRY(4),
- ICE_PTT_UNUSED_ENTRY(5),
- ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT_UNUSED_ENTRY(8),
- ICE_PTT_UNUSED_ENTRY(9),
- ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT_UNUSED_ENTRY(12),
- ICE_PTT_UNUSED_ENTRY(13),
- ICE_PTT_UNUSED_ENTRY(14),
- ICE_PTT_UNUSED_ENTRY(15),
- ICE_PTT_UNUSED_ENTRY(16),
- ICE_PTT_UNUSED_ENTRY(17),
- ICE_PTT_UNUSED_ENTRY(18),
- ICE_PTT_UNUSED_ENTRY(19),
- ICE_PTT_UNUSED_ENTRY(20),
- ICE_PTT_UNUSED_ENTRY(21),
-
- /* Non Tunneled IPv4 */
- ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(25),
- ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(32),
- ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(39),
- ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(47),
- ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(54),
- ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(62),
- ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(69),
- ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
- ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(77),
- ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(84),
- ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(91),
- ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(98),
- ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(105),
- ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(113),
- ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(120),
- ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(128),
- ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(135),
- ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(143),
- ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(150),
- ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* unused entries */
- [154 ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-};
-
-static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
-{
- return ice_ptype_lkup[ptype];
-}
-
-
#endif /* _ICE_LAN_TX_RX_H_ */
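The block comment removed above documented how callers were expected to use the ptype lookup table. For reference, a compact sketch of that workflow written against the helpers deleted in this hunk (the function name is illustrative; after this patch the decode helpers no longer live in this header):

        static bool example_ptype_is_tcp(u16 ptype)
        {
                struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

                /* unknown ptype: nothing else in the bit-field is valid */
                if (!decoded.known)
                        return false;

                return decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP &&
                       decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_TCP;
        }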
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 4b1e56396293..5371e91f6bbb 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,7 +7,6 @@
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
-#include "ice_devlink.h"
#include "ice_vsi_vlan_ops.h"
/**
@@ -27,8 +26,6 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_CHNL";
case ICE_VSI_LB:
return "ICE_VSI_LB";
- case ICE_VSI_SWITCHDEV_CTRL:
- return "ICE_VSI_SWITCHDEV_CTRL";
default:
return "unknown";
}
@@ -144,7 +141,6 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -211,14 +207,6 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
- case ICE_VSI_SWITCHDEV_CTRL:
- /* The number of queues for ctrl VSI is equal to number of VFs.
- * Each ring is associated to the corresponding VF_PR netdev.
- */
- vsi->alloc_txq = ice_get_num_vfs(pf);
- vsi->alloc_rxq = vsi->alloc_txq;
- vsi->num_q_vectors = 1;
- break;
case ICE_VSI_VF:
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
@@ -515,24 +503,6 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
-{
- struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
- struct ice_pf *pf = q_vector->vsi->back;
- struct ice_vf *vf;
- unsigned int bkt;
-
- if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
- return IRQ_HANDLED;
-
- rcu_read_lock();
- ice_for_each_vf_rcu(pf, bkt, vf)
- napi_schedule(&vf->repr->q_vector->napi);
- rcu_read_unlock();
-
- return IRQ_HANDLED;
-}
-
/**
* ice_vsi_alloc_stat_arrays - Allocate statistics arrays
* @vsi: VSI pointer
@@ -595,10 +565,6 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
}
switch (vsi->type) {
- case ICE_VSI_SWITCHDEV_CTRL:
- /* Setup eswitch MSIX irq handler for VSI */
- vsi->irq_handler = ice_eswitch_msix_clean_rings;
- break;
case ICE_VSI_PF:
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
@@ -928,11 +894,6 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF;
break;
- case ICE_VSI_SWITCHDEV_CTRL:
- vsi->rss_table_size = ICE_LUT_VSI_SIZE;
- vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
- vsi->rss_lut_type = ICE_LUT_VSI;
- break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
* For VSI_LUT, LUT size should be set to 64 bytes.
@@ -969,9 +930,8 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
/* Traffic from VSI can be sent to LAN */
ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
/* allow all untagged/tagged packets by default on Tx */
- ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
- ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
- ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
+ ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL);
/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
* results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
*
@@ -979,15 +939,14 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
*/
if (ice_is_dvm_ena(hw)) {
ctxt->info.inner_vlan_flags |=
- ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
+ FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
ctxt->info.outer_vlan_flags =
- (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
ctxt->info.outer_vlan_flags |=
- (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
- ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
- ICE_AQ_VSI_OUTER_TAG_TYPE_M;
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M,
+ ICE_AQ_VSI_OUTER_TAG_VLAN_8100);
ctxt->info.outer_vlan_flags |=
FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
@@ -1066,10 +1025,8 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
- qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
- ICE_AQ_VSI_TC_Q_OFFSET_M) |
- ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
- ICE_AQ_VSI_TC_Q_NUM_M);
+ qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
+ qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
offset += num_rxq_per_tc;
tx_count += num_txq_per_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
@@ -1152,18 +1109,14 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
ctxt->info.max_fd_fltr_shared =
cpu_to_le16(vsi->num_bfltr);
/* default queue index within the VSI of the default FD */
- val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
- ICE_AQ_VSI_FD_DEF_Q_M);
+ val = FIELD_PREP(ICE_AQ_VSI_FD_DEF_Q_M, dflt_q);
/* target queue or queue group to the FD filter */
- val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
- ICE_AQ_VSI_FD_DEF_GRP_M);
+ val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_GRP_M, dflt_q_group);
ctxt->info.fd_def_q = cpu_to_le16(val);
/* queue index on which FD filter completion is reported */
- val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
- ICE_AQ_VSI_FD_REPORT_Q_M);
+ val = FIELD_PREP(ICE_AQ_VSI_FD_REPORT_Q_M, report_q);
/* priority of the default qindex action */
- val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
- ICE_AQ_VSI_FD_DEF_PRIORITY_M);
+ val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_PRIORITY_M, dflt_q_prio);
ctxt->info.fd_report_opt = cpu_to_le16(val);
}
@@ -1186,12 +1139,10 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
- hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
case ICE_VSI_VF:
/* VF VSI will gets a small RSS table which is a VSI LUT type */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
default:
dev_dbg(dev, "Unsupported VSI type %s\n",
@@ -1199,9 +1150,12 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
return;
}
- ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
- ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ vsi->rss_hfunc = hash_type;
+
+ ctxt->info.q_opt_rss =
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
}
static void
@@ -1215,10 +1169,8 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
pow = order_base_2(qcount);
- qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
- ICE_AQ_VSI_TC_Q_OFFSET_M) |
- ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
- ICE_AQ_VSI_TC_Q_NUM_M);
+ qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
+ qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
@@ -1267,7 +1219,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_CHNL:
ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
break;
@@ -1600,12 +1551,81 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
return;
}
- status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
if (status)
dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
vsi->vsi_num, status);
}
+static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
+ /* configure RSS for IPv4 with input set IP src/dst */
+ {ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for IPv6 with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
+ {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
+ {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for sctp4 with input set IP src/dst - only support
+ * RSS on SCTPv4 on outer headers (non-tunneled)
+ */
+ {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc4 with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_IPV4, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc4t with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_C_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4 with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4e with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_EH, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4u with input set IPv4 src/dst */
+ { ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_UP, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4d with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_DWN, ICE_RSS_OUTER_HEADERS, false},
+
+ /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
+ {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
+ {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for sctp6 with input set IPv6 src/dst - only support
+ * RSS on SCTPv6 on outer headers (non-tunneled)
+ */
+ {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
+ {ICE_FLOW_SEG_HDR_ESP,
+ ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc6 with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_IPV6, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc6t with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_C_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6 with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6e with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_EH, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6u with input set IPv6 src/dst */
+ { ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_UP, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6d with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_DWN, ICE_RSS_OUTER_HEADERS, false},
+};
+
/**
* ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
* @vsi: VSI to be configured
@@ -1619,11 +1639,12 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
*/
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
- u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
+ u16 vsi_num = vsi->vsi_num;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct device *dev;
int status;
+ u32 i;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@@ -1631,87 +1652,14 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
vsi_num);
return;
}
- /* configure RSS for IPv4 with input set IP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
- ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for IPv6 with input set IPv6 src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
- ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
- ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
- ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for sctp4 with input set IP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
- ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
- ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
- ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for sctp6 with input set IPv6 src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
- ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
- ICE_FLOW_SEG_HDR_ESP);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
- vsi_num, status);
-}
+ for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) {
+ const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i];
-/**
- * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
- * @vsi: VSI
- */
-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
-{
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- vsi->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
- } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
- } else {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_3072;
+ status = ice_add_rss_cfg(hw, vsi, cfg);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n",
+ cfg->addl_hdrs, cfg->hash_flds,
+ cfg->hdr_type, cfg->symm);
}
}
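Folding the per-flow ice_add_rss_cfg() calls into default_rss_cfgs[] makes extending the default RSS set a data change rather than a code change. A hypothetical extra entry, built only from identifiers already used in the table above (illustrative; not part of this patch, and not necessarily a valid hardware profile):

        /* hypothetical: RSS for ESP over IPv6, hashed on the SPI */
        {ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_IPV6,
         ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},

The loop in ice_vsi_set_rss_flow_fld() would pick such an entry up without further changes, reporting failures through the same dev_dbg() path.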
@@ -1808,11 +1756,8 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
QRXFLXP_CNTXT_RXDID_PRIO_M |
QRXFLXP_CNTXT_TS_M);
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
+ regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_IDX_M, rxdid);
+ regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_PRIO_M, prio);
if (ena_ts)
/* Enable TimeSync on this queue */
@@ -1821,114 +1766,6 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
-{
- if (q_idx >= vsi->num_rxq)
- return -EINVAL;
-
- return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
-}
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
-{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
-
- if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
- return -EINVAL;
-
- qg_buf->num_txqs = 1;
-
- return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
-}
-
-/**
- * ice_vsi_cfg_rxqs - Configure the VSI for Rx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Rx VSI for operation.
- */
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
-{
- u16 i;
-
- if (vsi->type == ICE_VSI_VF)
- goto setup_rings;
-
- ice_vsi_cfg_frame_size(vsi);
-setup_rings:
- /* set up individual rings */
- ice_for_each_rxq(vsi, i) {
- int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
-
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_vsi_cfg_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- * @rings: Tx ring array to be configured
- * @count: number of Tx ring array elements
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
-{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
- int err = 0;
- u16 q_idx;
-
- qg_buf->num_txqs = 1;
-
- for (q_idx = 0; q_idx < count; q_idx++) {
- err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
- if (err)
- break;
- }
-
- return err;
-}
-
-/**
- * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
-{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
-}
-
-/**
- * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx queues dedicated for XDP in given VSI for operation.
- */
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
-{
- int ret;
- int i;
-
- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
- if (ret)
- return ret;
-
- ice_for_each_rxq(vsi, i)
- ice_tx_xsk_pool(vsi, i);
-
- return 0;
-}
-
/**
* ice_intrl_usec_to_reg - convert interrupt rate limit to register value
* @intrl: interrupt rate limit in usecs
@@ -2263,7 +2100,6 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
- case ICE_VSI_SWITCHDEV_CTRL:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2371,6 +2207,9 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
} else {
max_txqs[i] = vsi->alloc_txq;
}
+
+ if (vsi->type == ICE_VSI_PF)
+ max_txqs[i] += vsi->num_xdp_txq;
}
dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
@@ -2388,10 +2227,8 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
/**
* ice_vsi_cfg_def - configure default VSI based on the type
* @vsi: pointer to VSI
- * @params: the parameters to configure this VSI with
*/
-static int
-ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+static int ice_vsi_cfg_def(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
struct ice_pf *pf = vsi->back;
@@ -2399,7 +2236,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
vsi->vsw = pf->first_sw;
- ret = ice_vsi_alloc_def(vsi, params->ch);
+ ret = ice_vsi_alloc_def(vsi, vsi->ch);
if (ret)
return ret;
@@ -2424,7 +2261,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
- ret = ice_vsi_init(vsi, params->flags);
+ ret = ice_vsi_init(vsi, vsi->flags);
if (ret)
goto unroll_get_qs;
@@ -2432,7 +2269,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
switch (vsi->type) {
case ICE_VSI_CTRL:
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2447,6 +2283,10 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
+
+ /* Associate q_vector rings with napi */
+ ice_vsi_set_napi_queues(vsi);
+
vsi->stat_offsets_loaded = false;
if (ice_is_xdp_ena_vsi(vsi)) {
@@ -2541,23 +2381,16 @@ unroll_vsi_alloc:
/**
* ice_vsi_cfg - configure a previously allocated VSI
* @vsi: pointer to VSI
- * @params: parameters used to configure this VSI
*/
-int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+int ice_vsi_cfg(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int ret;
- if (WARN_ON(params->type == ICE_VSI_VF && !params->vf))
+ if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
- vsi->type = params->type;
- vsi->port_info = params->pi;
-
- /* For VSIs which don't have a connected VF, this will be NULL */
- vsi->vf = params->vf;
-
- ret = ice_vsi_cfg_def(vsi, params);
+ ret = ice_vsi_cfg_def(vsi);
if (ret)
return ret;
@@ -2620,10 +2453,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
- if (vsi->agg_node) {
- vsi->agg_node->valid = false;
- vsi->agg_node->agg_id = 0;
- }
}
/**
@@ -2647,7 +2476,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
* a port_info structure for it.
*/
if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
- WARN_ON(!params->pi))
+ WARN_ON(!params->port_info))
return NULL;
vsi = ice_vsi_alloc(pf);
@@ -2656,7 +2485,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
return NULL;
}
- ret = ice_vsi_cfg(vsi, params);
+ vsi->params = *params;
+ ret = ice_vsi_cfg(vsi);
if (ret)
goto err_vsi_cfg;
@@ -2865,65 +2695,126 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
} else {
ice_vsi_close(vsi);
}
- } else if (vsi->type == ICE_VSI_CTRL ||
- vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
+ } else if (vsi->type == ICE_VSI_CTRL) {
ice_vsi_close(vsi);
}
}
/**
- * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
- * @vsi: the VSI being un-configured
+ * __ice_queue_set_napi - Set the napi instance for the queue
+ * @dev: device to which NAPI and queue belong
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ * @locked: is the rtnl_lock already held
+ *
+ * Set the napi instance for the queue. Caller indicates the lock status.
*/
-void ice_vsi_dis_irq(struct ice_vsi *vsi)
+static void
+__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi,
+ bool locked)
+{
+ if (!locked)
+ rtnl_lock();
+ netif_queue_set_napi(dev, queue_index, type, napi);
+ if (!locked)
+ rtnl_unlock();
+}
+
+/**
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @vsi: VSI being configured
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ *
+ * Set the napi instance for the queue. The rtnl lock state is derived from the
+ * execution path.
+ */
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi)
{
struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
- int i;
- /* disable interrupt causation from each queue */
- if (vsi->tx_rings) {
- ice_for_each_txq(vsi, i) {
- if (vsi->tx_rings[i]) {
- u16 reg;
+ if (!vsi->netdev)
+ return;
- reg = vsi->tx_rings[i]->reg_idx;
- val = rd32(hw, QINT_TQCTL(reg));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(reg), val);
- }
- }
- }
+ if (current_work() == &pf->serv_task ||
+ test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
+ test_bit(ICE_DOWN, pf->state) ||
+ test_bit(ICE_SUSPENDED, pf->state))
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ false);
+ else
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ true);
+}
- if (vsi->rx_rings) {
- ice_for_each_rxq(vsi, i) {
- if (vsi->rx_rings[i]) {
- u16 reg;
+/**
+ * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ * @locked: is the rtnl_lock already held
+ *
+ * Associate the q_vector napi with all the queue[s] on the vector.
+ * Caller indicates the lock status.
+ */
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
- reg = vsi->rx_rings[i]->reg_idx;
- val = rd32(hw, QINT_RQCTL(reg));
- val &= ~QINT_RQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_RQCTL(reg), val);
- }
- }
- }
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+ locked);
- /* disable each interrupt */
- ice_for_each_q_vector(vsi, i) {
- if (!vsi->q_vectors[i])
- continue;
- wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
- }
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+ locked);
+ /* Also set the interrupt number for the NAPI */
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
- ice_flush(hw);
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ *
+ * Associate the q_vector napi with all the queue[s] on the vector
+ */
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
+{
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
- /* don't call synchronize_irq() for VF's from the host */
- if (vsi->type == ICE_VSI_VF)
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
+
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
+ /* Also set the interrupt number for the NAPI */
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_vsi_set_napi_queues
+ * @vsi: VSI pointer
+ *
+ * Associate queue[s] with napi for all vectors
+ */
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->netdev)
return;
ice_for_each_q_vector(vsi, i)
- synchronize_irq(vsi->q_vectors[i]->irq.virq);
+ ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
}
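The helpers above feed the queue-to-NAPI mapping into the core stack via netif_queue_set_napi(), which must run under rtnl; __ice_queue_set_napi() takes the lock only when the caller does not already hold it. A bare sketch of the underlying core call for a single Rx queue, outside this driver's wrappers (purely illustrative; netdev and q_vector are assumed to exist in the caller):

        rtnl_lock();
        netif_queue_set_napi(netdev, /* queue_index */ 0,
                             NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
        rtnl_unlock();

ice_queue_set_napi() chooses the already-unlocked path when it runs from the service task or while the PF is resetting, down, or suspended, since rtnl is not held in those contexts.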
/**
@@ -3071,27 +2962,26 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
}
/**
- * ice_vsi_realloc_stat_arrays - Frees unused stat structures
+ * ice_vsi_realloc_stat_arrays - Frees unused stat structures or allocates new ones
* @vsi: VSI pointer
- * @prev_txq: Number of Tx rings before ring reallocation
- * @prev_rxq: Number of Rx rings before ring reallocation
*/
-static void
-ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
+static int
+ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
{
+ u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
+ u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
+ struct ice_ring_stats **tx_ring_stats;
+ struct ice_ring_stats **rx_ring_stats;
struct ice_vsi_stats *vsi_stat;
struct ice_pf *pf = vsi->back;
+ u16 prev_txq = vsi->alloc_txq;
+ u16 prev_rxq = vsi->alloc_rxq;
int i;
- if (!prev_txq || !prev_rxq)
- return;
- if (vsi->type == ICE_VSI_CHNL)
- return;
-
vsi_stat = pf->vsi_stats[vsi->idx];
- if (vsi->num_txq < prev_txq) {
- for (i = vsi->num_txq; i < prev_txq; i++) {
+ if (req_txq < prev_txq) {
+ for (i = req_txq; i < prev_txq; i++) {
if (vsi_stat->tx_ring_stats[i]) {
kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
@@ -3099,14 +2989,36 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
}
}
- if (vsi->num_rxq < prev_rxq) {
- for (i = vsi->num_rxq; i < prev_rxq; i++) {
+ tx_ring_stats = vsi_stat->tx_ring_stats;
+ vsi_stat->tx_ring_stats =
+ krealloc_array(vsi_stat->tx_ring_stats, req_txq,
+ sizeof(*vsi_stat->tx_ring_stats),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vsi_stat->tx_ring_stats) {
+ vsi_stat->tx_ring_stats = tx_ring_stats;
+ return -ENOMEM;
+ }
+
+ if (req_rxq < prev_rxq) {
+ for (i = req_rxq; i < prev_rxq; i++) {
if (vsi_stat->rx_ring_stats[i]) {
kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
}
}
}
+
+ rx_ring_stats = vsi_stat->rx_ring_stats;
+ vsi_stat->rx_ring_stats =
+ krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
+ sizeof(*vsi_stat->rx_ring_stats),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vsi_stat->rx_ring_stats) {
+ vsi_stat->rx_ring_stats = rx_ring_stats;
+ return -ENOMEM;
+ }
+
+ return 0;
}
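The reallocation above uses a keep-the-old-pointer pattern: the current array is stashed in a local before krealloc_array(), and on failure the original buffer (which krealloc leaves untouched) is restored so nothing is leaked or lost. The generic shape, with placeholder names:

        old = arr;
        arr = krealloc_array(arr, new_count, sizeof(*arr),
                             GFP_KERNEL | __GFP_ZERO);
        if (!arr) {
                arr = old;              /* original buffer is still valid */
                return -ENOMEM;
        }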
/**
@@ -3121,22 +3033,28 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
*/
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
- struct ice_vsi_cfg_params params = {};
struct ice_coalesce_stored *coalesce;
- int ret, prev_txq, prev_rxq;
- int prev_num_q_vectors = 0;
+ int prev_num_q_vectors;
struct ice_pf *pf;
+ int ret;
if (!vsi)
return -EINVAL;
- params = ice_vsi_to_params(vsi);
- params.flags = vsi_flags;
-
+ vsi->flags = vsi_flags;
pf = vsi->back;
if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
+ ret = ice_vsi_realloc_stat_arrays(vsi);
+ if (ret)
+ goto err_vsi_cfg;
+
+ ice_vsi_decfg(vsi);
+ ret = ice_vsi_cfg_def(vsi);
+ if (ret)
+ goto err_vsi_cfg;
+
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
if (!coalesce)
@@ -3144,14 +3062,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
- prev_txq = vsi->num_txq;
- prev_rxq = vsi->num_rxq;
-
- ice_vsi_decfg(vsi);
- ret = ice_vsi_cfg_def(vsi, &params);
- if (ret)
- goto err_vsi_cfg;
-
ret = ice_vsi_cfg_tc_lan(pf, vsi);
if (ret) {
if (vsi_flags & ICE_VSI_FLAG_INIT) {
@@ -3163,8 +3073,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
return ice_schedule_reset(pf, ICE_RESET_PFR);
}
- ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
-
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
kfree(coalesce);
@@ -3172,8 +3080,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
err_vsi_cfg_tc_lan:
ice_vsi_decfg(vsi);
-err_vsi_cfg:
kfree(coalesce);
+err_vsi_cfg:
return ret;
}
@@ -3316,9 +3224,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
pow = order_base_2(tc0_qcount);
- qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
- ICE_AQ_VSI_TC_Q_OFFSET_M) |
- ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
+ qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset);
+ qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index f24f5d1e6f9c..94ce8964dda6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -11,57 +11,12 @@
#define ICE_VSI_FLAG_INIT BIT(0)
#define ICE_VSI_FLAG_NO_INIT 0
-/**
- * struct ice_vsi_cfg_params - VSI configuration parameters
- * @pi: pointer to the port_info instance for the VSI
- * @ch: pointer to the channel structure for the VSI, may be NULL
- * @vf: pointer to the VF associated with this VSI, may be NULL
- * @type: the type of VSI to configure
- * @flags: VSI flags used for rebuild and configuration
- *
- * Parameter structure used when configuring a new VSI.
- */
-struct ice_vsi_cfg_params {
- struct ice_port_info *pi;
- struct ice_channel *ch;
- struct ice_vf *vf;
- enum ice_vsi_type type;
- u32 flags;
-};
-
-/**
- * ice_vsi_to_params - Get parameters for an existing VSI
- * @vsi: the VSI to get parameters for
- *
- * Fill a parameter structure for reconfiguring a VSI with its current
- * parameters, such as during a rebuild operation.
- */
-static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi)
-{
- struct ice_vsi_cfg_params params = {};
-
- params.pi = vsi->port_info;
- params.ch = vsi->ch;
- params.vf = vsi->vf;
- params.type = vsi->type;
-
- return params;
-}
-
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
bool ice_pf_state_is_nominal(struct ice_pf *pf);
void ice_update_eth_stats(struct ice_vsi *vsi);
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
-
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
-
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
-
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
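With struct ice_vsi_cfg_params and ice_vsi_to_params() removed from this header, rebuild paths no longer snapshot parameters into a temporary; per the ice_lib.c hunks above, setup stores them on the VSI once and later calls pass only the per-invocation flags. A condensed caller-side sketch under those assumptions (fragment only, error handling omitted):

        /* setup: parameters are captured on the VSI itself */
        vsi->params = *params;
        ret = ice_vsi_cfg(vsi);

        /* later rebuild: only the flags for this pass are supplied */
        ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);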
@@ -72,8 +27,6 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
-
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -91,6 +44,16 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi);
+
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
+
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
+
int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
@@ -101,7 +64,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi);
void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
-int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params);
+int ice_vsi_cfg(struct ice_vsi *vsi);
bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
@@ -110,8 +73,6 @@ void
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
bool ena_ts);
-void ice_vsi_dis_irq(struct ice_vsi *vsi);
-
void ice_vsi_free_irq(struct ice_vsi *vsi);
void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 6607fa6fe556..f60c022f7960 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -13,7 +13,9 @@
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
+#include "devlink/devlink_port.h"
+#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
* ice tracepoint functions. This must be done exactly once across the
* ice driver.
@@ -35,6 +37,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
@@ -612,7 +615,7 @@ skip:
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_prepare_for_reset(pf);
+ ice_ptp_prepare_for_reset(pf, reset_type);
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_exit(pf);
@@ -979,7 +982,7 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
* Octets 13 - 20 are TSA values - leave as zeros
*/
buf[5] = 0x64;
- len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
offset += len + 2;
tlv = (struct ice_lldp_org_tlv *)
((char *)tlv + sizeof(tlv->typelen) + len);
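The conversions in this patch lean on FIELD_GET() from <linux/bitfield.h>, which extracts a field using only its mask and shifts the result down by the mask's lowest set bit, so the separate *_S shift constants become unnecessary. A minimal sketch of the equivalence, using a hypothetical 9-bit length field in the low bits (the real ICE_LLDP_TLV_LEN_M value is defined elsewhere in the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_TLV_LEN_M	GENMASK(8, 0)	/* hypothetical mask, low 9 bits */

static inline u16 example_tlv_len(u16 typelen)
{
	/* equivalent to (typelen & EXAMPLE_TLV_LEN_M) >> 0 */
	return FIELD_GET(EXAMPLE_TLV_LEN_M, typelen);
}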
@@ -1013,7 +1016,7 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
/* Octet 1 left as all zeros - PFC disabled */
buf[0] = 0x08;
- len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
offset += len + 2;
if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
@@ -1252,6 +1255,32 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
}
/**
+ * ice_get_fwlog_data - copy the FW log data from ARQ event
+ * @pf: PF that the FW log event is associated with
+ * @event: event structure containing FW log data
+ */
+static void
+ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ struct ice_fwlog_data *fwlog;
+ struct ice_hw *hw = &pf->hw;
+
+ fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
+
+ memset(fwlog->data, 0, PAGE_SIZE);
+ fwlog->data_size = le16_to_cpu(event->desc.datalen);
+
+ memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
+ ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
+
+ if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
+ /* the rings are full so bump the head to create room */
+ ice_fwlog_ring_increment(&hw->fwlog_ring.head,
+ hw->fwlog_ring.size);
+ }
+}
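ice_fwlog_ring_increment() and ice_fwlog_ring_full() are defined elsewhere in the driver; the head/tail handling above assumes a simple wrap-around index update along the lines of the following sketch (the power-of-two ring size here is illustrative, not taken from the patch):

/* advance a ring index with wrap-around; ring_size is assumed to be a
 * power of two in this sketch
 */
static inline void example_ring_increment(u16 *idx, u16 ring_size)
{
	*idx = (*idx + 1) & (ring_size - 1);
}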
+
+/**
* ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
* @task: intermediate helper storage and identifier for waiting
@@ -1532,8 +1561,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_vc_process_vf_msg(pf, &event, &data);
break;
- case ice_aqc_opc_fw_logging:
- ice_output_fw_log(hw, &event.desc, event.msg_buf);
+ case ice_aqc_opc_fw_logs_event:
+ ice_get_fwlog_data(pf, &event);
break;
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
@@ -1622,8 +1651,10 @@ static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- /* Nothing to do here if sideband queue is not supported */
- if (!ice_is_sbq_supported(hw)) {
+ /* if mac_type is not generic, sideband is not supported
+ * and there's nothing to do here
+ */
+ if (!ice_is_generic_mac(hw)) {
clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
return;
}
@@ -1716,6 +1747,39 @@ static void ice_service_timer(struct timer_list *t)
}
/**
+ * ice_mdd_maybe_reset_vf - reset VF after MDD event
+ * @pf: pointer to the PF structure
+ * @vf: pointer to the VF structure
+ * @reset_vf_tx: whether Tx MDD has occurred
+ * @reset_vf_rx: whether Rx MDD has occurred
+ *
+ * Since the queue can get stuck on VF MDD events, the PF can be configured to
+ * automatically reset the VF by enabling the private ethtool flag
+ * mdd-auto-reset-vf.
+ */
+static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
+ bool reset_vf_tx, bool reset_vf_rx)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ return;
+
+ /* VF MDD event counters will be cleared by reset, so print the event
+ * prior to reset.
+ */
+ if (reset_vf_tx)
+ ice_print_vf_tx_mdd_event(vf);
+
+ if (reset_vf_rx)
+ ice_print_vf_rx_mdd_event(vf);
+
+ dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
+ pf->hw.pf_id, vf->vf_id);
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+}
+
+/**
* ice_handle_mdd_event - handle malicious driver detect event
* @pf: pointer to the PF structure
*
@@ -1744,14 +1808,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
/* find what triggered an MDD event */
reg = rd32(hw, GL_MDET_TX_PQM);
if (reg & GL_MDET_TX_PQM_VALID_M) {
- u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
- GL_MDET_TX_PQM_PF_NUM_S;
- u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
- GL_MDET_TX_PQM_VF_NUM_S;
- u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
- GL_MDET_TX_PQM_MAL_TYPE_S;
- u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
- GL_MDET_TX_PQM_QNUM_S);
+ u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
+ u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
+ u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
+ u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
@@ -1761,14 +1821,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
if (reg & GL_MDET_TX_TCLAN_VALID_M) {
- u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
- GL_MDET_TX_TCLAN_PF_NUM_S;
- u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
- GL_MDET_TX_TCLAN_VF_NUM_S;
- u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
- GL_MDET_TX_TCLAN_MAL_TYPE_S;
- u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
- GL_MDET_TX_TCLAN_QNUM_S);
+ u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
+ u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
+ u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
+ u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
@@ -1778,14 +1834,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, GL_MDET_RX);
if (reg & GL_MDET_RX_VALID_M) {
- u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
- GL_MDET_RX_PF_NUM_S;
- u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
- GL_MDET_RX_VF_NUM_S;
- u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
- GL_MDET_RX_MAL_TYPE_S;
- u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
- GL_MDET_RX_QNUM_S);
+ u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
+ u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
+ u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
+ u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
@@ -1820,6 +1872,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
*/
mutex_lock(&pf->vfs.table_lock);
ice_for_each_vf(pf, bkt, vf) {
+ bool reset_vf_tx = false, reset_vf_rx = false;
+
reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
@@ -1828,6 +1882,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
vf->vf_id);
+
+ reset_vf_tx = true;
}
reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
@@ -1838,6 +1894,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
vf->vf_id);
+
+ reset_vf_tx = true;
}
reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
@@ -1848,6 +1906,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
vf->vf_id);
+
+ reset_vf_tx = true;
}
reg = rd32(hw, VP_MDET_RX(vf->vf_id));
@@ -1859,18 +1919,12 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
vf->vf_id);
- /* Since the queue is disabled on VF Rx MDD events, the
- * PF can be configured to reset the VF through ethtool
- * private flag mdd-auto-reset-vf.
- */
- if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
- /* VF MDD event counters will be cleared by
- * reset, so print the event prior to reset.
- */
- ice_print_vf_rx_mdd_event(vf);
- ice_reset_vf(vf, ICE_VF_RESET_LOCK);
- }
+ reset_vf_rx = true;
}
+
+ if (reset_vf_tx || reset_vf_rx)
+ ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
+ reset_vf_rx);
}
mutex_unlock(&pf->vfs.table_lock);
@@ -2146,7 +2200,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Ensure we have media as we cannot configure a medialess port */
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
- return -EPERM;
+ return -ENOMEDIUM;
ice_print_topo_conflict(vsi);
@@ -3031,6 +3085,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
static void ice_ena_misc_vector(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
+ u32 pf_intr_start_offset;
u32 val;
/* Disable anti-spoof detection interrupt to prevent spurious event
@@ -3059,6 +3114,47 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
/* SW_ITR_IDX = 0, but don't change INTENA */
wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
+
+ if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
+ return;
+ pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
+ wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
+ GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
+}
+
+/**
+ * ice_ll_ts_intr - ll_ts interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
+{
+ struct ice_pf *pf = data;
+ u32 pf_intr_start_offset;
+ struct ice_ptp_tx *tx;
+ unsigned long flags;
+ struct ice_hw *hw;
+ u32 val;
+ u8 idx;
+
+ hw = &pf->hw;
+ tx = &pf->ptp.port.tx;
+ spin_lock_irqsave(&tx->lock, flags);
+ ice_ptp_complete_tx_single_tstamp(tx);
+
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
+ (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+ pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
+ wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
+ val);
+
+ return IRQ_HANDLED;
}
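find_next_bit_wrap() (from <linux/find.h>) searches for the next set bit starting at the given offset, wraps around to bit 0, and returns the bitmap size when no bit is set; that is why the handlers above treat idx == tx->len as "no further timestamp pending". A small usage sketch with illustrative names:

#include <linux/find.h>
#include <linux/types.h>

static bool example_next_pending(const unsigned long *in_use,
				 unsigned int len, unsigned int last_idx)
{
	unsigned int idx = find_next_bit_wrap(in_use, len, last_idx + 1);

	return idx != len;	/* len means nothing is pending */
}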
/**
@@ -3069,6 +3165,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
struct ice_pf *pf = (struct ice_pf *)data;
+ irqreturn_t ret = IRQ_HANDLED;
struct ice_hw *hw = &pf->hw;
struct device *dev;
u32 oicr, ena_mask;
@@ -3108,8 +3205,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
/* we have a reset warning */
ena_mask &= ~PFINT_OICR_GRST_M;
- reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
- GLGEN_RSTAT_RESET_TYPE_S;
+ reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
+ rd32(hw, GLGEN_RSTAT));
if (reset == ICE_RESET_CORER)
pf->corer_count++;
@@ -3150,8 +3247,22 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf))
+ if (ice_pf_state_is_nominal(pf) &&
+ pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
+ struct ice_ptp_tx *tx = &pf->ptp.port.tx;
+ unsigned long flags;
+ u8 idx;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ spin_unlock_irqrestore(&tx->lock, flags);
+ } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
+ ret = IRQ_WAKE_THREAD;
+ }
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3167,7 +3278,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
GLTSYN_STAT_EVENT1_M |
GLTSYN_STAT_EVENT2_M);
- set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
+ ice_ptp_extts_event(pf);
}
}
@@ -3190,8 +3301,11 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
set_bit(ICE_PFR_REQ, pf->state);
}
}
+ ice_service_task_schedule(pf);
+ if (ret == IRQ_HANDLED)
+ ice_irq_dynamic_ena(hw, NULL, NULL);
- return IRQ_WAKE_THREAD;
+ return ret;
}
/**
@@ -3207,12 +3321,7 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
hw = &pf->hw;
if (ice_is_reset_in_progress(pf->state))
- return IRQ_HANDLED;
-
- ice_service_task_schedule(pf);
-
- if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
- ice_ptp_extts_event(pf);
+ goto skip_irq;
if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
/* Process outstanding Tx timestamps. If there is more work,
@@ -3224,6 +3333,7 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
}
}
+skip_irq:
ice_irq_dynamic_ena(hw, NULL, NULL);
return IRQ_HANDLED;
@@ -3254,6 +3364,20 @@ static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
}
/**
+ * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
+ * @pf: board private structure
+ */
+static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
+{
+ int irq_num = pf->ll_ts_irq.virq;
+
+ synchronize_irq(irq_num);
+ devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
+
+ ice_free_irq(pf, pf->ll_ts_irq);
+}
+
+/**
* ice_free_irq_msix_misc - Unroll misc vector setup
* @pf: board private structure
*/
@@ -3272,6 +3396,8 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
ice_free_irq(pf, pf->oicr_irq);
+ if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
+ ice_free_irq_msix_ll_ts(pf);
}
/**
@@ -3297,10 +3423,12 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
- /* This enables Sideband queue Interrupt causes */
- val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
- PFINT_SB_CTL_CAUSE_ENA_M);
- wr32(hw, PFINT_SB_CTL, val);
+ if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
+ /* enable Sideband queue Interrupt causes */
+ val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
+ PFINT_SB_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_SB_CTL, val);
+ }
ice_flush(hw);
}
@@ -3317,13 +3445,17 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- struct msi_map oicr_irq;
+ u32 pf_intr_start_offset;
+ struct msi_map irq;
int err = 0;
if (!pf->int_name[0])
snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
dev_driver_string(dev), dev_name(dev));
+ if (!pf->int_name_ll_ts[0])
+ snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
+ "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
/* Do not request IRQ but do enable OICR interrupt since settings are
* lost during reset. Note that this function is called only during
* rebuild path and not while reset is in progress.
@@ -3332,11 +3464,11 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
goto skip_req_irq;
/* reserve one vector in irq_tracker for misc interrupts */
- oicr_irq = ice_alloc_irq(pf, false);
- if (oicr_irq.index < 0)
- return oicr_irq.index;
+ irq = ice_alloc_irq(pf, false);
+ if (irq.index < 0)
+ return irq.index;
- pf->oicr_irq = oicr_irq;
+ pf->oicr_irq = irq;
err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
ice_misc_intr_thread_fn, 0,
pf->int_name, pf);
@@ -3347,10 +3479,34 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
return err;
}
+ /* reserve one vector in irq_tracker for ll_ts interrupt */
+ if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
+ goto skip_req_irq;
+
+ irq = ice_alloc_irq(pf, false);
+ if (irq.index < 0)
+ return irq.index;
+
+ pf->ll_ts_irq = irq;
+ err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
+ pf->int_name_ll_ts, pf);
+ if (err) {
+ dev_err(dev, "devm_request_irq for %s failed: %d\n",
+ pf->int_name_ll_ts, err);
+ ice_free_irq(pf, pf->ll_ts_irq);
+ return err;
+ }
+
skip_req_irq:
ice_ena_misc_vector(pf);
ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
+ /* This enables LL TS interrupt */
+ pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
+ if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
+ wr32(hw, PFINT_SB_CTL,
+ ((pf->ll_ts_irq.index + pf_intr_start_offset) &
+ PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
@@ -3375,9 +3531,11 @@ static void ice_napi_add(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- ice_for_each_q_vector(vsi, v_idx)
+ ice_for_each_q_vector(vsi, v_idx) {
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
ice_napi_poll);
+ __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+ }
}
/**
@@ -3397,6 +3555,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
netdev->netdev_ops = &ice_netdev_ops;
netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
+ netdev->xdp_metadata_ops = &ice_xdp_md_ops;
ice_set_ethtool_ops(netdev);
if (vsi->type != ICE_VSI_PF)
@@ -3526,7 +3685,7 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_PF;
- params.pi = pi;
+ params.port_info = pi;
params.flags = ICE_VSI_FLAG_INIT;
return ice_vsi_setup(pf, &params);
@@ -3539,7 +3698,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_CHNL;
- params.pi = pi;
+ params.port_info = pi;
params.ch = ch;
params.flags = ICE_VSI_FLAG_INIT;
@@ -3560,7 +3719,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_CTRL;
- params.pi = pi;
+ params.port_info = pi;
params.flags = ICE_VSI_FLAG_INIT;
return ice_vsi_setup(pf, &params);
@@ -3580,7 +3739,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_LB;
- params.pi = pi;
+ params.port_info = pi;
params.flags = ICE_VSI_FLAG_INIT;
return ice_vsi_setup(pf, &params);
@@ -4295,11 +4454,13 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
/**
* ice_request_fw - Device initialization routine
* @pf: pointer to the PF instance
+ * @firmware: double pointer to firmware struct
+ *
+ * Return: zero when successful, negative values otherwise.
*/
-static void ice_request_fw(struct ice_pf *pf)
+static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
{
char *opt_fw_filename = ice_get_opt_fw_name(pf);
- const struct firmware *firmware = NULL;
struct device *dev = ice_pf_to_dev(pf);
int err = 0;
@@ -4308,29 +4469,95 @@ static void ice_request_fw(struct ice_pf *pf)
* and warning messages for other errors.
*/
if (opt_fw_filename) {
- err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
- if (err) {
- kfree(opt_fw_filename);
- goto dflt_pkg_load;
- }
-
- /* request for firmware was successful. Download to device */
- ice_load_pkg(firmware, pf);
+ err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
kfree(opt_fw_filename);
- release_firmware(firmware);
- return;
+ if (!err)
+ return err;
+ }
+ err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
+ if (err)
+ dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
+
+ return err;
+}
+
+/**
+ * ice_init_tx_topology - performs Tx topology initialization
+ * @hw: pointer to the hardware structure
+ * @firmware: pointer to firmware structure
+ *
+ * Return: zero when init was successful, negative values otherwise.
+ */
+static int
+ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
+{
+ u8 num_tx_sched_layers = hw->num_tx_sched_layers;
+ struct ice_pf *pf = hw->back;
+ struct device *dev;
+ u8 *buf_copy;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+ /* ice_cfg_tx_topo buf argument is not a constant,
+ * so we have to make a copy
+ */
+	buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL);
+	if (!buf_copy)
+		return -ENOMEM;
+
+ err = ice_cfg_tx_topo(hw, buf_copy, firmware->size);
+ if (!err) {
+ if (hw->num_tx_sched_layers > num_tx_sched_layers)
+ dev_info(dev, "Tx scheduling layers switching feature disabled\n");
+ else
+ dev_info(dev, "Tx scheduling layers switching feature enabled\n");
+ /* if there was a change in topology ice_cfg_tx_topo triggered
+ * a CORER and we need to re-init hw
+ */
+ ice_deinit_hw(hw);
+ err = ice_init_hw(hw);
+
+ return err;
+ } else if (err == -EIO) {
+ dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
}
-dflt_pkg_load:
- err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
+ return 0;
+}
+
+/**
+ * ice_init_ddp_config - DDP related configuration
+ * @hw: pointer to the hardware structure
+ * @pf: pointer to pf structure
+ *
+ * This function loads the DDP file from disk, then initializes the Tx
+ * topology. Finally, the DDP package is downloaded to the device.
+ *
+ * Return: zero when init was successful, negative values otherwise.
+ */
+static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ const struct firmware *firmware = NULL;
+ int err;
+
+ err = ice_request_fw(pf, &firmware);
if (err) {
- dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
- return;
+		dev_err(dev, "Failed to request firmware: %d\n", err);
+ return err;
}
- /* request for firmware was successful. Download to device */
+ err = ice_init_tx_topology(hw, firmware);
+ if (err) {
+		dev_err(dev, "Failed to initialize Tx topology: %d\n",
+ err);
+ release_firmware(firmware);
+ return err;
+ }
+
+ /* Download firmware to device */
ice_load_pkg(firmware, pf);
release_firmware(firmware);
+
+ return 0;
}
/**
@@ -4361,6 +4588,19 @@ static void ice_print_wake_reason(struct ice_pf *pf)
}
/**
+ * ice_pf_fwlog_update_module - update the log level for one module
+ * @pf: pointer to the PF struct
+ * @log_level: log_level to use for the @module
+ * @module: module to update
+ */
+void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ hw->fwlog_cfg.module_entries[module].log_level = log_level;
+}
+
+/**
* ice_register_netdev - register netdev
* @vsi: pointer to the VSI struct
*/
@@ -4439,90 +4679,6 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-static int ice_start_eth(struct ice_vsi *vsi)
-{
- int err;
-
- err = ice_init_mac_fltr(vsi->back);
- if (err)
- return err;
-
- err = ice_vsi_open(vsi);
- if (err)
- ice_fltr_remove_all(vsi);
-
- return err;
-}
-
-static void ice_stop_eth(struct ice_vsi *vsi)
-{
- ice_fltr_remove_all(vsi);
- ice_vsi_close(vsi);
-}
-
-static int ice_init_eth(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- int err;
-
- if (!vsi)
- return -EINVAL;
-
- /* init channel list */
- INIT_LIST_HEAD(&vsi->ch_list);
-
- err = ice_cfg_netdev(vsi);
- if (err)
- return err;
- /* Setup DCB netlink interface */
- ice_dcbnl_setup(vsi);
-
- err = ice_init_mac_fltr(pf);
- if (err)
- goto err_init_mac_fltr;
-
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create_pf_port;
-
- SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
-
- err = ice_register_netdev(vsi);
- if (err)
- goto err_register_netdev;
-
- err = ice_tc_indir_block_register(vsi);
- if (err)
- goto err_tc_indir_block_register;
-
- ice_napi_add(vsi);
-
- return 0;
-
-err_tc_indir_block_register:
- ice_unregister_netdev(vsi);
-err_register_netdev:
- ice_devlink_destroy_pf_port(pf);
-err_devlink_create_pf_port:
-err_init_mac_fltr:
- ice_decfg_netdev(vsi);
- return err;
-}
-
-static void ice_deinit_eth(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
-
- if (!vsi)
- return;
-
- ice_vsi_close(vsi);
- ice_unregister_netdev(vsi);
- ice_devlink_destroy_pf_port(pf);
- ice_tc_indir_block_unregister(vsi);
- ice_decfg_netdev(vsi);
-}
-
/**
* ice_wait_for_fw - wait for full FW readiness
* @hw: pointer to the hardware structure
@@ -4548,7 +4704,7 @@ static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
return -ETIMEDOUT;
}
-static int ice_init_dev(struct ice_pf *pf)
+int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
@@ -4574,9 +4730,11 @@ static int ice_init_dev(struct ice_pf *pf)
ice_init_feature_support(pf);
- ice_request_fw(pf);
+ err = ice_init_ddp_config(hw, pf);
+ if (err)
+ return err;
- /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
+ /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be
* set in pf->state, which will cause ice_is_safe_mode to return
* true
*/
@@ -4641,7 +4799,7 @@ err_init_pf:
return err;
}
-static void ice_deinit_dev(struct ice_pf *pf)
+void ice_deinit_dev(struct ice_pf *pf)
{
ice_free_irq_msix_misc(pf);
ice_deinit_pf(pf);
@@ -4685,6 +4843,8 @@ static void ice_init_features(struct ice_pf *pf)
if (ice_init_lag(pf))
dev_warn(dev, "Failed to init link aggregation support\n");
+
+ ice_hwmon_init(pf);
}
static void ice_deinit_features(struct ice_pf *pf)
@@ -4702,6 +4862,8 @@ static void ice_deinit_features(struct ice_pf *pf)
ice_ptp_release(pf);
if (test_bit(ICE_FLAG_DPLL, pf->flags))
ice_dpll_deinit(pf);
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ xa_destroy(&pf->eswitch.reprs);
}
static void ice_init_wakeup(struct ice_pf *pf)
@@ -4942,31 +5104,47 @@ static void ice_deinit(struct ice_pf *pf)
/**
* ice_load - load pf by init hw and starting VSI
* @pf: pointer to the pf instance
+ *
+ * This function has to be called under devl_lock.
*/
int ice_load(struct ice_pf *pf)
{
- struct ice_vsi_cfg_params params = {};
struct ice_vsi *vsi;
int err;
- err = ice_init_dev(pf);
+ devl_assert_locked(priv_to_devlink(pf));
+
+ vsi = ice_get_main_vsi(pf);
+
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
+ err = ice_cfg_netdev(vsi);
if (err)
return err;
- vsi = ice_get_main_vsi(pf);
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
- params = ice_vsi_to_params(vsi);
- params.flags = ICE_VSI_FLAG_INIT;
+ err = ice_init_mac_fltr(pf);
+ if (err)
+ goto err_init_mac_fltr;
- rtnl_lock();
- err = ice_vsi_cfg(vsi, &params);
+ err = ice_devlink_create_pf_port(pf);
if (err)
- goto err_vsi_cfg;
+ goto err_devlink_create_pf_port;
+
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
- err = ice_start_eth(ice_get_main_vsi(pf));
+ err = ice_register_netdev(vsi);
if (err)
- goto err_start_eth;
- rtnl_unlock();
+ goto err_register_netdev;
+
+ err = ice_tc_indir_block_register(vsi);
+ if (err)
+ goto err_tc_indir_block_register;
+
+ ice_napi_add(vsi);
err = ice_init_rdma(pf);
if (err)
@@ -4980,29 +5158,35 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
- ice_vsi_close(ice_get_main_vsi(pf));
- rtnl_lock();
-err_start_eth:
- ice_vsi_decfg(ice_get_main_vsi(pf));
-err_vsi_cfg:
- rtnl_unlock();
- ice_deinit_dev(pf);
+ ice_tc_indir_block_unregister(vsi);
+err_tc_indir_block_register:
+ ice_unregister_netdev(vsi);
+err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create_pf_port:
+err_init_mac_fltr:
+ ice_decfg_netdev(vsi);
return err;
}
/**
* ice_unload - unload pf by stopping VSI and deinit hw
* @pf: pointer to the pf instance
+ *
+ * This function has to be called under devl_lock.
*/
void ice_unload(struct ice_pf *pf)
{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ devl_assert_locked(priv_to_devlink(pf));
+
ice_deinit_features(pf);
ice_deinit_rdma(pf);
- rtnl_lock();
- ice_stop_eth(ice_get_main_vsi(pf));
- ice_vsi_decfg(ice_get_main_vsi(pf));
- rtnl_unlock();
- ice_deinit_dev(pf);
+ ice_tc_indir_block_unregister(vsi);
+ ice_unregister_netdev(vsi);
+ ice_devlink_destroy_pf_port(pf);
+ ice_decfg_netdev(vsi);
}
/**
@@ -5016,6 +5200,7 @@ static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
struct device *dev = &pdev->dev;
+ struct ice_adapter *adapter;
struct ice_pf *pf;
struct ice_hw *hw;
int err;
@@ -5068,7 +5253,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pci_set_master(pdev);
+ adapter = ice_adapter_get(pdev);
+ if (IS_ERR(adapter))
+ return PTR_ERR(adapter);
+
pf->pdev = pdev;
+ pf->adapter = adapter;
pci_set_drvdata(pdev, pf);
set_bit(ICE_DOWN, pf->state);
/* Disable service task until DOWN bit is cleared */
@@ -5100,29 +5290,25 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err)
goto err_init;
- err = ice_init_eth(pf);
+ devl_lock(priv_to_devlink(pf));
+ err = ice_load(pf);
if (err)
- goto err_init_eth;
-
- err = ice_init_rdma(pf);
- if (err)
- goto err_init_rdma;
+ goto err_load;
err = ice_init_devlink(pf);
if (err)
goto err_init_devlink;
-
- ice_init_features(pf);
+ devl_unlock(priv_to_devlink(pf));
return 0;
err_init_devlink:
- ice_deinit_rdma(pf);
-err_init_rdma:
- ice_deinit_eth(pf);
-err_init_eth:
+ ice_unload(pf);
+err_load:
+ devl_unlock(priv_to_devlink(pf));
ice_deinit(pf);
err_init:
+ ice_adapter_put(pdev);
pci_disable_device(pdev);
return err;
}
@@ -5208,23 +5394,28 @@ static void ice_remove(struct pci_dev *pdev)
ice_free_vfs(pf);
}
+ ice_hwmon_exit(pf);
+
ice_service_task_stop(pf);
ice_aq_cancel_waiting_tasks(pf);
set_bit(ICE_DOWN, pf->state);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
- ice_deinit_features(pf);
+
+ devl_lock(priv_to_devlink(pf));
ice_deinit_devlink(pf);
- ice_deinit_rdma(pf);
- ice_deinit_eth(pf);
- ice_deinit(pf);
+ ice_unload(pf);
+ devl_unlock(priv_to_devlink(pf));
+
+ ice_deinit(pf);
ice_vsi_release_all(pf);
ice_setup_mc_magic_wake(pf);
ice_set_wake(pf);
+ ice_adapter_put(pdev);
pci_disable_device(pdev);
}
@@ -5244,7 +5435,6 @@ static void ice_shutdown(struct pci_dev *pdev)
}
}
-#ifdef CONFIG_PM
/**
* ice_prepare_for_shutdown - prep for PCI shutdown
* @pf: board private structure
@@ -5306,6 +5496,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
if (ret)
goto err_reinit;
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ ice_vsi_set_napi_queues(pf->vsi[v]);
}
ret = ice_req_irq_msix_misc(pf);
@@ -5332,7 +5523,7 @@ err_reinit:
* Power Management callback to quiesce the device and prepare
* for D3 transition.
*/
-static int __maybe_unused ice_suspend(struct device *dev)
+static int ice_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ice_pf *pf;
@@ -5399,7 +5590,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
* ice_resume - PM callback for waking up from D3
* @dev: generic device information structure
*/
-static int __maybe_unused ice_resume(struct device *dev)
+static int ice_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
enum ice_reset_req reset_type;
@@ -5450,7 +5641,6 @@ static int __maybe_unused ice_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
/**
* ice_pci_err_detected - warning that PCI error has been detected
@@ -5611,16 +5801,26 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
/* required last entry */
{}
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
-static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
static const struct pci_error_handlers ice_pci_err_handler = {
.error_detected = ice_pci_err_detected,
@@ -5635,9 +5835,7 @@ static struct pci_driver ice_driver = {
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
-#ifdef CONFIG_PM
- .driver.pm = &ice_pm_ops,
-#endif /* CONFIG_PM */
+ .driver.pm = pm_sleep_ptr(&ice_pm_ops),
.shutdown = ice_shutdown,
.sriov_configure = ice_sriov_configure,
.sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
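DEFINE_SIMPLE_DEV_PM_OPS() together with pm_sleep_ptr() replaces the old #ifdef CONFIG_PM guards: the ops structure is always defined, and pm_sleep_ptr() evaluates to NULL when CONFIG_PM_SLEEP is disabled so the compiler can discard it. Conceptually it behaves like the sketch below (the real macro uses PTR_IF() so the symbol still appears referenced and avoids unused warnings):

#ifdef CONFIG_PM_SLEEP
#define example_pm_sleep_ptr(_ptr)	(_ptr)
#else
#define example_pm_sleep_ptr(_ptr)	NULL
#endif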
@@ -5672,6 +5870,8 @@ static int __init ice_module_init(void)
goto err_dest_wq;
}
+ ice_debugfs_init();
+
status = pci_register_driver(&ice_driver);
if (status) {
pr_err("failed to register PCI driver, err %d\n", status);
@@ -5682,6 +5882,7 @@ static int __init ice_module_init(void)
err_dest_lag_wq:
destroy_workqueue(ice_lag_wq);
+ ice_debugfs_exit();
err_dest_wq:
destroy_workqueue(ice_wq);
return status;
@@ -5697,6 +5898,7 @@ module_init(ice_module_init);
static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
+ ice_debugfs_exit();
destroy_workqueue(ice_wq);
destroy_workqueue(ice_lag_wq);
pr_info("module unloaded\n");
@@ -6041,6 +6243,23 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
}
/**
+ * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
+ * @vsi: PF's VSI
+ * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
+ *
+ * Store current stripped VLAN proto in ring packet context,
+ * so it can be accessed more efficiently by packet processing code.
+ */
+static void
+ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
+{
+ u16 i;
+
+ ice_for_each_alloc_rxq(vsi, i)
+ vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
+}
+
+/**
* ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
* @vsi: PF's VSI
* @features: features used to determine VLAN offload settings
@@ -6082,6 +6301,9 @@ ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
if (strip_err || insert_err)
return -EIO;
+ ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
+ htons(vlan_ethertype) : 0);
+
return 0;
}
@@ -6572,6 +6794,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *net_stats, *stats_prev;
struct rtnl_link_stats64 *vsi_stats;
+ struct ice_pf *pf = vsi->back;
u64 pkts, bytes;
int i;
@@ -6617,21 +6840,18 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
net_stats = &vsi->net_stats;
stats_prev = &vsi->net_stats_prev;
- /* clear prev counters after reset */
- if (vsi_stats->tx_packets < stats_prev->tx_packets ||
- vsi_stats->rx_packets < stats_prev->rx_packets) {
- stats_prev->tx_packets = 0;
- stats_prev->tx_bytes = 0;
- stats_prev->rx_packets = 0;
- stats_prev->rx_bytes = 0;
+	/* Update netdev counters, but keep in mind that values could start at
+	 * a random value after a PF reset. Since we add the delta between the
+	 * current and previous values to the reported stats, the previous
+	 * values must be valid; if they are not, skip this round.
+	 */
+ if (likely(pf->stat_prev_loaded)) {
+ net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
+ net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
+ net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
+ net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
}
- /* update netdev counters */
- net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
- net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
- net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
- net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
-
stats_prev->tx_packets = vsi_stats->tx_packets;
stats_prev->tx_bytes = vsi_stats->tx_bytes;
stats_prev->rx_packets = vsi_stats->rx_packets;
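The pattern above only adds the current-minus-previous delta once a valid baseline exists, because the ring counters can restart at arbitrary values after a PF reset. A minimal sketch of the same accumulation (names are illustrative, not driver code):

static void example_accumulate(u64 *total, u64 cur, u64 *prev,
			       bool prev_loaded)
{
	if (prev_loaded)
		*total += cur - *prev;	/* add the delta only with a valid baseline */
	*prev = cur;			/* always refresh the baseline */
}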
@@ -6670,13 +6890,11 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
cur_ns->rx_crc_errors = pf->stats.crc_errors;
cur_ns->rx_errors = pf->stats.crc_errors +
pf->stats.illegal_bytes +
- pf->stats.rx_len_errors +
pf->stats.rx_undersize +
pf->hw_csum_rx_error +
pf->stats.rx_jabber +
pf->stats.rx_fragments +
pf->stats.rx_oversize;
- cur_ns->rx_length_errors = pf->stats.rx_len_errors;
/* record drops from the port level */
cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
}
@@ -6816,9 +7034,6 @@ void ice_update_pf_stats(struct ice_pf *pf)
&prev_ps->mac_remote_faults,
&cur_ps->mac_remote_faults);
- ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
- &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
-
ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
&prev_ps->rx_undersize, &cur_ps->rx_undersize);
@@ -6901,6 +7116,50 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+static void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+ int i;
+
+ /* disable interrupt causation from each Rx queue; Tx queues are
+ * handled in ice_vsi_stop_tx_ring()
+ */
+ if (vsi->rx_rings) {
+ ice_for_each_rxq(vsi, i) {
+ if (vsi->rx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->rx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+ }
+ }
+ }
+
+ /* disable each interrupt */
+ ice_for_each_q_vector(vsi, i) {
+ if (!vsi->q_vectors[i])
+ continue;
+ wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
+ }
+
+ ice_flush(hw);
+
+	/* don't call synchronize_irq() for VFs from the host */
+ if (vsi->type == ICE_VSI_VF)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ synchronize_irq(vsi->q_vectors[i]->irq.virq);
+}
+
+/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
*
@@ -6912,13 +7171,11 @@ int ice_down(struct ice_vsi *vsi)
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
- } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
- ice_eswitch_stop_all_tx_queues(vsi->back);
}
ice_vsi_dis_irq(vsi);
@@ -7389,7 +7646,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
* fail.
*/
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_reset(pf);
+ ice_ptp_rebuild(pf, reset_type);
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_init(pf);
@@ -7401,20 +7658,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- /* configure PTP timestamping after VSI rebuild */
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) {
- if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
- ice_ptp_cfg_timestamp(pf, false);
- else if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL)
- /* for E82x PHC owner always need to have interrupts */
- ice_ptp_cfg_timestamp(pf, true);
- }
-
- err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
- if (err) {
- dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
- goto err_vsi_rebuild;
- }
+ ice_eswitch_rebuild(pf);
if (reset_type == ICE_RESET_PFR) {
err = ice_rebuild_channels(pf);
@@ -7461,6 +7705,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_plug_aux_dev(pf);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
ice_lag_rebuild(pf);
+
+ /* Restore timestamp mode settings after VSI rebuild */
+ ice_ptp_restore_timestamp_mode(pf);
return;
err_vsi_rebuild:
@@ -7529,7 +7776,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- netdev->mtu = (unsigned int)new_mtu;
+ WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
err = ice_down_up(vsi);
if (err)
return err;
@@ -7710,6 +7957,59 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
}
/**
+ * ice_set_rss_hfunc - Set RSS hash function
+ * @vsi: Pointer to VSI structure
+ * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctx;
+ bool symm;
+ int err;
+
+ if (hfunc == vsi->rss_hfunc)
+ return 0;
+
+ if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
+ hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
+ return -EOPNOTSUPP;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+ ctx->info.q_opt_rss = vsi->info.q_opt_rss;
+ ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
+ ctx->info.q_opt_rss |=
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
+ ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+ ctx->info.q_opt_flags = vsi->info.q_opt_rss;
+
+ err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
+ vsi->vsi_num, err);
+ } else {
+ vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+ vsi->rss_hfunc = hfunc;
+ netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
+ hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
+ "Symmetric " : "");
+ }
+ kfree(ctx);
+ if (err)
+ return err;
+
+ /* Fix the symmetry setting for all existing RSS configurations */
+ symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
+ return ice_set_rss_cfg_symm(hw, vsi, symm);
+}
+
+/**
* ice_bridge_getlink - Get the hardware bridge mode
* @skb: skb buff
* @pid: process ID
@@ -7806,13 +8106,12 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
pf_sw = pf->first_sw;
/* find the attribute in the netlink message */
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- __u16 mode;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
+ __u16 mode = nla_get_u16(attr);
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
- mode = nla_get_u16(attr);
if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
return -EINVAL;
/* Continue if bridge mode is not being flipped */
@@ -7895,8 +8194,8 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
struct ice_hw *hw = &pf->hw;
u32 head, val = 0;
- head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
- QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
+ head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
+ rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
/* Read interrupt register */
val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
@@ -8144,13 +8443,12 @@ static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
enum ice_flow_priority prio;
- u64 prof_id;
/* add this VSI to FDir profile for this flow */
prio = ICE_FLOW_PRIO_NORMAL;
prof = hw->fdir_prof[flow];
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ status = ice_flow_add_entry(hw, ICE_BLK_FD,
+ prof->prof_id[tun],
prof->vsi_h[0], vsi->idx,
prio, prof->fdir_seg[tun],
&entry_h);
@@ -9193,8 +9491,14 @@ int ice_stop(struct net_device *netdev)
int link_err = ice_force_phys_link_state(vsi, false);
if (link_err) {
- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
- vsi->vsi_num, link_err);
+ if (link_err == -ENOMEDIUM)
+ netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
+ vsi->vsi_num);
+ else
+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+ vsi->vsi_num, link_err);
+
+ ice_vsi_close(vsi);
return -EIO;
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index f6f52a248066..84eab92dc03c 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -18,10 +18,9 @@
*
* Read the NVM using the admin queue commands (0x0701)
*/
-static int
-ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
- void *data, bool last_command, bool read_shadow_ram,
- struct ice_sq_cd *cd)
+int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
@@ -571,8 +570,8 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv
return status;
}
- nvm->major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
- nvm->minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
+ nvm->major = FIELD_GET(ICE_NVM_VER_HI_MASK, ver);
+ nvm->minor = FIELD_GET(ICE_NVM_VER_LO_MASK, ver);
status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
if (status) {
@@ -706,9 +705,9 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o
combo_ver = le32_to_cpu(civd.combo_ver);
- orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> ICE_OROM_VER_SHIFT);
- orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
- orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> ICE_OROM_VER_BUILD_SHIFT);
+ orom->major = FIELD_GET(ICE_OROM_VER_MASK, combo_ver);
+ orom->patch = FIELD_GET(ICE_OROM_VER_PATCH_MASK, combo_ver);
+ orom->build = FIELD_GET(ICE_OROM_VER_BUILD_MASK, combo_ver);
return 0;
}
@@ -950,7 +949,8 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw)
}
/* Check that the control word indicates validity */
- if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
+ if (FIELD_GET(ICE_SR_CTRL_WORD_1_M, ctrl_word) !=
+ ICE_SR_CTRL_WORD_VALID) {
ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
return -EIO;
}
@@ -1027,7 +1027,7 @@ int ice_init_nvm(struct ice_hw *hw)
* as the blank mode may be used in the factory line.
*/
gens_stat = rd32(hw, GLNVM_GENS);
- sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
/* Switching to words (sr_size contains power of 2) */
flash->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 774c2317967d..63cdc6bdac58 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -14,6 +14,9 @@ struct ice_orom_civd_info {
int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
+int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram, struct ice_sq_cd *cd);
int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index 82bc54fec7f3..a2562f04267f 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -24,7 +24,7 @@
#define rd64(a, reg) readq((a)->hw_addr + (reg))
#define ice_flush(a) rd32((a), GLGEN_STAT)
-#define ICE_M(m, s) ((m) << (s))
+#define ICE_M(m, s) ((m ## U) << (s))
struct ice_dma_mem {
void *va;
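Pasting a U suffix onto the mask literal forces the shift to be performed in unsigned arithmetic; without it, a mask such as 0xFFFF shifted by 16 is computed as a signed int, and shifting a bit into the sign position is undefined behaviour. Illustrative macros (not part of the driver), which also show that the paste expects the mask argument to be a plain numeric literal:

#define EXAMPLE_M_SIGNED(m, s)		((m) << (s))	  /* 0xFFFF << 16: signed shift, UB-prone */
#define EXAMPLE_M_UNSIGNED(m, s)	((m ## U) << (s)) /* 0xFFFFU << 16 == 0xFFFF0000U */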
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index f6f27361c3cf..755a9c55267c 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -43,6 +43,7 @@ enum ice_protocol_type {
ICE_NVGRE,
ICE_GTP,
ICE_GTP_NO_PAY,
+ ICE_PFCP,
ICE_PPPOE,
ICE_L2TPV3,
ICE_VLAN_EX,
@@ -61,6 +62,7 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_NVGRE,
ICE_SW_TUN_GTPU,
ICE_SW_TUN_GTPC,
+ ICE_SW_TUN_PFCP,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@@ -202,6 +204,15 @@ struct ice_udp_gtp_hdr {
u8 rsvrd;
};
+struct ice_pfcp_hdr {
+ u8 flags;
+ u8 msg_type;
+ __be16 length;
+ __be64 seid;
+ __be32 seq;
+ u8 spare;
+} __packed __aligned(__alignof__(u16));
+
struct ice_pppoe_hdr {
u8 rsrvd_ver_type;
u8 rsrvd_code;
@@ -418,6 +429,7 @@ union ice_prot_hdr {
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre_hdr nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
+ struct ice_pfcp_hdr pfcp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
struct ice_hw_metadata metadata;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 1eddcbe89b0c..0f17fc1181d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -7,7 +7,7 @@
#define E810_OUT_PROP_DELAY_NS 1
-#define UNKNOWN_INCVAL_E822 0x100000000ULL
+#define UNKNOWN_INCVAL_E82X 0x100000000ULL
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
/* name idx func chan */
@@ -256,48 +256,42 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
}
/**
- * ice_ptp_configure_tx_tstamp - Enable or disable Tx timestamp interrupt
- * @pf: The PF pointer to search in
- * @on: bool value for whether timestamp interrupt is enabled or disabled
+ * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
+ * @pf: Board private structure
+ *
+ * Program the device to respond appropriately to the Tx timestamp interrupt
+ * cause.
*/
-static void ice_ptp_configure_tx_tstamp(struct ice_pf *pf, bool on)
+static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
+ struct ice_hw *hw = &pf->hw;
+ bool enable;
u32 val;
+ switch (pf->ptp.tx_interrupt_mode) {
+ case ICE_PTP_TX_INTERRUPT_ALL:
+ /* React to interrupts across all quads. */
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
+ enable = true;
+ break;
+ case ICE_PTP_TX_INTERRUPT_NONE:
+ /* Do not react to interrupts on any quad. */
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
+ enable = false;
+ break;
+ case ICE_PTP_TX_INTERRUPT_SELF:
+ default:
+ enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
+ break;
+ }
+
/* Configure the Tx timestamp interrupt */
- val = rd32(&pf->hw, PFINT_OICR_ENA);
- if (on)
+ val = rd32(hw, PFINT_OICR_ENA);
+ if (enable)
val |= PFINT_OICR_TSYN_TX_M;
else
val &= ~PFINT_OICR_TSYN_TX_M;
- wr32(&pf->hw, PFINT_OICR_ENA, val);
-}
-
-/**
- * ice_set_tx_tstamp - Enable or disable Tx timestamping
- * @pf: The PF pointer to search in
- * @on: bool value for whether timestamps are enabled or disabled
- */
-static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
-{
- struct ice_vsi *vsi;
- u16 i;
-
- vsi = ice_get_main_vsi(pf);
- if (!vsi)
- return;
-
- /* Set the timestamp enable flag for all the Tx rings */
- ice_for_each_txq(vsi, i) {
- if (!vsi->tx_rings[i])
- continue;
- vsi->tx_rings[i]->ptp_tx = on;
- }
-
- if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
- ice_ptp_configure_tx_tstamp(pf, on);
-
- pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ wr32(hw, PFINT_OICR_ENA, val);
}
/**
@@ -311,7 +305,7 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
u16 i;
vsi = ice_get_main_vsi(pf);
- if (!vsi)
+ if (!vsi || !vsi->rx_rings)
return;
/* Set the timestamp flag for all the Rx rings */
@@ -320,23 +314,50 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
continue;
vsi->rx_rings[i]->ptp_rx = on;
}
+}
- pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
- HWTSTAMP_FILTER_NONE;
+/**
+ * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
+ * @pf: Board private structure
+ *
+ * Called during preparation for reset to temporarily disable timestamping on
+ * the device. Called during remove to disable timestamping while cleaning up
+ * driver resources.
+ */
+static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ val = rd32(hw, PFINT_OICR_ENA);
+ val &= ~PFINT_OICR_TSYN_TX_M;
+ wr32(hw, PFINT_OICR_ENA, val);
+
+ ice_set_rx_tstamp(pf, false);
}
/**
- * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
+ * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
* @pf: Board private structure
- * @ena: bool value to enable or disable time stamp
*
- * This function will configure timestamping during PTP initialization
- * and deinitialization
+ * Called at the end of rebuild to restore timestamp configuration after
+ * a device reset.
*/
-void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
+void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
{
- ice_set_tx_tstamp(pf, ena);
- ice_set_rx_tstamp(pf, ena);
+ struct ice_hw *hw = &pf->hw;
+ bool enable_rx;
+
+ ice_ptp_cfg_tx_interrupt(pf);
+
+ enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
+ ice_set_rx_tstamp(pf, enable_rx);
+
+ /* Trigger an immediate software interrupt to ensure that timestamps
+ * which occurred during reset are handled now.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
}
/**
@@ -353,6 +374,7 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
u8 tmr_idx;
tmr_idx = ice_get_ptp_src_clock_index(hw);
+ guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
/* Read the system timestamp pre PHC read */
ptp_read_system_prets(sts);
@@ -504,6 +526,115 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
}
/**
+ * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
+ * @tx: the PTP Tx timestamp tracker
+ * @idx: index of the timestamp to request
+ */
+void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
+{
+ struct ice_ptp_port *ptp_port;
+ struct sk_buff *skb;
+ struct ice_pf *pf;
+
+ if (!tx->init)
+ return;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+
+ /* Drop packets which have waited for more than 2 seconds */
+ if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
+ /* Count the number of Tx timestamps that timed out */
+ pf->ptp.tx_hwtstamp_timeouts++;
+
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ clear_bit(idx, tx->in_use);
+
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+
+ /* Write TS index to read to the PF register so the FW can read it */
+ wr32(&pf->hw, PF_SB_ATQBAL,
+ TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
+ TS_LL_READ_TS);
+ tx->last_ll_ts_idx_read = idx;
+}
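time_is_before_jiffies(t) (from <linux/jiffies.h>) is true once the moment t lies in the past, so the test above fires when the timestamp has been waiting for more than two seconds. A minimal sketch of the same aging check:

#include <linux/jiffies.h>

static bool example_tstamp_expired(unsigned long start_jiffies)
{
	/* true once start_jiffies + 2 s is already behind the current jiffies */
	return time_is_before_jiffies(start_jiffies + 2 * HZ);
}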
+
+/**
+ * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
+ * @tx: the PTP Tx timestamp tracker
+ */
+void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
+{
+ struct skb_shared_hwtstamps shhwtstamps = {};
+ u8 idx = tx->last_ll_ts_idx_read;
+ struct ice_ptp_port *ptp_port;
+ u64 raw_tstamp, tstamp;
+ bool drop_ts = false;
+ struct sk_buff *skb;
+ struct ice_pf *pf;
+ u32 val;
+
+ if (!tx->init || tx->last_ll_ts_idx_read < 0)
+ return;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+
+ ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
+
+ val = rd32(&pf->hw, PF_SB_ATQBAL);
+
+ /* When the bit is cleared, the TS is ready in the register */
+ if (val & TS_LL_READ_TS) {
+		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready\n");
+ return;
+ }
+
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+ raw_tstamp <<= 32;
+
+ /* Read the low 32 bit value */
+ raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
+
+	/* Devices using this interface always verify that the timestamp
+	 * differs from the last cached timestamp value.
+	 */
+ if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
+ return;
+
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
+ clear_bit(idx, tx->in_use);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ if (test_and_clear_bit(idx, tx->stale))
+ drop_ts = true;
+
+ if (!skb)
+ return;
+
+ if (drop_ts) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* Extend the timestamp using cached PHC time */
+ tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
+ if (tstamp) {
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ ice_trace(tx_tstamp_complete, skb, idx);
+ }
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
+/**
* ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
* @tx: the PTP Tx timestamp tracker
*
@@ -554,6 +685,7 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
struct ice_ptp_port *ptp_port;
+ unsigned long flags;
struct ice_pf *pf;
struct ice_hw *hw;
u64 tstamp_ready;
@@ -566,9 +698,11 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
hw = &pf->hw;
/* Read the Tx ready status first */
- err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
- if (err)
- return;
+ if (tx->has_ready_bitmap) {
+ err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
+ if (err)
+ return;
+ }
/* Drop packets if the link went down */
link_up = ptp_port->link_up;
@@ -596,7 +730,8 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
* If we do not, the hardware logic for generating a new
* interrupt can get stuck on some devices.
*/
- if (!(tstamp_ready & BIT_ULL(phy_idx))) {
+ if (tx->has_ready_bitmap &&
+ !(tstamp_ready & BIT_ULL(phy_idx))) {
if (drop_ts)
goto skip_ts_read;
@@ -616,7 +751,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
* from the last cached timestamp. If it is not, skip this for
* now assuming it hasn't yet been captured by hardware.
*/
- if (!drop_ts && tx->verify_cached &&
+ if (!drop_ts && !tx->has_ready_bitmap &&
raw_tstamp == tx->tstamps[idx].cached_tstamp)
continue;
@@ -625,15 +760,15 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
drop_ts = true;
skip_ts_read:
- spin_lock(&tx->lock);
- if (tx->verify_cached && raw_tstamp)
+ spin_lock_irqsave(&tx->lock, flags);
+ if (!tx->has_ready_bitmap && raw_tstamp)
tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
if (test_and_clear_bit(idx, tx->stale))
drop_ts = true;
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
/* It is unlikely but possible that the SKB will have been
* flushed at this point due to link change or teardown.
@@ -684,7 +819,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
/* Read the Tx ready status first */
err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
- if (err || tstamp_ready)
+ if (err)
+ break;
+ else if (tstamp_ready)
return ICE_TX_TSTAMP_WORK_PENDING;
}
@@ -701,6 +838,7 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
{
bool more_timestamps;
+ unsigned long flags;
if (!tx->init)
return ICE_TX_TSTAMP_WORK_DONE;
@@ -709,9 +847,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
ice_ptp_process_tx_tstamp(tx);
/* Check if there are outstanding Tx timestamps */
- spin_lock(&tx->lock);
+ spin_lock_irqsave(&tx->lock, flags);
more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
if (more_timestamps)
return ICE_TX_TSTAMP_WORK_PENDING;
@@ -748,6 +886,7 @@ ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
tx->in_use = in_use;
tx->stale = stale;
tx->init = 1;
+ tx->last_ll_ts_idx_read = -1;
spin_lock_init(&tx->lock);
@@ -765,6 +904,7 @@ static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
struct ice_hw *hw = &pf->hw;
+ unsigned long flags;
u64 tstamp_ready;
int err;
u8 idx;
@@ -788,12 +928,12 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
ice_clear_phy_tstamp(hw, tx->block, phy_idx);
- spin_lock(&tx->lock);
+ spin_lock_irqsave(&tx->lock, flags);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
clear_bit(idx, tx->in_use);
clear_bit(idx, tx->stale);
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
/* Count the number of Tx timestamps flushed */
pf->ptp.tx_hwtstamp_flushed++;
@@ -817,9 +957,27 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
- spin_lock(&tx->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tx->lock, flags);
bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
+}
+
+/**
+ * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
+ * @pf: Board private structure
+ *
+ * Called by the clock owner to flush all the Tx timestamp trackers associated
+ * with the clock.
+ */
+static void
+ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
+{
+ struct ice_ptp_port *port;
+
+ list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
+ ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
}
/**
@@ -832,9 +990,11 @@ ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
- spin_lock(&tx->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tx->lock, flags);
tx->init = 0;
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
/* wait for potentially outstanding interrupt to complete */
synchronize_irq(pf->oicr_irq.virq);
@@ -854,7 +1014,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
- * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
+ * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
* @port: the port this structure tracks
@@ -865,12 +1025,12 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
* registers into chunks based on the port number.
*/
static int
-ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
tx->block = port / ICE_PORTS_PER_QUAD;
- tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822;
- tx->len = INDEX_PER_PORT_E822;
- tx->verify_cached = 0;
+ tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
+ tx->len = INDEX_PER_PORT_E82X;
+ tx->has_ready_bitmap = 1;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -893,7 +1053,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
* verify new timestamps against cached copy of the last read
* timestamp.
*/
- tx->verify_cached = 1;
+ tx->has_ready_bitmap = 0;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1007,26 +1167,6 @@ static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
}
/**
- * ice_ptp_read_time - Read the time from the device
- * @pf: Board private structure
- * @ts: timespec structure to hold the current time value
- * @sts: Optional parameter for holding a pair of system timestamps from
- * the system clock. Will be ignored if NULL is given.
- *
- * This function reads the source clock registers and stores them in a timespec.
- * However, since the registers are 64 bits of nanoseconds, we must convert the
- * result to a timespec before we can return.
- */
-static void
-ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
- struct ptp_system_timestamp *sts)
-{
- u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
-
- *ts = ns_to_timespec64(time_ns);
-}
-
-/**
* ice_ptp_write_init - Set PHC time to provided value
* @pf: Board private structure
* @ts: timespec structure that holds the new time value
@@ -1072,10 +1212,10 @@ static u64 ice_base_incval(struct ice_pf *pf)
if (ice_is_e810(hw))
incval = ICE_PTP_NOMINAL_INCVAL_E810;
- else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
- incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
+ else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
+ incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
else
- incval = UNKNOWN_INCVAL_E822;
+ incval = UNKNOWN_INCVAL_E82X;
dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
incval);
@@ -1104,10 +1244,10 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
/* need to read FIFO state */
if (offs == 0 || offs == 1)
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
&val);
else
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
&val);
if (err) {
@@ -1117,9 +1257,9 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
}
if (offs & 0x1)
- phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S;
+ phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
else
- phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S;
+ phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
if (phy_sts & FIFO_EMPTY) {
port->tx_fifo_busy_cnt = FIFO_OK;
@@ -1135,7 +1275,7 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
dev_dbg(ice_pf_to_dev(pf),
"Port %d Tx FIFO still not empty; resetting quad %d\n",
port->port_num, quad);
- ice_ptp_reset_ts_memory_quad_e822(hw, quad);
+ ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
port->tx_fifo_busy_cnt = FIFO_OK;
return 0;
}
@@ -1180,8 +1320,8 @@ static void ice_ptp_wait_for_offsets(struct kthread_work *work)
tx_err = ice_ptp_check_tx_fifo(port);
if (!tx_err)
- tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num);
- rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num);
+ tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
+ rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
if (tx_err || rx_err) {
/* Tx and/or Rx offset not yet configured, try again later */
kthread_queue_delayed_work(pf->ptp.kworker,
@@ -1210,7 +1350,7 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
- err = ice_stop_phy_timer_e822(hw, port, true);
+ err = ice_stop_phy_timer_e82x(hw, port, true);
if (err)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
port, err);
@@ -1234,6 +1374,7 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
struct ice_pf *pf = ptp_port_to_pf(ptp_port);
u8 port = ptp_port->port_num;
struct ice_hw *hw = &pf->hw;
+ unsigned long flags;
int err;
if (ice_is_e810(hw))
@@ -1247,20 +1388,20 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
/* temporarily disable Tx timestamps while calibrating PHY offset */
- spin_lock(&ptp_port->tx.lock);
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
ptp_port->tx.calibrating = true;
- spin_unlock(&ptp_port->tx.lock);
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
ptp_port->tx_fifo_busy_cnt = 0;
/* Start the PHY timer in Vernier mode */
- err = ice_start_phy_timer_e822(hw, port);
+ err = ice_start_phy_timer_e82x(hw, port);
if (err)
goto out_unlock;
/* Enable Tx timestamps right away */
- spin_lock(&ptp_port->tx.lock);
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
ptp_port->tx.calibrating = false;
- spin_unlock(&ptp_port->tx.lock);
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
@@ -1285,7 +1426,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
@@ -1302,7 +1443,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
case ICE_PHY_E810:
/* Do not reconfigure E810 PHY */
return;
- case ICE_PHY_E822:
+ case ICE_PHY_E82X:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@@ -1311,14 +1452,14 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
}
/**
- * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
+ * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
* @pf: PF private structure
* @ena: bool value to enable or disable interrupt
* @threshold: Minimum number of packets at which intr is triggered
*
* Utility function to enable or disable Tx timestamp interrupt and threshold
*/
-static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
+static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
struct ice_hw *hw = &pf->hw;
int err = 0;
@@ -1328,7 +1469,7 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
&val);
if (err)
break;
@@ -1336,13 +1477,13 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
if (ena) {
val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
- val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) &
- Q_REG_TX_MEM_GBL_CFG_INTR_THR_M);
+ val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M,
+ threshold);
} else {
val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
}
- err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
val);
if (err)
break;
@@ -1482,8 +1623,7 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
* + num_in_channels * tmr_idx
*/
func = 1 + chan + (tmr_idx * 3);
- gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
- GLGEN_GPIO_CTL_PIN_FUNC_M);
+ gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
pf->ptp.ext_ts_chan |= (1 << chan);
} else {
/* clear the values we set to reset defaults */
@@ -1580,7 +1720,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
if (ice_is_e810(hw))
start_time -= E810_OUT_PROP_DELAY_NS;
else
- start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));
+ start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));
/* 2. Write TARGET time */
wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
@@ -1593,7 +1733,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
/* 4. write GPIO CTL reg */
func = 8 + chan + (tmr_idx * 4);
val = GLGEN_GPIO_CTL_PIN_DIR_M |
- ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
+ FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
/* Store the value if requested */
@@ -1766,16 +1906,10 @@ ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- struct ice_hw *hw = &pf->hw;
-
- if (!ice_ptp_lock(hw)) {
- dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
- return -EBUSY;
- }
-
- ice_ptp_read_time(pf, ts, sts);
- ice_ptp_unlock(hw);
+ u64 time_ns;
+ time_ns = ice_ptp_read_src_clk_reg(pf, sts);
+ *ts = ns_to_timespec64(time_ns);
return 0;
}
@@ -1819,7 +1953,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_clkout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (hw->phy_model == ICE_PHY_E822)
+ if (hw->phy_model == ICE_PHY_E82X)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2018,7 +2152,7 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
struct hwtstamp_config *config;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return -EIO;
config = &pf->ptp.tstamp_config;
@@ -2037,10 +2171,10 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- ice_set_tx_tstamp(pf, false);
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
break;
case HWTSTAMP_TX_ON:
- ice_set_tx_tstamp(pf, true);
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
break;
default:
return -ERANGE;
@@ -2048,7 +2182,7 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- ice_set_rx_tstamp(pf, false);
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
@@ -2064,12 +2198,15 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
- ice_set_rx_tstamp(pf, true);
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
+ /* Immediately update the device timestamping mode */
+ ice_ptp_restore_timestamp_mode(pf);
+
return 0;
}
@@ -2085,7 +2222,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return -EAGAIN;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -2103,30 +2240,26 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
}
/**
- * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
- * @rx_ring: Ring to get the VSI info
+ * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
* @rx_desc: Receive descriptor
- * @skb: Particular skb to send timestamp with
+ * @pkt_ctx: Packet context to get the cached time
*
 * The driver receives a notification in the receive descriptor with a timestamp.
- * The timestamp is in ns, so we must convert the result first.
*/
-void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
+u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+ const struct ice_pkt_ctx *pkt_ctx)
{
- struct skb_shared_hwtstamps *hwtstamps;
u64 ts_ns, cached_time;
u32 ts_high;
if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
- return;
+ return 0;
- cached_time = READ_ONCE(rx_ring->cached_phctime);
+ cached_time = READ_ONCE(pkt_ctx->cached_phctime);
/* Do not report a timestamp if we don't have a cached PHC time */
if (!cached_time)
- return;
+ return 0;
/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
* PHC value, rather than accessing the PF. This also allows us to
@@ -2137,9 +2270,7 @@ ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
- hwtstamps = skb_hwtstamps(skb);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
+ return ts_ns;
}
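The comment above points at ice_ptp_extend_32b_ts(), which is not part of this hunk. As a reading aid, a sketch of how such an extension against a cached PHC value typically works, assuming the capture happened within roughly half the 32-bit range of the cached snapshot (the in-tree helper may differ in details):

/* Illustrative sketch: extend a 32-bit nanosecond capture using a cached
 * 64-bit PHC value.
 */
static u64 example_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 phc_lo = lower_32_bits(cached_phc_time);
	u32 delta = in_tstamp - phc_lo;

	/* If the capture appears far "ahead" of the cached low word, assume
	 * it was actually taken just before the snapshot and extend backwards.
	 */
	if (delta > (U32_MAX / 2))
		return cached_phc_time - (phc_lo - in_tstamp);

	return cached_phc_time + delta;
}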
/**
@@ -2354,18 +2485,23 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
*/
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
+ unsigned long flags;
u8 idx;
- spin_lock(&tx->lock);
+ spin_lock_irqsave(&tx->lock, flags);
/* Check that this tracker is accepting new timestamp requests */
if (!ice_ptp_is_tx_tracker_up(tx)) {
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
return -1;
}
/* Find and set the first available index */
- idx = find_first_zero_bit(tx->in_use, tx->len);
+ idx = find_next_zero_bit(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx == tx->len)
+ idx = find_first_zero_bit(tx->in_use, tx->len);
+
if (idx < tx->len) {
/* We got a valid index that no other thread could have set. Store
* a reference to the skb and the start time to allow discarding old
@@ -2379,7 +2515,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
ice_trace(tx_tstamp_request, skb, idx);
}
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
/* return the appropriate PHY timestamp register index, -1 if no
* indexes were available.
@@ -2416,55 +2552,139 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
}
}
+/**
+ * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
+ * @pf: Board private structure
+ *
+ * The device PHY issues Tx timestamp interrupts to the driver for processing
+ * timestamp data from the PHY. It will not interrupt again until all
+ * current timestamp data is read. In rare circumstances, it is possible that
+ * the driver fails to read all outstanding data.
+ *
+ * To avoid getting permanently stuck, periodically check if the PHY has
+ * outstanding timestamp data. If so, trigger an interrupt from software to
+ * process this data.
+ */
+static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ bool trigger_oicr = false;
+ unsigned int i;
+
+ if (ice_is_e810(hw))
+ return;
+
+ if (!ice_pf_src_tmr_owned(pf))
+ return;
+
+ for (i = 0; i < ICE_MAX_QUAD; i++) {
+ u64 tstamp_ready;
+ int err;
+
+ err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
+ if (!err && tstamp_ready) {
+ trigger_oicr = true;
+ break;
+ }
+ }
+
+ if (trigger_oicr) {
+ /* Trigger a software interrupt, to ensure this data
+ * gets processed.
+ */
+ dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
+
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
+}
+
static void ice_ptp_periodic_work(struct kthread_work *work)
{
struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
int err;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
err = ice_ptp_update_cached_phctime(pf);
+ ice_ptp_maybe_trigger_tx_interrupt(pf);
+
/* Run twice a second or reschedule if phc update failed */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
msecs_to_jiffies(err ? 10 : 500));
}
/**
- * ice_ptp_reset - Initialize PTP hardware clock support after reset
+ * ice_ptp_prepare_for_reset - Prepare PTP for reset
* @pf: Board private structure
+ * @reset_type: the reset type being performed
*/
-void ice_ptp_reset(struct ice_pf *pf)
+void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ u8 src_tmr;
+
+ if (ptp->state != ICE_PTP_READY)
+ return;
+
+ ptp->state = ICE_PTP_RESETTING;
+
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_disable_timestamp_mode(pf);
+
+ kthread_cancel_delayed_work_sync(&ptp->work);
+
+ if (reset_type == ICE_RESET_PFR)
+ return;
+
+ ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+
+ /* Disable periodic outputs */
+ ice_ptp_disable_all_clkout(pf);
+
+ src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
+
+ /* Disable source clock */
+ wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Acquire PHC and system timer to restore after reset */
+ ptp->reset_time = ktime_get_real_ns();
+}
+
+/**
+ * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
+ * @pf: Board private structure
+ *
+ * Companion function for ice_ptp_rebuild() which handles tasks that only the
+ * PTP clock owner instance should perform.
+ */
+static int ice_ptp_rebuild_owner(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- int err, itr = 1;
u64 time_diff;
-
- if (test_bit(ICE_PFR_REQ, pf->state))
- goto pfr;
-
- if (!ice_pf_src_tmr_owned(pf))
- goto reset_ts;
+ int err;
err = ice_ptp_init_phc(hw);
if (err)
- goto err;
+ return err;
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
- goto err;
+ return err;
}
/* Write the increment time value to PHY and LAN */
err = ice_ptp_write_incval(hw, ice_base_incval(pf));
if (err) {
ice_ptp_unlock(hw);
- goto err;
+ return err;
}
/* Write the initial Time value to PHY and LAN using the cached PHC
@@ -2480,37 +2700,54 @@ void ice_ptp_reset(struct ice_pf *pf)
err = ice_ptp_write_init(pf, &ts);
if (err) {
ice_ptp_unlock(hw);
- goto err;
+ return err;
}
/* Release the global hardware lock */
ice_ptp_unlock(hw);
+ /* Flush software tracking of any outstanding timestamps since we're
+ * about to flush the PHY timestamp block.
+ */
+ ice_ptp_flush_all_tx_tracker(pf);
+
if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
- err = ice_ptp_tx_ena_intr(pf, true, itr);
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
if (err)
- goto err;
+ return err;
+
+ ice_ptp_restart_all_phy(pf);
}
-reset_ts:
- /* Restart the PHY timestamping block */
- ice_ptp_reset_phy_timestamping(pf);
+ return 0;
+}
-pfr:
- /* Init Tx structures */
- if (ice_is_e810(&pf->hw)) {
- err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
- } else {
- kthread_init_delayed_work(&ptp->port.ov_work,
- ice_ptp_wait_for_offsets);
- err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
- ptp->port.port_num);
- }
- if (err)
+/**
+ * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
+ * @pf: Board private structure
+ * @reset_type: the reset type being performed
+ */
+void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ int err;
+
+ if (ptp->state == ICE_PTP_READY) {
+ ice_ptp_prepare_for_reset(pf, reset_type);
+ } else if (ptp->state != ICE_PTP_RESETTING) {
+ err = -EINVAL;
+ dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
goto err;
+ }
+
+ if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
+ err = ice_ptp_rebuild_owner(pf);
+ if (err)
+ goto err;
+ }
- set_bit(ICE_FLAG_PTP, pf->flags);
+ ptp->state = ICE_PTP_READY;
/* Start periodic work going */
kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
@@ -2519,6 +2756,7 @@ pfr:
return;
err:
+ ptp->state = ICE_PTP_ERROR;
dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
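The prepare/rebuild split implies a calling order in the reset path outside this file. A hedged sketch of that sequence, assuming the PF reset flow simply passes its reset_type through both halves:

/* Illustrative only: expected ordering around a reset. */
static void example_ptp_reset_flow(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	/* Quiesce timestamping; non-PF resets also stop the source clock */
	ice_ptp_prepare_for_reset(pf, reset_type);

	/* ... hardware reset and core driver rebuild happen here ... */

	/* Re-init the clock owner if applicable and restart periodic work */
	ice_ptp_rebuild(pf, reset_type);
}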
@@ -2668,6 +2906,8 @@ static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
ice_get_ptp_src_clock_index(&pf->hw));
+ if (!name)
+ return -ENOMEM;
aux_driver->name = name;
aux_driver->shutdown = ice_ptp_auxbus_shutdown;
@@ -2726,39 +2966,6 @@ int ice_ptp_clock_index(struct ice_pf *pf)
}
/**
- * ice_ptp_prepare_for_reset - Prepare PTP for reset
- * @pf: Board private structure
- */
-void ice_ptp_prepare_for_reset(struct ice_pf *pf)
-{
- struct ice_ptp *ptp = &pf->ptp;
- u8 src_tmr;
-
- clear_bit(ICE_FLAG_PTP, pf->flags);
-
- /* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
-
- kthread_cancel_delayed_work_sync(&ptp->work);
-
- if (test_bit(ICE_PFR_REQ, pf->state))
- return;
-
- ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
-
- /* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
-
- src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
-
- /* Disable source clock */
- wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
-
- /* Acquire PHC and system timer to restore after reset */
- ptp->reset_time = ktime_get_real_ns();
-}
-
-/**
* ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
* @pf: Board private structure
*
@@ -2770,7 +2977,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- int err, itr = 1;
+ int err;
err = ice_ptp_init_phc(hw);
if (err) {
@@ -2803,17 +3010,9 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
- if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL) {
- /* The clock owner for this device type handles the timestamp
- * interrupt for all ports.
- */
- ice_ptp_configure_tx_tstamp(pf, true);
-
- /* React on all quads interrupts for E82x */
- wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
-
+ if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
- err = ice_ptp_tx_ena_intr(pf, true, itr);
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
if (err)
goto err_exit;
}
@@ -2880,18 +3079,11 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
- case ICE_PHY_E822:
- /* Non-owner PFs don't react to any interrupts on E82x,
- * neither on own quad nor on others
- */
- if (!ice_ptp_pf_handles_tx_interrupt(pf)) {
- ice_ptp_configure_tx_tstamp(pf, false);
- wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
- }
+ case ICE_PHY_E82X:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
- return ice_ptp_init_tx_e822(pf, &ptp_port->tx,
+ return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
default:
return -ENODEV;
@@ -2929,6 +3121,8 @@ static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
ice_get_ptp_src_clock_index(&pf->hw));
+ if (!name)
+ return -ENOMEM;
aux_dev->name = name;
aux_dev->id = id;
@@ -2978,7 +3172,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
switch (pf->hw.phy_model) {
- case ICE_PHY_E822:
+ case ICE_PHY_E82X:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
@@ -3011,6 +3205,8 @@ void ice_ptp_init(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
int err;
+ ptp->state = ICE_PTP_INITIALIZING;
+
ice_ptp_init_phy_model(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3032,12 +3228,16 @@ void ice_ptp_init(struct ice_pf *pf)
/* Start the PHY timestamping block */
ice_ptp_reset_phy_timestamping(pf);
- set_bit(ICE_FLAG_PTP, pf->flags);
- err = ice_ptp_init_work(pf, ptp);
+ /* Configure initial Tx interrupt settings */
+ ice_ptp_cfg_tx_interrupt(pf);
+
+ err = ice_ptp_create_auxbus_device(pf);
if (err)
goto err;
- err = ice_ptp_create_auxbus_device(pf);
+ ptp->state = ICE_PTP_READY;
+
+ err = ice_ptp_init_work(pf, ptp);
if (err)
goto err;
@@ -3050,7 +3250,7 @@ err:
ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- clear_bit(ICE_FLAG_PTP, pf->flags);
+ ptp->state = ICE_PTP_ERROR;
dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
@@ -3063,18 +3263,18 @@ err:
*/
void ice_ptp_release(struct ice_pf *pf)
{
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
+ pf->ptp.state = ICE_PTP_UNINIT;
+
/* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
+ ice_ptp_disable_timestamp_mode(pf);
ice_ptp_remove_auxbus_device(pf);
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
- clear_bit(ICE_FLAG_PTP, pf->flags);
-
kthread_cancel_delayed_work_sync(&pf->ptp.work);
ice_ptp_port_phy_stop(&pf->ptp.port);
@@ -3084,6 +3284,9 @@ void ice_ptp_release(struct ice_pf *pf)
pf->ptp.kworker = NULL;
}
+ if (ice_pf_src_tmr_owned(pf))
+ ice_ptp_unregister_auxbus_driver(pf);
+
if (!pf->ptp.clock)
return;
@@ -3093,7 +3296,5 @@ void ice_ptp_release(struct ice_pf *pf)
ptp_clock_unregister(pf->ptp.clock);
pf->ptp.clock = NULL;
- ice_ptp_unregister_auxbus_driver(pf);
-
dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 8f6f94392756..3af20025043a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -100,7 +100,7 @@ struct ice_perout_channel {
* the last timestamp we read for a given index. If the current timestamp
* value is the same as the cached value, we assume a new timestamp hasn't
* been captured. This avoids reporting stale timestamps to the stack. This is
- * only done if the verify_cached flag is set in ice_ptp_tx structure.
+ * only done if the has_ready_bitmap flag is not set in the ice_ptp_tx structure.
*/
struct ice_tx_tstamp {
struct sk_buff *skb;
@@ -130,7 +130,10 @@ enum ice_tx_tstamp_work {
* @init: if true, the tracker is initialized;
* @calibrating: if true, the PHY is calibrating the Tx offset. During this
* window, timestamps are temporarily disabled.
- * @verify_cached: if true, verify new timestamp differs from last read value
+ * @has_ready_bitmap: if true, the hardware has a valid Tx timestamp ready
+ * bitmap register. If false, fall back to verifying new
+ * timestamp values against the previously cached copy.
+ * @last_ll_ts_idx_read: index of the last LL TS read by the FW
*/
struct ice_ptp_tx {
spinlock_t lock; /* lock protecting in_use bitmap */
@@ -142,12 +145,13 @@ struct ice_ptp_tx {
u8 len;
u8 init : 1;
u8 calibrating : 1;
- u8 verify_cached : 1;
+ u8 has_ready_bitmap : 1;
+ s8 last_ll_ts_idx_read;
};
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
-#define INDEX_PER_PORT_E822 16
+#define INDEX_PER_PORT_E82X 16
#define INDEX_PER_PORT_E810 64
/**
@@ -201,8 +205,17 @@ struct ice_ptp_port_owner {
#define GLTSYN_TGT_H_IDX_MAX 4
+enum ice_ptp_state {
+ ICE_PTP_UNINIT = 0,
+ ICE_PTP_INITIALIZING,
+ ICE_PTP_READY,
+ ICE_PTP_RESETTING,
+ ICE_PTP_ERROR,
+};
+
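The states above form a small lifecycle; summarizing the transitions implied by the .c hunks earlier in this patch (a reading aid, not code from the patch):

/*
 * Illustrative lifecycle:
 *
 *   UNINIT -> INITIALIZING -> READY   (ice_ptp_init)
 *   READY  -> RESETTING    -> READY   (ice_ptp_prepare_for_reset + ice_ptp_rebuild)
 *   READY  -> UNINIT                  (ice_ptp_release)
 *   any    -> ERROR                   (init or rebuild failure)
 */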
/**
* struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
+ * @state: current state of PTP state machine
* @tx_interrupt_mode: the TX interrupt mode for the PTP clock
* @port: data for the PHY port initialization procedure
* @ports_owner: data for the auxiliary driver owner
@@ -225,6 +238,7 @@ struct ice_ptp_port_owner {
* @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
+ enum ice_ptp_state state;
enum ice_ptp_tx_interrupt tx_interrupt_mode;
struct ice_ptp_port port;
struct ice_ptp_port_owner ports_owner;
@@ -292,17 +306,19 @@ int ice_ptp_clock_index(struct ice_pf *pf);
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
-void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
+void ice_ptp_restore_timestamp_mode(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
+void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
+void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
-void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
-void ice_ptp_reset(struct ice_pf *pf);
-void ice_ptp_prepare_for_reset(struct ice_pf *pf);
+u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+ const struct ice_pkt_ctx *pkt_ctx);
+void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
+void ice_ptp_prepare_for_reset(struct ice_pf *pf,
+ enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
@@ -317,8 +333,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
return -EOPNOTSUPP;
}
-static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { }
-
+static inline void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) { }
static inline void ice_ptp_extts_event(struct ice_pf *pf) { }
static inline s8
ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
@@ -326,15 +341,32 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
return -1;
}
+static inline void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
+{ }
+
+static inline void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { }
+
static inline bool ice_ptp_process_ts(struct ice_pf *pf)
{
return true;
}
-static inline void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
-static inline void ice_ptp_reset(struct ice_pf *pf) { }
-static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
+
+static inline u64
+ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+ const struct ice_pkt_ctx *pkt_ctx)
+{
+ return 0;
+}
+
+static inline void ice_ptp_rebuild(struct ice_pf *pf,
+ enum ice_reset_req reset_type)
+{
+}
+
+static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
+ enum ice_reset_req reset_type)
+{
+}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 4109aa3b2fcd..2c4dab0c48ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -9,17 +9,17 @@
*/
/* Constants defined for the PTP 1588 clock hardware. */
-/* struct ice_time_ref_info_e822
+/* struct ice_time_ref_info_e82x
*
* E822 hardware can use different sources as the reference for the PTP
* hardware clock. Each clock has different characteristics such as a slightly
* different frequency, etc.
*
* This lookup table defines several constants that depend on the current time
- * reference. See the struct ice_time_ref_info_e822 for information about the
+ * reference. See the struct ice_time_ref_info_e82x for information about the
* meaning of each constant.
*/
-const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
+const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
{
/* pll_freq */
@@ -81,7 +81,7 @@ const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
},
};
-const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
{
/* refclk_pre_div */
@@ -155,7 +155,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
},
};
-/* struct ice_vernier_info_e822
+/* struct ice_vernier_info_e82x
*
* E822 hardware calibrates the delay of the timestamp indication from the
* actual packet transmission or reception during the initialization of the
@@ -168,7 +168,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
* used by this link speed, and that the register should be cleared by writing
* 0. Other values specify the clock frequency in Hz.
*/
-const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
+const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
/* ICE_PTP_LNK_SPD_1G */
{
/* tx_par_clk */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 6d573908de7a..2b9423a173bb 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -274,6 +274,9 @@ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+
+ guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
ice_flush(hw);
}
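guard(spinlock)(...) comes from the kernel's scope-based cleanup helpers (linux/cleanup.h); the lock is taken at the declaration and released automatically when the function returns. Conceptually the hunk above is equivalent to the explicit form below (illustrative equivalent only):

	spin_lock(&pf->adapter->ptp_gltsyn_time_lock);
	wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
	ice_flush(hw);
	spin_unlock(&pf->adapter->ptp_gltsyn_time_lock);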
@@ -284,19 +287,19 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
*/
/**
- * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
+ * ice_fill_phy_msg_e82x - Fill message data for a PHY register access
* @msg: the PHY message buffer to fill in
* @port: the port to access
* @offset: the register offset
*/
static void
-ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
{
int phy_port, phy, quadtype;
- phy_port = port % ICE_PORTS_PER_PHY_E822;
- phy = port / ICE_PORTS_PER_PHY_E822;
- quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822;
+ phy_port = port % ICE_PORTS_PER_PHY_E82X;
+ phy = port / ICE_PORTS_PER_PHY_E82X;
+ quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X;
if (quadtype == 0) {
msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
@@ -315,7 +318,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
}
/**
- * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
+ * ice_is_64b_phy_reg_e82x - Check if this is a 64bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 64bit register
*
@@ -323,7 +326,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
* represented as two 32bit registers. If it is, return the appropriate high
* register offset to use.
*/
-static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+static bool ice_is_64b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
{
switch (low_addr) {
case P_REG_PAR_PCS_TX_OFFSET_L:
@@ -368,7 +371,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
}
/**
- * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
+ * ice_is_40b_phy_reg_e82x - Check if this is a 40bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 40bit value
*
@@ -377,7 +380,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
* upper 32 bits in the high register. If it is, return the appropriate high
* register offset to use.
*/
-static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
{
switch (low_addr) {
case P_REG_TIMETUS_L:
@@ -413,7 +416,7 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
}
/**
- * ice_read_phy_reg_e822 - Read a PHY register
+ * ice_read_phy_reg_e82x - Read a PHY register
* @hw: pointer to the HW struct
* @port: PHY port to read from
* @offset: PHY register offset to read
@@ -422,12 +425,12 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
* Read a PHY register for the given port over the device sideband queue.
*/
static int
-ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
+ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e822(&msg, port, offset);
+ ice_fill_phy_msg_e82x(&msg, port, offset);
msg.opcode = ice_sbq_msg_rd;
err = ice_sbq_rw_reg(hw, &msg);
@@ -443,7 +446,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
}
/**
- * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
+ * ice_read_64b_phy_reg_e82x - Read a 64bit value from PHY registers
* @hw: pointer to the HW struct
* @port: PHY port to read from
* @low_addr: offset of the lower register to read from
@@ -455,7 +458,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
* known to be two parts of a 64bit value.
*/
static int
-ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
+ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
{
u32 low, high;
u16 high_addr;
@@ -464,20 +467,20 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
/* Only operate on registers known to be split into two 32bit
* registers.
*/
- if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
low_addr);
return -EINVAL;
}
- err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
+ err = ice_read_phy_reg_e82x(hw, port, low_addr, &low);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d",
low_addr, err);
return err;
}
- err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
+ err = ice_read_phy_reg_e82x(hw, port, high_addr, &high);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d",
high_addr, err);
@@ -490,7 +493,7 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
}
/**
- * ice_write_phy_reg_e822 - Write a PHY register
+ * ice_write_phy_reg_e82x - Write a PHY register
* @hw: pointer to the HW struct
* @port: PHY port to write to
* @offset: PHY register offset to write
@@ -499,12 +502,12 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
* Write a PHY register for the given port over the device sideband queue.
*/
static int
-ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
+ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e822(&msg, port, offset);
+ ice_fill_phy_msg_e82x(&msg, port, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
@@ -519,7 +522,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
}
/**
- * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
+ * ice_write_40b_phy_reg_e82x - Write a 40b value to the PHY
* @hw: pointer to the HW struct
* @port: port to write to
* @low_addr: offset of the low register
@@ -529,7 +532,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
* it up into two chunks, the lower 8 bits and the upper 32 bits.
*/
static int
-ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
{
u32 low, high;
u16 high_addr;
@@ -538,7 +541,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
/* Only operate on registers known to be split into a lower 8 bit
* register and an upper 32 bit register.
*/
- if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
+ if (!ice_is_40b_phy_reg_e82x(low_addr, &high_addr)) {
ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
low_addr);
return -EINVAL;
@@ -547,14 +550,14 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
low = (u32)(val & P_REG_40B_LOW_M);
high = (u32)(val >> P_REG_40B_HIGH_S);
- err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
low_addr, err);
return err;
}
- err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
high_addr, err);
@@ -565,7 +568,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
}
/**
- * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
+ * ice_write_64b_phy_reg_e82x - Write a 64bit value to PHY registers
* @hw: pointer to the HW struct
* @port: PHY port to read from
* @low_addr: offset of the lower register to read from
@@ -577,7 +580,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
* a 64bit value.
*/
static int
-ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
{
u32 low, high;
u16 high_addr;
@@ -586,7 +589,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
/* Only operate on registers known to be split into two 32bit
* registers.
*/
- if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
low_addr);
return -EINVAL;
@@ -595,14 +598,14 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
low = lower_32_bits(val);
high = upper_32_bits(val);
- err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
low_addr, err);
return err;
}
- err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
high_addr, err);
@@ -613,7 +616,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
}
/**
- * ice_fill_quad_msg_e822 - Fill message data for quad register access
+ * ice_fill_quad_msg_e82x - Fill message data for quad register access
* @msg: the PHY message buffer to fill in
* @quad: the quad to access
* @offset: the register offset
@@ -622,7 +625,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
* multiple PHYs.
*/
static int
-ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
{
u32 addr;
@@ -631,7 +634,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
msg->dest_dev = rmn_0;
- if ((quad % ICE_QUADS_PER_PHY_E822) == 0)
+ if ((quad % ICE_QUADS_PER_PHY_E82X) == 0)
addr = Q_0_BASE + offset;
else
addr = Q_1_BASE + offset;
@@ -643,7 +646,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
}
/**
- * ice_read_quad_reg_e822 - Read a PHY quad register
+ * ice_read_quad_reg_e82x - Read a PHY quad register
* @hw: pointer to the HW struct
* @quad: quad to read from
* @offset: quad register offset to read
@@ -653,12 +656,12 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
* shared between multiple PHYs.
*/
int
-ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
+ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(&msg, quad, offset);
if (err)
return err;
@@ -677,7 +680,7 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
}
/**
- * ice_write_quad_reg_e822 - Write a PHY quad register
+ * ice_write_quad_reg_e82x - Write a PHY quad register
* @hw: pointer to the HW struct
* @quad: quad to write to
* @offset: quad register offset to write
@@ -687,12 +690,12 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
* shared between multiple PHYs.
*/
int
-ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
+ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(&msg, quad, offset);
if (err)
return err;
@@ -710,7 +713,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
}
/**
- * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
+ * ice_read_phy_tstamp_e82x - Read a PHY timestamp out of the quad block
* @hw: pointer to the HW struct
* @quad: the quad to read from
* @idx: the timestamp index to read
@@ -721,7 +724,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
* family of devices.
*/
static int
-ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
+ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
{
u16 lo_addr, hi_addr;
u32 lo, hi;
@@ -730,14 +733,14 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
- err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
+ err = ice_read_quad_reg_e82x(hw, quad, lo_addr, &lo);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
err);
return err;
}
- err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
+ err = ice_read_quad_reg_e82x(hw, quad, hi_addr, &hi);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
err);
@@ -754,7 +757,7 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
}
/**
- * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
+ * ice_clear_phy_tstamp_e82x - Clear a timestamp from the quad block
* @hw: pointer to the HW struct
* @quad: the quad to read from
* @idx: the timestamp index to reset
@@ -770,18 +773,18 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
*
* To directly clear the contents of the timestamp block entirely, discarding
* all timestamp data at once, software should instead use
- * ice_ptp_reset_ts_memory_quad_e822().
+ * ice_ptp_reset_ts_memory_quad_e82x().
*
* This function should only be called on an idx whose bit is set according to
* ice_get_phy_tx_tstamp_ready().
*/
static int
-ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
+ice_clear_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx)
{
u64 unused_tstamp;
int err;
- err = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp);
+ err = ice_read_phy_tstamp_e82x(hw, quad, idx, &unused_tstamp);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n",
quad, idx, err);
@@ -792,33 +795,33 @@ ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
}
/**
- * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block
+ * ice_ptp_reset_ts_memory_quad_e82x - Clear all timestamps from the quad block
* @hw: pointer to the HW struct
* @quad: the quad to read from
*
* Clear all timestamps from the PHY quad block that is shared between the
* internal PHYs on the E822 devices.
*/
-void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad)
+void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad)
{
- ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
- ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
+ ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
+ ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
}
/**
- * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks
+ * ice_ptp_reset_ts_memory_e82x - Clear all timestamps from all quad blocks
* @hw: pointer to the HW struct
*/
-static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw)
+static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw)
{
unsigned int quad;
for (quad = 0; quad < ICE_MAX_QUAD; quad++)
- ice_ptp_reset_ts_memory_quad_e822(hw, quad);
+ ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
}
/**
- * ice_read_cgu_reg_e822 - Read a CGU register
+ * ice_read_cgu_reg_e82x - Read a CGU register
* @hw: pointer to the HW struct
* @addr: Register address to read
* @val: storage for register value read
@@ -827,7 +830,7 @@ static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw)
* applicable to E822 devices.
*/
static int
-ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
+ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg;
int err;
@@ -850,7 +853,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
}
/**
- * ice_write_cgu_reg_e822 - Write a CGU register
+ * ice_write_cgu_reg_e82x - Write a CGU register
* @hw: pointer to the HW struct
* @addr: Register address to write
* @val: value to write into the register
@@ -859,7 +862,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
* applicable to E822 devices.
*/
static int
-ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
+ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg;
int err;
@@ -925,7 +928,7 @@ static const char *ice_clk_src_str(u8 clk_src)
}
/**
- * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
+ * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
* @hw: pointer to the HW struct
* @clk_freq: Clock frequency to program
* @clk_src: Clock source to select (TIME_REF, or TCX0)
@@ -934,7 +937,7 @@ static const char *ice_clk_src_str(u8 clk_src)
* time reference, enabling the PLL which drives the PTP hardware clock.
*/
static int
-ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
+ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
enum ice_clk_src clk_src)
{
union tspll_ro_bwm_lf bwm_lf;
@@ -963,15 +966,15 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
return -EINVAL;
}
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
if (err)
return err;
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
if (err)
return err;
- err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
if (err)
return err;
@@ -986,43 +989,43 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
if (dw24.field.ts_pll_enable) {
dw24.field.ts_pll_enable = 0;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
if (err)
return err;
}
/* Set the frequency */
dw9.field.time_ref_freq_sel = clk_freq;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
if (err)
return err;
/* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
if (err)
return err;
dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
dw19.field.tspll_ndivratio = 1;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
if (err)
return err;
/* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
if (err)
return err;
dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
dw22.field.time1588clk_sel_div2 = 0;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
if (err)
return err;
/* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
if (err)
return err;
@@ -1030,21 +1033,21 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
dw24.field.time_ref_sel = clk_src;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
if (err)
return err;
/* Finally, enable the PLL */
dw24.field.ts_pll_enable = 1;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
if (err)
return err;
/* Wait to verify if the PLL locks */
usleep_range(1000, 5000);
- err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
if (err)
return err;
@@ -1064,18 +1067,18 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
}
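[Editor's note] The hunk above only renames the CGU helpers, but the ordering it preserves is the interesting part: the TS PLL is disabled before the frequency select and divisors are reprogrammed, and lock is confirmed by polling a read-only status word after a short settle delay. A minimal userspace sketch of that ordering follows; the cgu_read()/cgu_write() accessors, register indices and bit layout are invented stand-ins for the real sideband interface, not the driver's API.

#include <stdio.h>

/* Invented stand-ins for the driver's sideband register accessors. */
static unsigned int regs[64];
static int cgu_read(int addr, unsigned int *val) { *val = regs[addr]; return 0; }
static int cgu_write(int addr, unsigned int val) { regs[addr] = val; return 0; }

enum { DW9 = 9, DW19 = 19, DW22 = 22, DW24 = 24, BWM_LF = 30 };
#define PLL_ENABLE (1u << 0)
#define PLL_LOCKED (1u << 1)   /* pretend lock bit in the read-only word */

static int cfg_pll(unsigned int freq_sel, unsigned int fbdiv,
		   unsigned int postdiv, unsigned int clk_src)
{
	unsigned int dw24, lf;

	if (cgu_read(DW24, &dw24))
		return -1;
	if (dw24 & PLL_ENABLE)                 /* 1. disable before touching dividers */
		cgu_write(DW24, dw24 & ~PLL_ENABLE);

	cgu_write(DW9, freq_sel);              /* 2. frequency select                 */
	cgu_write(DW19, fbdiv);                /* 3. feedback divisor                 */
	cgu_write(DW22, postdiv);              /* 4. post divisor                     */
	cgu_write(DW24, clk_src);              /* 5. pre divisor + clock source       */
	cgu_write(DW24, clk_src | PLL_ENABLE); /* 6. finally enable the PLL           */

	regs[BWM_LF] |= PLL_LOCKED;            /* fake hardware reporting lock        */
	/* 7. after a short delay, poll the read-only lock indicator */
	cgu_read(BWM_LF, &lf);
	return (lf & PLL_LOCKED) ? 0 : -1;
}

int main(void)
{
	printf("pll %s\n", cfg_pll(3, 197, 6, 0) ? "failed to lock" : "locked");
	return 0;
}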
/**
- * ice_init_cgu_e822 - Initialize CGU with settings from firmware
+ * ice_init_cgu_e82x - Initialize CGU with settings from firmware
* @hw: pointer to the HW structure
*
* Initialize the Clock Generation Unit of the E822 device.
*/
-static int ice_init_cgu_e822(struct ice_hw *hw)
+static int ice_init_cgu_e82x(struct ice_hw *hw)
{
struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
union tspll_cntr_bist_settings cntr_bist;
int err;
- err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
&cntr_bist.val);
if (err)
return err;
@@ -1084,7 +1087,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw)
cntr_bist.field.i_plllock_sel_0 = 0;
cntr_bist.field.i_plllock_sel_1 = 0;
- err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
cntr_bist.val);
if (err)
return err;
@@ -1092,7 +1095,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw)
/* Configure the CGU PLL using the parameters from the function
* capabilities.
*/
- err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
+ err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
(enum ice_clk_src)ts_info->clk_src);
if (err)
return err;
@@ -1113,7 +1116,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_WL,
PTP_VERNIER_WL);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
@@ -1126,12 +1129,12 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
}
/**
- * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
+ * ice_ptp_init_phc_e82x - Perform E822 specific PHC initialization
* @hw: pointer to HW struct
*
* Perform PHC initialization steps specific to E822 devices.
*/
-static int ice_ptp_init_phc_e822(struct ice_hw *hw)
+static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
{
int err;
u32 regval;
@@ -1145,7 +1148,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
wr32(hw, PF_SB_REM_DEV_CTL, regval);
/* Initialize the Clock Generation Unit */
- err = ice_init_cgu_e822(hw);
+ err = ice_init_cgu_e82x(hw);
if (err)
return err;
@@ -1154,7 +1157,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
}
/**
- * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
+ * ice_ptp_prep_phy_time_e82x - Prepare PHY port with initial time
* @hw: pointer to the HW struct
* @time: Time to initialize the PHY port clocks to
*
@@ -1164,7 +1167,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
* units of nominal nanoseconds.
*/
static int
-ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
+ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time)
{
u64 phy_time;
u8 port;
@@ -1177,14 +1180,14 @@ ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
/* Tx case */
- err = ice_write_64b_phy_reg_e822(hw, port,
+ err = ice_write_64b_phy_reg_e82x(hw, port,
P_REG_TX_TIMER_INC_PRE_L,
phy_time);
if (err)
goto exit_err;
/* Rx case */
- err = ice_write_64b_phy_reg_e822(hw, port,
+ err = ice_write_64b_phy_reg_e82x(hw, port,
P_REG_RX_TIMER_INC_PRE_L,
phy_time);
if (err)
@@ -1201,7 +1204,7 @@ exit_err:
}
/**
- * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
+ * ice_ptp_prep_port_adj_e82x - Prepare a single port for time adjust
* @hw: pointer to HW struct
* @port: Port number to be programmed
* @time: time in cycles to adjust the port Tx and Rx clocks
@@ -1216,7 +1219,7 @@ exit_err:
* Negative adjustments are supported using 2s complement arithmetic.
*/
static int
-ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
+ice_ptp_prep_port_adj_e82x(struct ice_hw *hw, u8 port, s64 time)
{
u32 l_time, u_time;
int err;
@@ -1225,23 +1228,23 @@ ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
u_time = upper_32_bits(time);
/* Tx case */
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L,
l_time);
if (err)
goto exit_err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_U,
u_time);
if (err)
goto exit_err;
/* Rx case */
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L,
l_time);
if (err)
goto exit_err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_U,
u_time);
if (err)
goto exit_err;
@@ -1255,7 +1258,7 @@ exit_err:
}
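[Editor's note] ice_ptp_prep_port_adj_e82x() writes the signed 64-bit cycle adjustment as two 32-bit halves; because the value is written in two's complement, a negative adjustment needs no special casing. A small self-contained check of that split (the equivalent of lower_32_bits()/upper_32_bits()), assuming nothing about the register layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t adj = -1500;                  /* e.g. pull the port timer back 1500 cycles */
	uint64_t raw = (uint64_t)adj;         /* two's complement representation           */
	uint32_t lo = (uint32_t)raw;          /* what lower_32_bits() would return         */
	uint32_t hi = (uint32_t)(raw >> 32);  /* what upper_32_bits() would return         */

	/* The PHY reassembles hi:lo and interprets it as signed again. */
	int64_t back = (int64_t)(((uint64_t)hi << 32) | lo);
	printf("lo=0x%08x hi=0x%08x round-trip=%lld\n", lo, hi, (long long)back);
	return 0;
}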
/**
- * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
+ * ice_ptp_prep_phy_adj_e82x - Prep PHY ports for a time adjustment
* @hw: pointer to HW struct
* @adj: adjustment in nanoseconds
*
@@ -1264,7 +1267,7 @@ exit_err:
* ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
*/
static int
-ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
+ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj)
{
s64 cycles;
u8 port;
@@ -1281,7 +1284,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
+ err = ice_ptp_prep_port_adj_e82x(hw, port, cycles);
if (err)
return err;
}
@@ -1290,7 +1293,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
}
/**
- * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment
+ * ice_ptp_prep_phy_incval_e82x - Prepare PHY ports for time adjustment
* @hw: pointer to HW struct
* @incval: new increment value to prepare
*
@@ -1299,13 +1302,13 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
* issuing an ICE_PTP_INIT_INCVAL command.
*/
static int
-ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
+ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval)
{
int err;
u8 port;
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L,
incval);
if (err)
goto exit_err;
@@ -1337,7 +1340,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
int err;
/* Tx case */
- err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
+ err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
err);
@@ -1348,7 +1351,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
(unsigned long long)*tx_ts);
/* Rx case */
- err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
+ err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
err);
@@ -1362,7 +1365,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
}
/**
- * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command
+ * ice_ptp_write_port_cmd_e82x - Prepare a single PHY port for a timer command
* @hw: pointer to HW struct
* @port: Port to which cmd has to be sent
* @cmd: Command to be sent to the port
@@ -1372,8 +1375,8 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
* Do not use this function directly. If you want to configure exactly one
* port, use ice_ptp_one_port_cmd() instead.
*/
-static int
-ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val, val;
u8 tmr_idx;
@@ -1403,7 +1406,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
/* Tx case */
/* Read, modify, write */
- err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
err);
@@ -1414,7 +1417,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
val &= ~TS_CMD_MASK;
val |= cmd_val;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
err);
@@ -1423,7 +1426,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
/* Rx case */
/* Read, modify, write */
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
err);
@@ -1434,7 +1437,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
val &= ~TS_CMD_MASK;
val |= cmd_val;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
err);
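[Editor's note] Both the Tx and Rx halves of ice_ptp_write_port_cmd_e82x() follow the same read-modify-write shape: fetch the register, clear the command field, OR in the new command, write it back, so unrelated bits survive. A hedged sketch of that pattern; the mask value below is made up, since the real TS_CMD_MASK is not shown in this hunk.

#include <stdint.h>
#include <stdio.h>

#define TS_CMD_MASK 0xFFu    /* assumption: command lives in the low byte */

static uint32_t reg;         /* stand-in for P_REG_TX_TMR_CMD / P_REG_RX_TMR_CMD */

static void write_tmr_cmd(uint32_t cmd_val)
{
	uint32_t val = reg;      /* read               */
	val &= ~TS_CMD_MASK;     /* clear old command  */
	val |= cmd_val;          /* set new command    */
	reg = val;               /* write back         */
}

int main(void)
{
	reg = 0xABCD0012;        /* pretend other bits must be preserved */
	write_tmr_cmd(0x5);
	printf("reg=0x%08x\n", (unsigned)reg);  /* low byte now 0x05, rest untouched */
	return 0;
}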
@@ -1469,7 +1472,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
else
cmd = ICE_PTP_NOP;
- err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
if (err)
return err;
}
@@ -1478,7 +1481,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
}
/**
- * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
+ * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command
* @hw: pointer to the HW struct
* @cmd: timer command to prepare
*
@@ -1486,14 +1489,14 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
* command.
*/
static int
-ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
u8 port;
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
if (err)
return err;
}
@@ -1509,7 +1512,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
/**
- * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
+ * ice_phy_get_speed_and_fec_e82x - Get link speed and FEC based on serdes mode
* @hw: pointer to HW struct
* @port: the port to read from
* @link_out: if non-NULL, holds link speed on success
@@ -1519,7 +1522,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
* algorithm.
*/
static int
-ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
+ice_phy_get_speed_and_fec_e82x(struct ice_hw *hw, u8 port,
enum ice_ptp_link_spd *link_out,
enum ice_ptp_fec_mode *fec_out)
{
@@ -1528,7 +1531,7 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
u32 serdes;
int err;
- err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_LINK_SPEED, &serdes);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
return err;
@@ -1585,18 +1588,18 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
}
/**
- * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
+ * ice_phy_cfg_lane_e82x - Configure PHY quad for single/multi-lane timestamp
* @hw: pointer to HW struct
* @port: to configure the quad for
*/
-static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
+static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port)
{
enum ice_ptp_link_spd link_spd;
int err;
u32 val;
u8 quad;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, NULL);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
err);
@@ -1605,7 +1608,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
quad = port / ICE_PORTS_PER_QUAD;
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
err);
@@ -1617,7 +1620,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
else
val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
- err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+ err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
err);
@@ -1626,7 +1629,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
+ * ice_phy_cfg_uix_e82x - Configure Serdes UI to TU conversion for E822
* @hw: pointer to the HW structure
* @port: the port to configure
*
@@ -1671,12 +1674,12 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
* a divide by 390,625,000. This does lose some precision, but avoids
* miscalculation due to arithmetic overflow.
*/
-static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
+static int ice_phy_cfg_uix_e82x(struct ice_hw *hw, u8 port)
{
u64 cur_freq, clk_incval, tu_per_sec, uix;
int err;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second divided by 256 */
@@ -1688,7 +1691,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
/* Program the 10Gb/40Gb conversion ratio */
uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_10G_40G_L,
uix);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
@@ -1699,7 +1702,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
/* Program the 25Gb/100Gb conversion ratio */
uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_25G_100G_L,
uix);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
@@ -1711,7 +1714,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
}
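[Editor's note] The divide by 390,625,000 in ice_phy_cfg_uix_e82x() is just (TUs-per-second x line UI) / 100,000,000,000 rearranged so the 64-bit multiply cannot overflow: the TU count is pre-divided by 256 and 100,000,000,000 / 256 = 390,625,000. A quick check of that identity with illustrative figures; the frequency, increment value and UI constant below are examples, not asserted driver values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example figures only: ~823 MHz PLL and a ~2^32-scale increment value. */
	uint64_t cur_freq   = 823437500ULL;
	uint64_t clk_incval = 0x13b13b13bULL;

	uint64_t tu_per_sec = cur_freq * clk_incval;  /* fits in 64 bits              */
	uint64_t tu_div_256 = tu_per_sec >> 8;        /* pre-divide by 256            */

	uint64_t line_ui = 640;                       /* illustrative UI constant     */
	uint64_t uix = (tu_div_256 * line_ui) / 390625000ULL;  /* == /1e11, no overflow */

	printf("uix = %llu\n", (unsigned long long)uix);
	return 0;
}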
/**
- * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
+ * ice_phy_cfg_parpcs_e82x - Configure TUs per PAR/PCS clock cycle
* @hw: pointer to the HW struct
* @port: port to configure
*
@@ -1753,18 +1756,18 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
* frequency is ~29 bits, so multiplying them together should fit within the
* 64 bit arithmetic.
*/
-static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
+static int ice_phy_cfg_parpcs_e82x(struct ice_hw *hw, u8 port)
{
u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
enum ice_ptp_link_spd link_spd;
enum ice_ptp_fec_mode fec_mode;
int err;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
if (err)
return err;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per cycle of the PHC clock */
@@ -1784,7 +1787,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1796,7 +1799,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1808,7 +1811,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1820,7 +1823,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_RX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1832,7 +1835,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1844,7 +1847,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_RX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1856,7 +1859,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1868,23 +1871,23 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
+ return ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_RX_TUS_L,
phy_tus);
}
/**
- * ice_calc_fixed_tx_offset_e822 - Calculated Fixed Tx offset for a port
+ * ice_calc_fixed_tx_offset_e82x - Calculated Fixed Tx offset for a port
* @hw: pointer to the HW struct
* @link_spd: the Link speed to calculate for
*
* Calculate the fixed offset due to known static latency data.
*/
static u64
-ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+ice_calc_fixed_tx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
{
u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second */
@@ -1904,7 +1907,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
}
/**
- * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
+ * ice_phy_cfg_tx_offset_e82x - Configure total Tx timestamp offset
* @hw: pointer to the HW struct
* @port: the PHY port to configure
*
@@ -1926,7 +1929,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
* Returns zero on success, -EBUSY if the hardware vernier offset
* calibration has not completed, or another error code on failure.
*/
-int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
+int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port)
{
enum ice_ptp_link_spd link_spd;
enum ice_ptp_fec_mode fec_mode;
@@ -1935,7 +1938,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
u32 reg;
/* Nothing to do if we've already programmed the offset */
- err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OR, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OR, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n",
port, err);
@@ -1945,7 +1948,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
if (reg)
return 0;
- err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OV_STATUS, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
port, err);
@@ -1955,11 +1958,11 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
if (!(reg & P_REG_TX_OV_STATUS_OV_M))
return -EBUSY;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
if (err)
return err;
- total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+ total_offset = ice_calc_fixed_tx_offset_e82x(hw, link_spd);
/* Read the first Vernier offset from the PHY register and add it to
* the total offset.
@@ -1970,7 +1973,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
link_spd == ICE_PTP_LNK_SPD_25G_RS ||
link_spd == ICE_PTP_LNK_SPD_40G ||
link_spd == ICE_PTP_LNK_SPD_50G) {
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_PCS_TX_OFFSET_L,
&val);
if (err)
@@ -1985,7 +1988,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
*/
if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
link_spd == ICE_PTP_LNK_SPD_100G_RS) {
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_TX_TIME_L,
&val);
if (err)
@@ -1998,12 +2001,12 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
* PHY and indicate that the Tx offset is ready. After this,
* timestamps will be enabled.
*/
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_TX_OFFSET_L,
total_offset);
if (err)
return err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 1);
if (err)
return err;
@@ -2014,7 +2017,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
}
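[Editor's note] ice_phy_cfg_tx_offset_e82x() only programs the total once the vernier measurement is reported valid: the total is the fixed latency for the link speed, plus the PAR/PCS vernier offset for most speeds, or an extra PAR Tx time term for the RS-FEC speeds, after which P_REG_TX_OR is set to 1 to arm timestamping. A compact sketch of that composition, simplified to two branches and using placeholder values where this hunk does not show real numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum link_spd { SPD_10G, SPD_25G_RS, SPD_100G_RS };

int main(void)
{
	enum link_spd spd = SPD_100G_RS;
	bool vernier_valid = true;          /* P_REG_TX_OV_STATUS said "done"    */
	uint64_t fixed = 12345;             /* placeholder fixed latency in TUs  */
	uint64_t par_pcs_off = 678;         /* placeholder vernier measurement   */
	uint64_t par_tx_time = 90;          /* placeholder RS-FEC extra term     */

	if (!vernier_valid) {
		puts("-EBUSY: vernier calibration not finished yet");
		return 1;
	}

	uint64_t total = fixed;
	if (spd != SPD_100G_RS)             /* most speeds add the PAR/PCS term  */
		total += par_pcs_off;
	else                                /* RS-FEC speeds add the PAR Tx time */
		total += par_tx_time;

	printf("program TOTAL_TX_OFFSET=%llu, then set TX_OR=1\n",
	       (unsigned long long)total);
	return 0;
}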
/**
- * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
+ * ice_phy_calc_pmd_adj_e82x - Calculate PMD adjustment for Rx
* @hw: pointer to the HW struct
* @port: the PHY port to adjust for
* @link_spd: the current link speed of the PHY
@@ -2026,7 +2029,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
* various delays caused when receiving a packet.
*/
static int
-ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
+ice_phy_calc_pmd_adj_e82x(struct ice_hw *hw, u8 port,
enum ice_ptp_link_spd link_spd,
enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
{
@@ -2035,7 +2038,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
u32 val;
int err;
- err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_PMD_ALIGNMENT, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
err);
@@ -2044,7 +2047,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
pmd_align = (u8)val;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second */
@@ -2123,7 +2126,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
u64 cycle_adj;
u8 rx_cycle;
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_40_TO_160_CNT,
&val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
@@ -2145,7 +2148,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
u64 cycle_adj;
u8 rx_cycle;
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_80_TO_160_CNT,
&val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
@@ -2172,18 +2175,18 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
}
/**
- * ice_calc_fixed_rx_offset_e822 - Calculated the fixed Rx offset for a port
+ * ice_calc_fixed_rx_offset_e82x - Calculated the fixed Rx offset for a port
* @hw: pointer to HW struct
* @link_spd: The Link speed to calculate for
*
* Determine the fixed Rx latency for a given link speed.
*/
static u64
-ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+ice_calc_fixed_rx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
{
u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second */
@@ -2203,7 +2206,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
}
/**
- * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
+ * ice_phy_cfg_rx_offset_e82x - Configure total Rx timestamp offset
* @hw: pointer to the HW struct
* @port: the PHY port to configure
*
@@ -2229,7 +2232,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
* Returns zero on success, -EBUSY if the hardware vernier offset
* calibration has not completed, or another error code on failure.
*/
-int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
+int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
{
enum ice_ptp_link_spd link_spd;
enum ice_ptp_fec_mode fec_mode;
@@ -2238,7 +2241,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
u32 reg;
/* Nothing to do if we've already programmed the offset */
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OR, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OR, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n",
port, err);
@@ -2248,7 +2251,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
if (reg)
return 0;
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OV_STATUS, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
port, err);
@@ -2258,16 +2261,16 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
if (!(reg & P_REG_RX_OV_STATUS_OV_M))
return -EBUSY;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
if (err)
return err;
- total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
+ total_offset = ice_calc_fixed_rx_offset_e82x(hw, link_spd);
/* Read the first Vernier offset from the PHY register and add it to
* the total offset.
*/
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_PCS_RX_OFFSET_L,
&val);
if (err)
@@ -2282,7 +2285,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
link_spd == ICE_PTP_LNK_SPD_50G ||
link_spd == ICE_PTP_LNK_SPD_50G_RS ||
link_spd == ICE_PTP_LNK_SPD_100G_RS) {
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_RX_TIME_L,
&val);
if (err)
@@ -2292,7 +2295,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
}
/* In addition, Rx must account for the PMD alignment */
- err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
+ err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd);
if (err)
return err;
@@ -2308,12 +2311,12 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
* PHY and indicate that the Rx offset is ready. After this,
* timestamps will be enabled.
*/
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_RX_OFFSET_L,
total_offset);
if (err)
return err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 1);
if (err)
return err;
@@ -2324,7 +2327,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
+ * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time
* @hw: pointer to the HW struct
* @port: the PHY port to read
* @phy_time: on return, the 64bit PHY timer value
@@ -2334,7 +2337,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
* and PHC timer values.
*/
static int
-ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
+ice_read_phy_and_phc_time_e82x(struct ice_hw *hw, u8 port, u64 *phy_time,
u64 *phc_time)
{
u64 tx_time, rx_time;
@@ -2381,7 +2384,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
}
/**
- * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
+ * ice_sync_phy_timer_e82x - Synchronize the PHY timer with PHC timer
* @hw: pointer to the HW struct
* @port: the PHY port to synchronize
*
@@ -2392,7 +2395,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
* to the PHY timer in order to ensure it reads the same value as the
* primary PHC timer.
*/
-static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
+static int ice_sync_phy_timer_e82x(struct ice_hw *hw, u8 port)
{
u64 phc_time, phy_time, difference;
int err;
@@ -2402,7 +2405,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
return -EBUSY;
}
- err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
if (err)
goto err_unlock;
@@ -2416,7 +2419,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
*/
difference = phc_time - phy_time;
- err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
+ err = ice_ptp_prep_port_adj_e82x(hw, port, (s64)difference);
if (err)
goto err_unlock;
@@ -2433,7 +2436,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
/* Re-capture the timer values to flush the command registers and
* verify that the time was properly adjusted.
*/
- err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
if (err)
goto err_unlock;
@@ -2452,7 +2455,7 @@ err_unlock:
}
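[Editor's note] ice_sync_phy_timer_e82x() relies on unsigned wrap-around: difference = phc_time - phy_time is computed in u64 and handed to the port-adjust helper as an s64, so the same expression works whether the PHY timer is behind or ahead of the PHC. A tiny demonstration of why that cast is enough:

#include <stdint.h>
#include <stdio.h>

static void show(uint64_t phc, uint64_t phy)
{
	uint64_t difference = phc - phy;   /* wraps around if phy > phc        */
	int64_t adj = (int64_t)difference; /* reinterpreted as signed          */
	printf("phc=%llu phy=%llu -> adjust port by %lld\n",
	       (unsigned long long)phc, (unsigned long long)phy, (long long)adj);
}

int main(void)
{
	show(1000000, 999200);   /* PHY behind: positive adjustment (+800) */
	show(1000000, 1000450);  /* PHY ahead:  negative adjustment (-450) */
	return 0;
}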
/**
- * ice_stop_phy_timer_e822 - Stop the PHY clock timer
+ * ice_stop_phy_timer_e82x - Stop the PHY clock timer
* @hw: pointer to the HW struct
* @port: the PHY port to stop
* @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
@@ -2462,36 +2465,36 @@ err_unlock:
* initialized or when link speed changes.
*/
int
-ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
+ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset)
{
int err;
u32 val;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
if (err)
return err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
if (err)
return err;
- err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
if (err)
return err;
val &= ~P_REG_PS_START_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val &= ~P_REG_PS_ENA_CLK_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
if (soft_reset) {
val |= P_REG_PS_SFT_RESET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
}
@@ -2502,7 +2505,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
}
/**
- * ice_start_phy_timer_e822 - Start the PHY clock timer
+ * ice_start_phy_timer_e82x - Start the PHY clock timer
* @hw: pointer to the HW struct
* @port: the PHY port to start
*
@@ -2512,7 +2515,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
*
* Hardware will take Vernier measurements on Tx or Rx of packets.
*/
-int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
+int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port)
{
u32 lo, hi, val;
u64 incval;
@@ -2521,17 +2524,17 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
tmr_idx = ice_get_ptp_src_clock_index(hw);
- err = ice_stop_phy_timer_e822(hw, port, false);
+ err = ice_stop_phy_timer_e82x(hw, port, false);
if (err)
return err;
- ice_phy_cfg_lane_e822(hw, port);
+ ice_phy_cfg_lane_e82x(hw, port);
- err = ice_phy_cfg_uix_e822(hw, port);
+ err = ice_phy_cfg_uix_e82x(hw, port);
if (err)
return err;
- err = ice_phy_cfg_parpcs_e822(hw, port);
+ err = ice_phy_cfg_parpcs_e82x(hw, port);
if (err)
return err;
@@ -2539,7 +2542,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
incval = (u64)hi << 32 | lo;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval);
if (err)
return err;
@@ -2552,22 +2555,22 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
ice_ptp_exec_tmr_cmd(hw);
- err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
if (err)
return err;
val |= P_REG_PS_SFT_RESET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val |= P_REG_PS_START_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val &= ~P_REG_PS_SFT_RESET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
@@ -2578,18 +2581,18 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
ice_ptp_exec_tmr_cmd(hw);
val |= P_REG_PS_ENA_CLK_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val |= P_REG_PS_LOAD_OFFSET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
ice_ptp_exec_tmr_cmd(hw);
- err = ice_sync_phy_timer_e822(hw, port);
+ err = ice_sync_phy_timer_e82x(hw, port);
if (err)
return err;
@@ -2599,7 +2602,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register
+ * ice_get_phy_tx_tstamp_ready_e82x - Read Tx memory status register
* @hw: pointer to the HW struct
* @quad: the timestamp quad to read from
* @tstamp_ready: contents of the Tx memory status register
@@ -2609,19 +2612,19 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
* ready to be captured from the PHY timestamp block.
*/
static int
-ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
+ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
{
u32 hi, lo;
int err;
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
quad, err);
return err;
}
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
quad, err);
@@ -3307,7 +3310,7 @@ void ice_ptp_init_phy_model(struct ice_hw *hw)
if (ice_is_e810(hw))
hw->phy_model = ICE_PHY_E810;
else
- hw->phy_model = ICE_PHY_E822;
+ hw->phy_model = ICE_PHY_E82X;
}
/**
@@ -3332,8 +3335,8 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
case ICE_PHY_E810:
err = ice_ptp_port_cmd_e810(hw, cmd);
break;
- case ICE_PHY_E822:
- err = ice_ptp_port_cmd_e822(hw, cmd);
+ case ICE_PHY_E82X:
+ err = ice_ptp_port_cmd_e82x(hw, cmd);
break;
default:
err = -EOPNOTSUPP;
@@ -3384,8 +3387,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
case ICE_PHY_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
- case ICE_PHY_E822:
- err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+ case ICE_PHY_E82X:
+ err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
break;
default:
err = -EOPNOTSUPP;
@@ -3426,8 +3429,8 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
case ICE_PHY_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
- case ICE_PHY_E822:
- err = ice_ptp_prep_phy_incval_e822(hw, incval);
+ case ICE_PHY_E82X:
+ err = ice_ptp_prep_phy_incval_e82x(hw, incval);
break;
default:
err = -EOPNOTSUPP;
@@ -3492,8 +3495,8 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
case ICE_PHY_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
- case ICE_PHY_E822:
- err = ice_ptp_prep_phy_adj_e822(hw, adj);
+ case ICE_PHY_E82X:
+ err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
default:
err = -EOPNOTSUPP;
@@ -3521,8 +3524,8 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- case ICE_PHY_E822:
- return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
+ case ICE_PHY_E82X:
+ return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
default:
return -EOPNOTSUPP;
}
@@ -3549,8 +3552,8 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- case ICE_PHY_E822:
- return ice_clear_phy_tstamp_e822(hw, block, idx);
+ case ICE_PHY_E82X:
+ return ice_clear_phy_tstamp_e82x(hw, block, idx);
default:
return -EOPNOTSUPP;
}
@@ -3608,8 +3611,8 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
switch (hw->phy_model) {
- case ICE_PHY_E822:
- ice_ptp_reset_ts_memory_e822(hw);
+ case ICE_PHY_E82X:
+ ice_ptp_reset_ts_memory_e82x(hw);
break;
case ICE_PHY_E810:
default:
@@ -3636,8 +3639,8 @@ int ice_ptp_init_phc(struct ice_hw *hw)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_ptp_init_phc_e810(hw);
- case ICE_PHY_E822:
- return ice_ptp_init_phc_e822(hw);
+ case ICE_PHY_E82X:
+ return ice_ptp_init_phc_e82x(hw);
default:
return -EOPNOTSUPP;
}
@@ -3660,8 +3663,8 @@ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
case ICE_PHY_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- case ICE_PHY_E822:
- return ice_get_phy_tx_tstamp_ready_e822(hw, block,
+ case ICE_PHY_E82X:
+ return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
tstamp_ready);
break;
default:
@@ -3942,7 +3945,7 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
case ICE_DEV_ID_E823C_QSFP:
case ICE_DEV_ID_E823C_SFP:
case ICE_DEV_ID_E823C_SGMII:
- *pin_num = ICE_E822_RCLK_PINS_NUM;
+ *pin_num = ICE_E82X_RCLK_PINS_NUM;
ret = 0;
if (hw->cgu_part_number ==
ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032)
@@ -3961,3 +3964,57 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
return ret;
}
+
+/**
+ * ice_cgu_get_output_pin_state_caps - get output pin state capabilities
+ * @hw: pointer to the hw struct
+ * @pin_id: id of a pin
+ * @caps: capabilities to modify
+ *
+ * Return:
+ * * 0 - success, state capabilities were modified
+ * * negative - failure, capabilities were not modified
+ */
+int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
+ unsigned long *caps)
+{
+ bool can_change = true;
+
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3)
+ can_change = false;
+ break;
+ case ICE_DEV_ID_E810C_QSFP:
+ if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4)
+ can_change = false;
+ break;
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_SGMII:
+ if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 &&
+ pin_id == ZL_OUT2)
+ can_change = false;
+ else if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 &&
+ pin_id == SI_OUT1)
+ can_change = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (can_change)
+ *caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ else
+ *caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+
+ return 0;
+}
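[Editor's note] The new ice_cgu_get_output_pin_state_caps() does not build the capability word from scratch; it only toggles the state-can-change bit in a caps mask the caller already holds, leaving every other capability bit alone. A hedged usage sketch of that shape; the helper, constant and pin IDs below are invented for illustration, not the driver's symbols.

#include <stdio.h>

#define CAP_STATE_CAN_CHANGE (1ul << 0)  /* stand-in for the DPLL capability bit */

/* Mimics the new helper: decide per board/pin whether state may change,
 * then flip only that one bit in the caller-provided caps word. */
static int get_output_pin_state_caps(int pin_id, unsigned long *caps)
{
	int fixed_pin = 2;                   /* invented: pin whose state is fixed */

	if (pin_id == fixed_pin)
		*caps &= ~CAP_STATE_CAN_CHANGE;
	else
		*caps |= CAP_STATE_CAN_CHANGE;
	return 0;
}

int main(void)
{
	unsigned long caps = 0xF0;           /* other capability bits already set */

	get_output_pin_state_caps(2, &caps);
	printf("pin 2 caps=0x%lx\n", caps);  /* bit cleared, other bits preserved */
	get_output_pin_state_caps(3, &caps);
	printf("pin 3 caps=0x%lx\n", caps);  /* bit set                           */
	return 0;
}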
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 36aeeef99ec0..1f3e03124430 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -42,7 +42,7 @@ enum ice_ptp_fec_mode {
};
/**
- * struct ice_time_ref_info_e822
+ * struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
* @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
* @pps_delay: propagation delay of the PPS output signal
@@ -50,14 +50,14 @@ enum ice_ptp_fec_mode {
* Characteristic information for the various TIME_REF sources possible in the
* E822 devices
*/
-struct ice_time_ref_info_e822 {
+struct ice_time_ref_info_e82x {
u64 pll_freq;
u64 nominal_incval;
u8 pps_delay;
};
/**
- * struct ice_vernier_info_e822
+ * struct ice_vernier_info_e82x
* @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS
* @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS
* @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS
@@ -80,7 +80,7 @@ struct ice_time_ref_info_e822 {
* different link speeds, either the deskew marker for multi-lane link speeds
* or the Reed Solomon gearbox marker for RS-FEC.
*/
-struct ice_vernier_info_e822 {
+struct ice_vernier_info_e82x {
u32 tx_par_clk;
u32 rx_par_clk;
u32 tx_pcs_clk;
@@ -95,7 +95,7 @@ struct ice_vernier_info_e822 {
};
/**
- * struct ice_cgu_pll_params_e822
+ * struct ice_cgu_pll_params_e82x
* @refclk_pre_div: Reference clock pre-divisor
* @feedback_div: Feedback divisor
* @frac_n_div: Fractional divisor
@@ -104,7 +104,7 @@ struct ice_vernier_info_e822 {
* Clock Generation Unit parameters used to program the PLL based on the
* selected TIME_REF frequency.
*/
-struct ice_cgu_pll_params_e822 {
+struct ice_cgu_pll_params_e82x {
u32 refclk_pre_div;
u32 feedback_div;
u32 frac_n_div;
@@ -124,7 +124,7 @@ enum ice_phy_rclk_pins {
};
#define ICE_E810_RCLK_PINS_NUM (ICE_RCLKB_PIN + 1)
-#define ICE_E822_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1)
+#define ICE_E82X_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1)
#define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_E810_RCLK_PINS_NUM + \
(_pin) + ZL_REF1P)
@@ -183,16 +183,16 @@ struct ice_cgu_pin_desc {
};
extern const struct
-ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
/* Table of constants related to possible TIME_REF sources */
-extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
+extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ];
/* Table of constants for Vernier calibration on E822 */
-extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD];
+extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based off of a PLL with an 812.5 MHz frequency.
@@ -215,23 +215,23 @@ int ice_ptp_init_phc(struct ice_hw *hw);
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
/* E822 family functions */
-int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
-int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
-void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad);
+int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
+int ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
+void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad);
/**
- * ice_e822_time_ref - Get the current TIME_REF from capabilities
+ * ice_e82x_time_ref - Get the current TIME_REF from capabilities
* @hw: pointer to the HW structure
*
* Returns the current TIME_REF from the capabilities structure.
*/
-static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw)
+static inline enum ice_time_ref_freq ice_e82x_time_ref(struct ice_hw *hw)
{
return hw->func_caps.ts_func_info.time_ref;
}
/**
- * ice_set_e822_time_ref - Set new TIME_REF
+ * ice_set_e82x_time_ref - Set new TIME_REF
* @hw: pointer to the HW structure
* @time_ref: new TIME_REF to set
*
@@ -239,31 +239,31 @@ static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw)
* change, such as an update to the CGU registers.
*/
static inline void
-ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
{
hw->func_caps.ts_func_info.time_ref = time_ref;
}
-static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref)
{
return e822_time_ref[time_ref].pll_freq;
}
-static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
{
return e822_time_ref[time_ref].nominal_incval;
}
-static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
{
return e822_time_ref[time_ref].pps_delay;
}
/* E822 Vernier calibration functions */
-int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset);
-int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port);
-int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port);
-int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port);
+int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
+int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
@@ -282,6 +282,8 @@ int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num);
void ice_ptp_init_phy_model(struct ice_hw *hw);
+int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
+ unsigned long *caps);
#define PFTSYN_SEM_BYTES 4
@@ -507,6 +509,7 @@ void ice_ptp_init_phy_model(struct ice_hw *hw);
#define TS_LL_READ_RETRIES 200
#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
#define TS_LL_READ_TS_IDX GENMASK(29, 24)
+#define TS_LL_READ_TS_INTR BIT(30)
#define TS_LL_READ_TS BIT(31)
/* Internal PHY timestamp address */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index c686ac0935eb..d367f4c66dcd 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -3,42 +3,51 @@
#include "ice.h"
#include "ice_eswitch.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
+#include "devlink/devlink_port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"
/**
- * ice_repr_get_sw_port_id - get port ID associated with representor
- * @repr: pointer to port representor
+ * ice_repr_inc_tx_stats - increment Tx statistic by one packet
+ * @repr: repr to increment stats on
+ * @len: length of the packet
+ * @xmit_status: value returned by xmit function
*/
-static int ice_repr_get_sw_port_id(struct ice_repr *repr)
+void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
+ int xmit_status)
{
- return repr->vf->pf->hw.port_info->lport;
+ struct ice_repr_pcpu_stats *stats;
+
+ if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
+ xmit_status != NET_XMIT_CN)) {
+ this_cpu_inc(repr->stats->tx_drops);
+ return;
+ }
+
+ stats = this_cpu_ptr(repr->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
}
/**
- * ice_repr_get_phys_port_name - get phys port name
- * @netdev: pointer to port representor netdev
- * @buf: write here port name
- * @len: max length of buf
+ * ice_repr_inc_rx_stats - increment Rx statistic by one packet
+ * @netdev: repr netdev to increment stats on
+ * @len: length of the packet
*/
-static int
-ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
+void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_repr *repr = np->repr;
- int res;
-
- /* Devlink port is registered and devlink core is taking care of name formatting. */
- if (repr->vf->devlink_port.devlink)
- return -EOPNOTSUPP;
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_repr_pcpu_stats *stats;
- res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
- repr->vf->vf_id);
- if (res <= 0)
- return -EOPNOTSUPP;
- return 0;
+ stats = this_cpu_ptr(repr->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
}
/**
@@ -76,7 +85,7 @@ ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
* ice_netdev_to_repr - Get port representor for given netdevice
* @netdev: pointer to port representor netdev
*/
-struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
+struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -139,38 +148,35 @@ static int ice_repr_stop(struct net_device *netdev)
* ice_repr_sp_stats64 - get slow path stats for port representor
* @dev: network interface device structure
* @stats: netlink stats structure
- *
- * RX/TX stats are being swapped here to be consistent with VF stats. In slow
- * path, port representor receives data when the corresponding VF is sending it
- * (and vice versa), TX and RX bytes/packets are effectively swapped on port
- * representor.
*/
static int
ice_repr_sp_stats64(const struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- struct ice_netdev_priv *np = netdev_priv(dev);
- int vf_id = np->repr->vf->vf_id;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- u64 pkts, bytes;
-
- tx_ring = np->vsi->tx_rings[vf_id];
- ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
- tx_ring->ring_stats->stats,
- &pkts, &bytes);
- stats->rx_packets = pkts;
- stats->rx_bytes = bytes;
-
- rx_ring = np->vsi->rx_rings[vf_id];
- ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
- rx_ring->ring_stats->stats,
- &pkts, &bytes);
- stats->tx_packets = pkts;
- stats->tx_bytes = bytes;
- stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
- rx_ring->ring_stats->rx_stats.alloc_buf_failed;
-
+ struct ice_repr *repr = ice_netdev_to_repr(dev);
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 tbytes, tpkts, tdrops, rbytes, rpkts;
+ struct ice_repr_pcpu_stats *repr_stats;
+ unsigned int start;
+
+ repr_stats = per_cpu_ptr(repr->stats, i);
+ do {
+ start = u64_stats_fetch_begin(&repr_stats->syncp);
+ tbytes = repr_stats->tx_bytes;
+ tpkts = repr_stats->tx_packets;
+ tdrops = repr_stats->tx_drops;
+ rbytes = repr_stats->rx_bytes;
+ rpkts = repr_stats->rx_packets;
+ } while (u64_stats_fetch_retry(&repr_stats->syncp, start));
+
+ stats->tx_bytes += tbytes;
+ stats->tx_packets += tpkts;
+ stats->tx_dropped += tdrops;
+ stats->rx_bytes += rbytes;
+ stats->rx_packets += rpkts;
+ }
return 0;
}
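[Editor's note] The reworked ice_repr_sp_stats64() replaces the per-ring counters with per-CPU counters: each CPU updates its own copy under a u64_stats_sync sequence counter, and the read side walks every possible CPU, retrying a CPU's snapshot if the sequence changed mid-read, then sums the results. A stripped-down sketch of the summing half; there is no seqcount here, only the aggregation shape, and the field names mirror the new per-CPU struct.

#include <stdint.h>
#include <stdio.h>

struct repr_pcpu_stats {
	uint64_t rx_packets, rx_bytes;
	uint64_t tx_packets, tx_bytes, tx_drops;
};

#define NR_CPUS 4

int main(void)
{
	/* Pretend each CPU has already accumulated its own counters. */
	struct repr_pcpu_stats pcpu[NR_CPUS] = {
		{ .tx_packets = 10, .tx_bytes = 1500 },
		{ .tx_packets =  3, .tx_bytes =  200, .tx_drops = 1 },
		{ .rx_packets =  7, .rx_bytes =  900 },
		{ 0 },
	};
	struct repr_pcpu_stats total = { 0 };

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		/* The driver wraps this copy in a u64_stats_fetch_begin/retry
		 * loop so a concurrent writer is never observed half-way. */
		total.tx_packets += pcpu[cpu].tx_packets;
		total.tx_bytes   += pcpu[cpu].tx_bytes;
		total.tx_drops   += pcpu[cpu].tx_drops;
		total.rx_packets += pcpu[cpu].rx_packets;
		total.rx_bytes   += pcpu[cpu].rx_bytes;
	}

	printf("tx: %llu pkts / %llu bytes (%llu dropped), rx: %llu pkts / %llu bytes\n",
	       (unsigned long long)total.tx_packets, (unsigned long long)total.tx_bytes,
	       (unsigned long long)total.tx_drops, (unsigned long long)total.rx_packets,
	       (unsigned long long)total.rx_bytes);
	return 0;
}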
@@ -240,7 +246,6 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
static const struct net_device_ops ice_repr_netdev_ops = {
- .ndo_get_phys_port_name = ice_repr_get_phys_port_name,
.ndo_get_stats64 = ice_repr_get_stats64,
.ndo_open = ice_repr_open,
.ndo_stop = ice_repr_stop,
@@ -278,25 +283,66 @@ ice_repr_reg_netdev(struct net_device *netdev)
return register_netdev(netdev);
}
+static void ice_repr_remove_node(struct devlink_port *devlink_port)
+{
+ devl_lock(devlink_port->devlink);
+ devl_rate_leaf_destroy(devlink_port);
+ devl_unlock(devlink_port->devlink);
+}
+
/**
- * ice_repr_add - add representor for VF
- * @vf: pointer to VF structure
+ * ice_repr_rem - remove representor from VF
+ * @repr: pointer to representor structure
*/
-static int ice_repr_add(struct ice_vf *vf)
+static void ice_repr_rem(struct ice_repr *repr)
+{
+ free_percpu(repr->stats);
+ free_netdev(repr->netdev);
+ kfree(repr);
+}
+
+/**
+ * ice_repr_rem_vf - remove representor from VF
+ * @repr: pointer to representor structure
+ */
+void ice_repr_rem_vf(struct ice_repr *repr)
+{
+ ice_repr_remove_node(&repr->vf->devlink_port);
+ unregister_netdev(repr->netdev);
+ ice_devlink_destroy_vf_port(repr->vf);
+ ice_virtchnl_set_dflt_ops(repr->vf);
+ ice_repr_rem(repr);
+}
+
+static void ice_repr_set_tx_topology(struct ice_pf *pf)
+{
+ struct devlink *devlink;
+
+ /* only export if ADQ and DCB disabled and eswitch enabled*/
+ if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
+ !ice_is_switchdev_running(pf))
+ return;
+
+ devlink = priv_to_devlink(pf);
+ ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
+}
+
+/**
+ * ice_repr_add - add representor for generic VSI
+ * @pf: pointer to PF structure
+ * @src_vsi: pointer to VSI structure of device to represent
+ * @parent_mac: device MAC address
+ */
+static struct ice_repr *
+ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
{
- struct ice_q_vector *q_vector;
struct ice_netdev_priv *np;
struct ice_repr *repr;
- struct ice_vsi *vsi;
int err;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
repr = kzalloc(sizeof(*repr), GFP_KERNEL);
if (!repr)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
if (!repr->netdev) {
@@ -304,22 +350,49 @@ static int ice_repr_add(struct ice_vf *vf)
goto err_alloc;
}
- repr->src_vsi = vsi;
- repr->vf = vf;
- vf->repr = repr;
+ repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
+ if (!repr->stats) {
+ err = -ENOMEM;
+ goto err_stats;
+ }
+
+ repr->src_vsi = src_vsi;
+ repr->id = src_vsi->vsi_num;
np = netdev_priv(repr->netdev);
np->repr = repr;
- q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
- if (!q_vector) {
- err = -ENOMEM;
- goto err_alloc_q_vector;
- }
- repr->q_vector = q_vector;
+ ether_addr_copy(repr->parent_mac, parent_mac);
+
+ return repr;
+
+err_stats:
+ free_netdev(repr->netdev);
+err_alloc:
+ kfree(repr);
+ return ERR_PTR(err);
+}
+
+struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
+{
+ struct ice_repr *repr;
+ struct ice_vsi *vsi;
+ int err;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return ERR_PTR(-ENOENT);
err = ice_devlink_create_vf_port(vf);
if (err)
- goto err_devlink;
+ return ERR_PTR(err);
+
+ repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
+ if (IS_ERR(repr)) {
+ err = PTR_ERR(repr);
+ goto err_repr_add;
+ }
+
+ repr->vf = vf;
repr->netdev->min_mtu = ETH_MIN_MTU;
repr->netdev->max_mtu = ICE_MAX_MTU;
@@ -331,100 +404,23 @@ static int ice_repr_add(struct ice_vf *vf)
goto err_netdev;
ice_virtchnl_set_repr_ops(vf);
+ ice_repr_set_tx_topology(vf->pf);
- return 0;
+ return repr;
err_netdev:
+ ice_repr_rem(repr);
+err_repr_add:
ice_devlink_destroy_vf_port(vf);
-err_devlink:
- kfree(repr->q_vector);
- vf->repr->q_vector = NULL;
-err_alloc_q_vector:
- free_netdev(repr->netdev);
- repr->netdev = NULL;
-err_alloc:
- kfree(repr);
- vf->repr = NULL;
- return err;
+ return ERR_PTR(err);
}
-/**
- * ice_repr_rem - remove representor from VF
- * @vf: pointer to VF structure
- */
-static void ice_repr_rem(struct ice_vf *vf)
+struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
{
- if (!vf->repr)
- return;
+ if (!vsi->vf)
+ return NULL;
- kfree(vf->repr->q_vector);
- vf->repr->q_vector = NULL;
- unregister_netdev(vf->repr->netdev);
- ice_devlink_destroy_vf_port(vf);
- free_netdev(vf->repr->netdev);
- vf->repr->netdev = NULL;
- kfree(vf->repr);
- vf->repr = NULL;
-
- ice_virtchnl_set_dflt_ops(vf);
-}
-
-/**
- * ice_repr_rem_from_all_vfs - remove port representor for all VFs
- * @pf: pointer to PF structure
- */
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
-{
- struct devlink *devlink;
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf)
- ice_repr_rem(vf);
-
- /* since all port representors are destroyed, there is
- * no point in keeping the nodes
- */
- devlink = priv_to_devlink(pf);
- devl_lock(devlink);
- devl_rate_nodes_destroy(devlink);
- devl_unlock(devlink);
-}
-
-/**
- * ice_repr_add_for_all_vfs - add port representor for all VFs
- * @pf: pointer to PF structure
- */
-int ice_repr_add_for_all_vfs(struct ice_pf *pf)
-{
- struct devlink *devlink;
- struct ice_vf *vf;
- unsigned int bkt;
- int err;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf) {
- err = ice_repr_add(vf);
- if (err)
- goto err;
- }
-
- /* only export if ADQ and DCB disabled */
- if (ice_is_adq_active(pf) || ice_is_dcb_active(pf))
- return 0;
-
- devlink = priv_to_devlink(pf);
- ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
-
- return 0;
-
-err:
- ice_repr_rem_from_all_vfs(pf);
-
- return err;
+ return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
}
/**
@@ -446,15 +442,3 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr)
netif_carrier_off(repr->netdev);
netif_tx_stop_all_queues(repr->netdev);
}
-
-/**
- * ice_repr_set_traffic_vsi - set traffic VSI for port representor
- * @repr: repr on with VSI will be set
- * @vsi: pointer to VSI that will be used by port representor to pass traffic
- */
-void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
-{
- struct ice_netdev_priv *np = netdev_priv(repr->netdev);
-
- np->vsi = vsi;
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index e1ee2d2c1d2d..cff730b15ca0 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -6,27 +6,38 @@
#include <net/dst_metadata.h>
+struct ice_repr_pcpu_stats {
+ struct u64_stats_sync syncp;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_drops;
+};
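A hedged sketch of how such per-CPU counters are typically bumped on the hot path; the helper name below is illustrative, only the struct and field names come from this header:

    static void example_repr_inc_tx_stats(struct ice_repr *repr,
                                          unsigned int len)
    {
        struct ice_repr_pcpu_stats *stats = this_cpu_ptr(repr->stats);

        /* u64_stats_sync keeps the 64-bit counters consistent for readers,
         * which matters on 32-bit hosts
         */
        u64_stats_update_begin(&stats->syncp);
        stats->tx_packets++;
        stats->tx_bytes += len;
        u64_stats_update_end(&stats->syncp);
    }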
+
struct ice_repr {
struct ice_vsi *src_vsi;
struct ice_vf *vf;
- struct ice_q_vector *q_vector;
struct net_device *netdev;
struct metadata_dst *dst;
struct ice_esw_br_port *br_port;
-#ifdef CONFIG_ICE_SWITCHDEV
- /* info about slow path rule */
- struct ice_rule_query_data sp_rule;
-#endif
+ struct ice_repr_pcpu_stats __percpu *stats;
+ u32 id;
+ u8 parent_mac[ETH_ALEN];
};
-int ice_repr_add_for_all_vfs(struct ice_pf *pf);
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf);
+struct ice_repr *ice_repr_add_vf(struct ice_vf *vf);
+void ice_repr_rem_vf(struct ice_repr *repr);
void ice_repr_start_tx_queues(struct ice_repr *repr);
void ice_repr_stop_tx_queues(struct ice_repr *repr);
-void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
-
-struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
+struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev);
bool ice_is_port_repr_netdev(const struct net_device *netdev);
+
+struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi);
+
+void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
+ int xmit_status);
+void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2f4a621254e8..ecf8f5d60292 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -237,7 +237,7 @@ static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
u32 node_teid)
{
- DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
u16 buf_size = __struct_size(buf);
u16 num_groups_removed = 0;
int status;
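For readers new to the macro being swapped in across this series: DEFINE_RAW_FLEX() (from <linux/overflow.h>) declares an on-stack, zero-initialized buffer for a structure ending in a flexible array, sized for the requested number of trailing elements; unlike DEFINE_FLEX() it does not also initialize a __counted_by counter member. A sketch reusing the names from this hunk:

    /* storage for one struct ice_aqc_delete_elem plus one teid[] element */
    DEFINE_RAW_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
    u16 buf_size = __struct_size(buf);  /* total size of the on-stack buffer */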
@@ -1128,12 +1128,11 @@ u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* 5 or less sw_entry_point_layer
*/
/* calculate the VSI layer based on number of layers. */
- if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
- u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
-
- if (layer > hw->sw_entry_point_layer)
- return layer;
- }
+ if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
+ return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
+ else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS)
+ /* qgroup and VSI layers are the same */
+ return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
return hw->sw_entry_point_layer;
}
@@ -1150,13 +1149,10 @@ u8 ice_sched_get_agg_layer(struct ice_hw *hw)
* 7 or less sw_entry_point_layer
*/
/* calculate the aggregator layer based on number of layers. */
- if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
- u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
-
- if (layer > hw->sw_entry_point_layer)
- return layer;
- }
- return hw->sw_entry_point_layer;
+ if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
+ return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
+ else
+ return hw->sw_entry_point_layer;
}
/**
@@ -1387,8 +1383,7 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
u32 val, clk_src;
val = rd32(hw, GLGEN_CLKSTAT_SRC);
- clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
- GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
+ clk_src = FIELD_GET(GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M, val);
#define PSM_CLK_SRC_367_MHZ 0x0
#define PSM_CLK_SRC_416_MHZ 0x1
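The recurring pattern in this series replaces open-coded shift-and-mask register math with the <linux/bitfield.h> helpers; a small equivalence sketch using fields from the hunks above and below:

    /* extract: the shift amount is derived from the mask at compile time */
    clk_src = FIELD_GET(GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M, val);
    /* was: (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >> GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S */

    /* insert: the value is shifted into place and masked */
    reg = FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
    /* was: (hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & GLINT_VECT2FUNC_PF_NUM_M */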
@@ -1511,10 +1506,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
{
struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
+ u8 qgrp_layer, vsi_layer;
u16 max_children;
- u8 qgrp_layer;
qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
+ vsi_layer = ice_sched_get_vsi_layer(pi->hw);
max_children = pi->hw->max_children[qgrp_layer];
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
@@ -1525,6 +1521,12 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_node)
return NULL;
+ /* If the queue group and VSI layers are the same, then queues
+ * are attached directly to the VSI
+ */
+ if (qgrp_layer == vsi_layer)
+ return vsi_node;
+
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
@@ -2220,7 +2222,7 @@ int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
u16 buf_len = __struct_size(buf);
struct ice_sched_node *node;
u16 i, grps_movd = 0;
@@ -3200,7 +3202,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
u8 profile_type;
int status;
- if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ if (!pi || layer_num >= pi->hw->num_tx_sched_layers)
return NULL;
switch (rl_type) {
case ICE_MIN_BW:
@@ -3216,8 +3218,6 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
return NULL;
}
- if (!pi)
- return NULL;
hw = pi->hw;
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
@@ -3447,7 +3447,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
struct ice_aqc_rl_profile_info *rl_prof_elem;
int status = 0;
- if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ if (layer_num >= pi->hw->num_tx_sched_layers)
return -EINVAL;
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 1aef05ea5a57..7b668083be07 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -6,6 +6,17 @@
#include "ice_common.h"
+/**
+ * DOC: ice_sched.h
+ *
+ * This header file contains everything needed for the scheduler in the broad
+ * sense: defines related to layers, structures related to aggregators,
+ * function declarations, and more.
+ */
+
+#define ICE_SCHED_5_LAYERS 5
+#define ICE_SCHED_9_LAYERS 9
+
#define SCHED_NODE_NAME_MAX_LEN 32
#define ICE_QGRP_LAYER_OFFSET 2
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 2a5e6616cc0a..067712f4923f 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -106,10 +106,8 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
for (v = first; v <= last; v++) {
u32 reg;
- reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
- GLINT_VECT2FUNC_IS_PF_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
+ reg = FIELD_PREP(GLINT_VECT2FUNC_IS_PF_M, 1) |
+ FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
@@ -174,11 +172,10 @@ void ice_free_vfs(struct ice_pf *pf)
mutex_lock(&vfs->table_lock);
- ice_eswitch_release(pf);
-
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
+ ice_eswitch_detach(pf, vf);
ice_dis_vf_qs(vf);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
@@ -228,7 +225,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
struct ice_vsi *vsi;
params.type = ICE_VSI_VF;
- params.pi = ice_vf_get_port_info(vf);
+ params.port_info = ice_vf_get_port_info(vf);
params.vf = vf;
params.flags = ICE_VSI_FLAG_INIT;
@@ -241,7 +238,6 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
}
vf->lan_vsi_idx = vsi->idx;
- vf->lan_vsi_num = vsi->vsi_num;
return vsi;
}
@@ -274,24 +270,20 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
(device_based_first_msix + vf->num_msix) - 1;
device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
- VPINT_ALLOC_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
- VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
+ reg = FIELD_PREP(VPINT_ALLOC_FIRST_M, device_based_first_msix) |
+ FIELD_PREP(VPINT_ALLOC_LAST_M, device_based_last_msix) |
+ VPINT_ALLOC_VALID_M;
wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
- reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
- & VPINT_ALLOC_PCI_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
- VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
+ reg = FIELD_PREP(VPINT_ALLOC_PCI_FIRST_M, device_based_first_msix) |
+ FIELD_PREP(VPINT_ALLOC_PCI_LAST_M, device_based_last_msix) |
+ VPINT_ALLOC_PCI_VALID_M;
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
/* map the interrupts to its functions */
for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
- reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
- GLINT_VECT2FUNC_VF_NUM_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
+ reg = FIELD_PREP(GLINT_VECT2FUNC_VF_NUM_M, device_based_vf_id) |
+ FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
@@ -324,10 +316,8 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
* VFNUMQ value should be set to (number of queues - 1). A value
* of 0 means 1 queue and a value of 255 means 256 queues
*/
- reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
- VPLAN_TX_QBASE_VFFIRSTQ_M) |
- (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
- VPLAN_TX_QBASE_VFNUMQ_M));
+ reg = FIELD_PREP(VPLAN_TX_QBASE_VFFIRSTQ_M, vsi->txq_map[0]) |
+ FIELD_PREP(VPLAN_TX_QBASE_VFNUMQ_M, max_txq - 1);
wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
} else {
dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
@@ -342,10 +332,8 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
* VFNUMQ value should be set to (number of queues - 1). A value
* of 0 means 1 queue and a value of 255 means 256 queues
*/
- reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
- VPLAN_RX_QBASE_VFFIRSTQ_M) |
- (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
- VPLAN_RX_QBASE_VFNUMQ_M));
+ reg = FIELD_PREP(VPLAN_RX_QBASE_VFFIRSTQ_M, vsi->rxq_map[0]) |
+ FIELD_PREP(VPLAN_RX_QBASE_VFNUMQ_M, max_rxq - 1);
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
} else {
dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
@@ -372,18 +360,14 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
* @vf: VF to calculate the register index for
* @q_vector: a q_vector associated to the VF
*/
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
+void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
- struct ice_pf *pf;
-
if (!vf || !q_vector)
- return -EINVAL;
-
- pf = vf->pf;
+ return;
/* always add one to account for the OICR being the first MSIX */
- return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
- q_vector->v_idx + 1;
+ q_vector->vf_reg_idx = q_vector->v_idx + ICE_NONQ_VECS_VF;
+ q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx;
}
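A worked example of the new split indexing, assuming ICE_NONQ_VECS_VF is 1 (accounting for the OICR vector the comment above refers to) and a VF whose vectors start at PF index 64:

    /* v_idx == 0 (first traffic vector):
     *   vf_reg_idx = 0 + 1  = 1    VF-relative register index
     *   reg_idx    = 64 + 1 = 65   PF-relative register index
     */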
/**
@@ -614,6 +598,14 @@ static int ice_start_vfs(struct ice_pf *pf)
goto teardown;
}
+ retval = ice_eswitch_attach(pf, vf);
+ if (retval) {
+ dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
+ vf->vf_id, retval);
+ ice_vf_vsi_release(vf);
+ goto teardown;
+ }
+
set_bit(ICE_VF_STATE_INIT, vf->vf_states);
ice_ena_vf_mappings(vf);
wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
@@ -768,24 +760,6 @@ static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
}
/**
- * ice_sriov_create_vsi - Create a new VSI for a VF
- * @vf: VF to create the VSI for
- *
- * This is called by ice_vf_recreate_vsi to create the new VSI after the old
- * VSI has been released.
- */
-static int ice_sriov_create_vsi(struct ice_vf *vf)
-{
- struct ice_vsi *vsi;
-
- vsi = ice_vf_vsi_setup(vf);
- if (!vsi)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
* ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
* @vf: VF to perform tasks on
*/
@@ -804,7 +778,6 @@ static const struct ice_vf_ops ice_sriov_vf_ops = {
.poll_reset_status = ice_sriov_poll_reset_status,
.clear_reset_trigger = ice_sriov_clear_reset_trigger,
.irq_close = NULL,
- .create_vsi = ice_sriov_create_vsi,
.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};
@@ -859,11 +832,6 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
pci_dev_get(vfdev);
- /* set default number of MSI-X */
- vf->num_msix = pf->vfs.num_msix_per;
- vf->num_vf_qs = pf->vfs.num_qps_per;
- ice_vc_set_default_allowlist(vf);
-
hash_add_rcu(vfs->table, &vf->entry, vf_id);
}
@@ -932,12 +900,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
clear_bit(ICE_VF_DIS, pf->state);
- ret = ice_eswitch_configure(pf);
- if (ret) {
- dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
- goto err_unroll_sriov;
- }
-
/* rearm global interrupts */
if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -1098,6 +1060,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
struct ice_pf *pf = pci_get_drvdata(pdev);
u16 prev_msix, prev_queues, queues;
bool needs_rebuild = false;
+ struct ice_vsi *vsi;
struct ice_vf *vf;
int id;
@@ -1132,6 +1095,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (!vf)
return -ENOENT;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -ENOENT;
+
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;
@@ -1152,8 +1119,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (vf->first_vector_idx < 0)
goto unroll;
- ice_vf_vsi_release(vf);
- if (vf->vf_ops->create_vsi(vf)) {
+ if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) {
/* Try to rebuild with previous values */
needs_rebuild = true;
goto unroll;
@@ -1179,8 +1145,10 @@ unroll:
if (vf->first_vector_idx < 0)
return -EINVAL;
- if (needs_rebuild)
- vf->vf_ops->create_vsi(vf);
+ if (needs_rebuild) {
+ ice_vf_reconfig_vsi(vf);
+ ice_vf_init_host_cfg(vf, vsi);
+ }
ice_ena_vf_mappings(vf);
ice_put_vf(vf);
@@ -1329,8 +1297,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
/* event returns device global Rx queue number */
- queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
- GLDCB_RTCTQ_RXQNUM_S;
+ queue = FIELD_GET(GLDCB_RTCTQ_RXQNUM_M, gldcb_rtctq);
vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
if (!vf)
@@ -1895,6 +1862,24 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
}
/**
+ * ice_print_vf_tx_mdd_event - print VF Tx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_tx_mdd_event(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+
+ dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id,
+ vf->dev_lan_addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+}
+
+/**
* ice_print_vfs_mdd_events - print VFs malicious driver detect event
* @pf: pointer to the PF structure
*
@@ -1902,8 +1887,6 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
*/
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
struct ice_vf *vf;
unsigned int bkt;
@@ -1930,10 +1913,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
vf->mdd_tx_events.last_printed =
vf->mdd_tx_events.count;
-
- dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
- vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
- vf->dev_lan_addr);
+ ice_print_vf_tx_mdd_event(vf);
}
}
mutex_unlock(&pf->vfs.table_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 8488df38b586..8f22313474d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -49,7 +49,7 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
@@ -58,6 +58,7 @@ void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+void ice_print_vf_tx_mdd_event(struct ice_vf *vf);
bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev);
@@ -69,6 +70,7 @@ static inline
void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
+static inline void ice_print_vf_tx_mdd_event(struct ice_vf *vf) { }
static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { }
static inline int
@@ -130,11 +132,10 @@ ice_set_vf_bw(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-static inline int
+static inline void
ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
struct ice_q_vector __always_unused *q_vector)
{
- return 0;
}
static inline int
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index ee19f3aa3d19..94d6670d0901 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -42,6 +42,7 @@ enum {
ICE_PKT_KMALLOC = BIT(9),
ICE_PKT_PPPOE = BIT(10),
ICE_PKT_L2TPV3 = BIT(11),
+ ICE_PKT_PFCP = BIT(12),
};
struct ice_dummy_pkt_offsets {
@@ -1110,6 +1111,77 @@ ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
0x00, 0x00,
};
+ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv4) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_ILOS, 34 },
+ { ICE_PFCP, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv4) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x2c, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 34 */
+ 0x00, 0x18, 0x00, 0x00,
+
+ 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv6) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_ILOS, 54 },
+ { ICE_PFCP, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv6) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 54 */
+ 0x00, 0x18, 0x00, 0x00,
+
+ 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
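A short decoding note for the magic bytes in the two PFCP templates above, assuming standard PFCP encoding:

    /* 0x00 0x00 0x22 0x65 in the UDP words is source port 0, destination
     * port 8805, the IANA-assigned PFCP port; the leading 0x21 of the
     * ICE_PFCP block is PFCP version 1 with the S (SEID present) flag set,
     * which is what makes these "session" (rather than "node") templates.
     */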
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
@@ -1343,6 +1415,8 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
+ ICE_PKT_PROFILE(pfcp_session_ipv6, ICE_PKT_PFCP | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(pfcp_session_ipv4, ICE_PKT_PFCP),
ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
ICE_PKT_INNER_UDP),
ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
@@ -1812,7 +1886,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
struct ice_aqc_res_elem *vsi_ele;
int status;
@@ -2025,12 +2099,12 @@ error_out:
* ice_aq_map_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
- * @r_bitmap: Recipe bitmap filled in and need to be returned as response
+ * @r_assoc: recipe bitmap to associate with the profile ID
* @cd: pointer to command details structure or NULL
* Recipe to profile association (0x0291)
*/
int
-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
@@ -2042,7 +2116,7 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
/* Set the recipe ID bit in the bitmask to let the device know which
* profile we are associating the recipe to
*/
- memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
+ cmd->recipe_assoc = cpu_to_le64(r_assoc);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -2051,12 +2125,12 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* ice_aq_get_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
- * @r_bitmap: Recipe bitmap filled in and need to be returned as response
+ * @r_assoc: pointer to the recipe bitmap filled in as the response
* @cd: pointer to command details structure or NULL
* Associate profile ID with given recipe (0x0293)
*/
int
-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
@@ -2069,26 +2143,42 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (!status)
- memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
+ *r_assoc = le64_to_cpu(cmd->recipe_assoc);
return status;
}
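The recipe-to-profile association is now carried as a single u64 (ICE_MAX_NUM_RECIPES is 64, so one word suffices) rather than a byte array; callers that still want to manipulate it as a bitmap convert at the boundary, roughly like this sketch:

    DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
    u64 recp_assoc;

    status = ice_aq_get_recipe_to_profile(hw, profile_id, &recp_assoc, NULL);
    if (status)
        return status;

    bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
    /* ... set or clear recipe bits ... */
    bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);

    return ice_aq_map_recipe_to_profile(hw, profile_id, recp_assoc, NULL);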
/**
+ * ice_init_chk_recipe_reuse_support - check if recipe reuse is supported
+ * @hw: pointer to the hardware structure
+ */
+void ice_init_chk_recipe_reuse_support(struct ice_hw *hw)
+{
+ struct ice_nvm_info *nvm = &hw->flash.nvm;
+
+ hw->recp_reuse = (nvm->major == 0x4 && nvm->minor >= 0x30) ||
+ nvm->major > 0x4;
+}
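Worked examples of that gate, with purely illustrative NVM versions:

    /*   major 0x4, minor 0x20  ->  recp_reuse = false
     *   major 0x4, minor 0x30  ->  recp_reuse = true
     *   major 0x5, minor 0x00  ->  recp_reuse = true
     */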
+
+/**
* ice_alloc_recipe - add recipe resource
* @hw: pointer to the hardware structure
* @rid: recipe ID returned as response to AQ call
*/
int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
+ u16 res_type;
int status;
sw_buf->num_elems = cpu_to_le16(1);
- sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
- ICE_AQC_RES_TYPE_S) |
- ICE_AQC_RES_TYPE_FLAG_SHARED);
+ res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE);
+ if (hw->recp_reuse)
+ res_type |= ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED;
+ else
+ res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED;
+ sw_buf->res_type = cpu_to_le16(res_type);
status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
ice_aqc_opc_alloc_res);
if (!status)
@@ -2098,6 +2188,70 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
}
/**
+ * ice_free_recipe_res - free recipe resource
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to free
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int ice_free_recipe_res(struct ice_hw *hw, u16 rid)
+{
+ return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid);
+}
+
+/**
+ * ice_release_recipe_res - disassociate and free recipe resource
+ * @hw: pointer to the hardware structure
+ * @recp: the recipe struct resource to unassociate and free
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int ice_release_recipe_res(struct ice_hw *hw,
+ struct ice_sw_recipe *recp)
+{
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ struct ice_switch_info *sw = hw->switch_info;
+ u64 recp_assoc;
+ u32 rid, prof;
+ int status;
+
+ for_each_set_bit(rid, recp->r_bitmap, ICE_MAX_NUM_RECIPES) {
+ for_each_set_bit(prof, recipe_to_profile[rid],
+ ICE_MAX_NUM_PROFILES) {
+ status = ice_aq_get_recipe_to_profile(hw, prof,
+ &recp_assoc,
+ NULL);
+ if (status)
+ return status;
+
+ bitmap_from_arr64(r_bitmap, &recp_assoc,
+ ICE_MAX_NUM_RECIPES);
+ bitmap_andnot(r_bitmap, r_bitmap, recp->r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ bitmap_to_arr64(&recp_assoc, r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ ice_aq_map_recipe_to_profile(hw, prof,
+ recp_assoc, NULL);
+
+ clear_bit(rid, profile_to_recipe[prof]);
+ clear_bit(prof, recipe_to_profile[rid]);
+ }
+
+ status = ice_free_recipe_res(hw, rid);
+ if (status)
+ return status;
+
+ sw->recp_list[rid].recp_created = false;
+ sw->recp_list[rid].adv_rule = false;
+ memset(&sw->recp_list[rid].lkup_exts, 0,
+ sizeof(sw->recp_list[rid].lkup_exts));
+ clear_bit(rid, recp->r_bitmap);
+ }
+
+ return 0;
+}
+
+/**
* ice_get_recp_to_prof_map - updates recipe to profile mapping
* @hw: pointer to hardware structure
*
@@ -2108,6 +2262,7 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
{
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u64 recp_assoc;
u16 i;
for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
@@ -2115,8 +2270,9 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
- if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
+ if (ice_aq_get_recipe_to_profile(hw, i, &recp_assoc, NULL))
continue;
+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
bitmap_copy(profile_to_recipe[i], r_bitmap,
ICE_MAX_NUM_RECIPES);
for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
@@ -2144,6 +2300,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
* @recps: struct that we need to populate
* @rid: recipe ID that we are populating
* @refresh_required: true if we should get recipe to profile mapping from FW
+ * @is_add: flag indicating whether the recipe is being added
*
* This function is used to populate all the necessary entries into our
* bookkeeping so that we have a current list of all the recipes that are
@@ -2151,7 +2308,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
*/
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
- bool *refresh_required)
+ bool *refresh_required, bool is_add)
{
DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
struct ice_aqc_recipe_data_elem *tmp;
@@ -2268,8 +2425,12 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
}
- if (!is_root)
+ if (!is_root) {
+ if (hw->recp_reuse && is_add)
+ recps[idx].recp_created = true;
+
continue;
+ }
/* Only do the following for root recipes entries */
memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
@@ -2293,7 +2454,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
/* Copy result indexes */
bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
- recps[rid].recp_created = true;
+ if (is_add)
+ recps[rid].recp_created = true;
err_unroll:
kfree(tmp);
@@ -2444,6 +2606,9 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
fi->lan_en = true;
}
}
+
+ if (fi->flag & ICE_FLTR_TX_ONLY)
+ fi->lan_en = false;
}
/**
@@ -2492,25 +2657,24 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
switch (f_info->fltr_act) {
case ICE_FWD_TO_VSI:
- act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
- ICE_SINGLE_ACT_VSI_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ f_info->fwd_id.hw_vsi_id);
if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
act |= ICE_SINGLE_ACT_VSI_FORWARDING |
ICE_SINGLE_ACT_VALID_BIT;
break;
case ICE_FWD_TO_VSI_LIST:
act |= ICE_SINGLE_ACT_VSI_LIST;
- act |= (f_info->fwd_id.vsi_list_id <<
- ICE_SINGLE_ACT_VSI_LIST_ID_S) &
- ICE_SINGLE_ACT_VSI_LIST_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_LIST_ID_M,
+ f_info->fwd_id.vsi_list_id);
if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
act |= ICE_SINGLE_ACT_VSI_FORWARDING |
ICE_SINGLE_ACT_VALID_BIT;
break;
case ICE_FWD_TO_Q:
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ f_info->fwd_id.q_id);
break;
case ICE_DROP_PACKET:
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
@@ -2520,10 +2684,9 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
q_rgn = f_info->qgrp_size > 0 ?
(u8)ilog2(f_info->qgrp_size) : 0;
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
- act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
- ICE_SINGLE_ACT_Q_REGION_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ f_info->fwd_id.q_id);
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn);
break;
default:
return;
@@ -2649,7 +2812,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
m_ent->fltr_info.fwd_id.hw_vsi_id;
act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
- act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
+ act |= FIELD_PREP(ICE_LG_ACT_VSI_LIST_ID_M, id);
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
lg_act->act[0] = cpu_to_le32(act);
@@ -2657,16 +2820,15 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/* Second action descriptor type */
act = ICE_LG_ACT_GENERIC;
- act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+ act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, 1);
lg_act->act[1] = cpu_to_le32(act);
- act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
- ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
+ act = FIELD_PREP(ICE_LG_ACT_GENERIC_OFFSET_M,
+ ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX);
/* Third action Marker value */
act |= ICE_LG_ACT_GENERIC;
- act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
- ICE_LG_ACT_GENERIC_VALUE_M;
+ act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, sw_marker);
lg_act->act[2] = cpu_to_le32(act);
@@ -2675,9 +2837,9 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
ice_aqc_opc_update_sw_rules);
/* Update the action to point to the large action ID */
- rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
- ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
- ICE_SINGLE_ACT_PTR_VAL_M));
+ act = ICE_SINGLE_ACT_PTR;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_PTR_VAL_M, l_id);
+ rx_tx->act = cpu_to_le32(act);
/* Use the filter rule ID of the previously created rule with single
* act. Once the update happens, hardware will treat this as large
@@ -3822,6 +3984,7 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
} else if (f_info.flag & ICE_FLTR_TX) {
f_info.src_id = ICE_SRC_ID_VSI;
f_info.src = hw_vsi_id;
+ f_info.flag |= ICE_FLTR_TX_ONLY;
}
f_list_entry.fltr_info = f_info;
@@ -4421,13 +4584,13 @@ int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
buf->num_elems = cpu_to_le16(num_items);
- buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
- ICE_AQC_RES_TYPE_M) | alloc_shared);
+ buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) |
+ alloc_shared);
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
if (status)
@@ -4449,13 +4612,13 @@ int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
buf->num_elems = cpu_to_le16(num_items);
- buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
- ICE_AQC_RES_TYPE_M) | alloc_shared);
+ buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) |
+ alloc_shared);
buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
@@ -4479,20 +4642,17 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
*/
int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
+ u16 res_type;
int status;
buf->num_elems = cpu_to_le16(1);
+ res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, type);
if (shared)
- buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
- ICE_AQC_RES_TYPE_M) |
- ICE_AQC_RES_TYPE_FLAG_SHARED);
- else
- buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
- ICE_AQC_RES_TYPE_M) &
- ~ICE_AQC_RES_TYPE_FLAG_SHARED);
+ res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED;
+ buf->res_type = cpu_to_le16(res_type);
buf->elem[0].e.sw_resp = cpu_to_le16(res_id);
status = ice_aq_alloc_free_res(hw, buf, buf_len,
ice_aqc_opc_share_res);
@@ -4532,6 +4692,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
+ ICE_PROTOCOL_ENTRY(ICE_PFCP, 8, 10, 12, 14, 16, 18, 20, 22),
ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
@@ -4565,6 +4726,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_NVGRE, ICE_GRE_OF_HW },
{ ICE_GTP, ICE_UDP_OF_HW },
{ ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
+ { ICE_PFCP, ICE_UDP_ILOS_HW },
{ ICE_PPPOE, ICE_PPPOE_HW },
{ ICE_L2TPV3, ICE_L2TPV3_HW },
{ ICE_VLAN_EX, ICE_VLAN_OF_HW },
@@ -4577,12 +4739,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
* @hw: pointer to the hardware structure
* @lkup_exts: extension sequence to match
* @rinfo: information regarding the rule e.g. priority and action info
+ * @is_add: flag indicating whether the recipe is being added
*
* Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
*/
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
- const struct ice_adv_rule_info *rinfo)
+ const struct ice_adv_rule_info *rinfo, bool is_add)
{
bool refresh_required = true;
struct ice_sw_recipe *recp;
@@ -4596,11 +4759,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
* entry update it in our SW bookkeeping and continue with the
* matching.
*/
- if (!recp[i].recp_created)
+ if (hw->recp_reuse) {
if (ice_get_recp_frm_fw(hw,
hw->switch_info->recp_list, i,
- &refresh_required))
+ &refresh_required, is_add))
continue;
+ }
/* Skip inverse action recipes */
if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
@@ -5024,8 +5188,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
entry->chain_idx = chain_idx;
content->result_indx =
ICE_AQ_RECIPE_RESULT_EN |
- ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
- ICE_AQ_RECIPE_RESULT_DATA_M);
+ FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
+ chain_idx);
clear_bit(chain_idx, result_idx_bm);
chain_idx = find_first_bit(result_idx_bm,
ICE_MAX_FV_WORDS);
@@ -5272,6 +5436,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
case ICE_SW_TUN_GTPC:
prof_type = ICE_PROF_TUN_GTPC;
break;
+ case ICE_SW_TUN_PFCP:
+ prof_type = ICE_PROF_TUN_PFCP;
+ break;
case ICE_SW_TUN_AND_NON_TUN:
default:
prof_type = ICE_PROF_ALL;
@@ -5282,6 +5449,49 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
}
/**
+ * ice_subscribe_recipe - subscribe to an existing recipe
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to subscribe to
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int ice_subscribe_recipe(struct ice_hw *hw, u16 rid)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ u16 buf_len = __struct_size(sw_buf);
+ u16 res_type;
+ int status;
+
+ /* Prepare buffer to allocate resource */
+ sw_buf->num_elems = cpu_to_le16(1);
+ res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE) |
+ ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED |
+ ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL;
+ sw_buf->res_type = cpu_to_le16(res_type);
+
+ sw_buf->elem[0].e.sw_resp = cpu_to_le16(rid);
+
+ status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res);
+
+ return status;
+}
+
+/**
+ * ice_subscribable_recp_shared - share an existing subscribable recipe
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to subscribe to
+ */
+static void ice_subscribable_recp_shared(struct ice_hw *hw, u16 rid)
+{
+ struct ice_sw_recipe *recps = hw->switch_info->recp_list;
+ u16 sub_rid;
+
+ for_each_set_bit(sub_rid, recps[rid].r_bitmap, ICE_MAX_NUM_RECIPES)
+ ice_subscribe_recipe(hw, sub_rid);
+}
+
+/**
* ice_add_adv_recipe - Add an advanced recipe that is not part of the default
* @hw: pointer to hardware structure
* @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -5303,6 +5513,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_sw_fv_list_entry *tmp;
struct ice_sw_recipe *rm;
int status = 0;
+ u16 rid_tmp;
u8 i;
if (!lkups_cnt)
@@ -5380,10 +5591,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
/* Look for a recipe which matches our requested fv / mask list */
- *rid = ice_find_recp(hw, lkup_exts, rinfo);
- if (*rid < ICE_MAX_NUM_RECIPES)
+ *rid = ice_find_recp(hw, lkup_exts, rinfo, true);
+ if (*rid < ICE_MAX_NUM_RECIPES) {
/* Success if found a recipe that match the existing criteria */
+ if (hw->recp_reuse)
+ ice_subscribable_recp_shared(hw, *rid);
+
goto err_unroll;
+ }
rm->tun_type = rinfo->tun_type;
/* Recipe we need does not exist, add a recipe */
@@ -5396,26 +5611,28 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
list_for_each_entry(fvit, &rm->fv_list, list_entry) {
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u64 recp_assoc;
u16 j;
status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
- (u8 *)r_bitmap, NULL);
+ &recp_assoc, NULL);
if (status)
- goto err_unroll;
+ goto err_free_recipe;
+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
ICE_MAX_NUM_RECIPES);
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
- goto err_unroll;
+ goto err_free_recipe;
+ bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
- (u8 *)r_bitmap,
- NULL);
+ recp_assoc, NULL);
ice_release_change_lock(hw);
if (status)
- goto err_unroll;
+ goto err_free_recipe;
/* Update profile to recipe bitmap array */
bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
@@ -5429,6 +5646,16 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*rid = rm->root_rid;
memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
sizeof(*lkup_exts));
+ goto err_unroll;
+
+err_free_recipe:
+ if (hw->recp_reuse) {
+ for_each_set_bit(rid_tmp, rm->r_bitmap, ICE_MAX_NUM_RECIPES) {
+ if (!ice_free_recipe_res(hw, rid_tmp))
+ clear_bit(rid_tmp, rm->r_bitmap);
+ }
+ }
+
err_unroll:
list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
list_del(&r_entry->l_entry);
@@ -5554,6 +5781,9 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_SW_TUN_VXLAN:
match |= ICE_PKT_TUN_UDP;
break;
+ case ICE_SW_TUN_PFCP:
+ match |= ICE_PKT_PFCP;
+ break;
default:
break;
}
@@ -5694,6 +5924,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_GTP:
len = sizeof(struct ice_udp_gtp_hdr);
break;
+ case ICE_PFCP:
+ len = sizeof(struct ice_pfcp_hdr);
+ break;
case ICE_PPPOE:
len = sizeof(struct ice_pppoe_hdr);
break;
@@ -6065,6 +6298,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
rinfo->sw_act.fltr_act == ICE_DROP_PACKET ||
+ rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET ||
rinfo->sw_act.fltr_act == ICE_NOP)) {
status = -EIO;
goto free_pkt_profile;
@@ -6077,9 +6311,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
- rinfo->sw_act.fltr_act == ICE_NOP)
+ rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET ||
+ rinfo->sw_act.fltr_act == ICE_NOP) {
rinfo->sw_act.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
+ }
if (rinfo->src_vsi)
rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi);
@@ -6115,38 +6351,45 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = -ENOMEM;
goto free_pkt_profile;
}
- if (!rinfo->flags_info.act_valid) {
- act |= ICE_SINGLE_ACT_LAN_ENABLE;
- act |= ICE_SINGLE_ACT_LB_ENABLE;
- } else {
- act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
- ICE_SINGLE_ACT_LB_ENABLE);
+
+ if (rinfo->sw_act.fltr_act != ICE_MIRROR_PACKET) {
+ if (!rinfo->flags_info.act_valid) {
+ act |= ICE_SINGLE_ACT_LAN_ENABLE;
+ act |= ICE_SINGLE_ACT_LB_ENABLE;
+ } else {
+ act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_LB_ENABLE);
+ }
}
switch (rinfo->sw_act.fltr_act) {
case ICE_FWD_TO_VSI:
- act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
- ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ rinfo->sw_act.fwd_id.hw_vsi_id);
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
break;
case ICE_FWD_TO_Q:
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ rinfo->sw_act.fwd_id.q_id);
break;
case ICE_FWD_TO_QGRP:
q_rgn = rinfo->sw_act.qgrp_size > 0 ?
(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
- act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
- ICE_SINGLE_ACT_Q_REGION_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ rinfo->sw_act.fwd_id.q_id);
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn);
break;
case ICE_DROP_PACKET:
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
ICE_SINGLE_ACT_VALID_BIT;
break;
+ case ICE_MIRROR_PACKET:
+ act |= ICE_SINGLE_ACT_OTHER_ACTS;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ rinfo->sw_act.fwd_id.hw_vsi_id);
+ break;
case ICE_NOP:
act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
rinfo->sw_act.fwd_id.hw_vsi_id);
@@ -6432,7 +6675,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
return -EIO;
}
- rid = ice_find_recp(hw, &lkup_exts, rinfo);
+ rid = ice_find_recp(hw, &lkup_exts, rinfo, false);
/* If did not find a recipe that match the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
return -EINVAL;
@@ -6476,14 +6719,21 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
ice_aqc_opc_remove_sw_rules, NULL);
if (!status || status == -ENOENT) {
struct ice_switch_info *sw = hw->switch_info;
+ struct ice_sw_recipe *r_list = sw->recp_list;
mutex_lock(rule_lock);
list_del(&list_elem->list_entry);
devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
devm_kfree(ice_hw_to_dev(hw), list_elem);
mutex_unlock(rule_lock);
- if (list_empty(&sw->recp_list[rid].filt_rules))
- sw->recp_list[rid].adv_rule = false;
+ if (list_empty(&r_list[rid].filt_rules)) {
+ r_list[rid].adv_rule = false;
+
+ /* All rules for this recipe are now removed */
+ if (hw->recp_reuse)
+ ice_release_recipe_res(hw,
+ &r_list[rid]);
+ }
}
kfree(s_rule);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index db7e501b7e0a..ad98e98c812d 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -8,8 +8,9 @@
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_DFLT_VSI_INVAL 0xff
-#define ICE_FLTR_RX BIT(0)
-#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_RX BIT(0)
+#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_TX_ONLY BIT(2)
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
@@ -21,6 +22,8 @@
#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
#define ICE_PROFID_IPV6_GTPU_TEID 46
#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
+#define ICE_PROFID_IPV4_PFCP_NODE 79
+#define ICE_PROFID_IPV6_PFCP_SESSION 82
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
@@ -424,10 +427,11 @@ int ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd);
int
-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd);
int
-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd);
+void ice_init_chk_recipe_reuse_support(struct ice_hw *hw);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 37b54db91df2..8bd24b33f3a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
* - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
* - Tunnel flag (present if tunnel)
*/
+ if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+ lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
@@ -35,7 +37,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
lkups_cnt++;
- if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
+ if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS)
+ lkups_cnt++;
+
+ if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS)
lkups_cnt++;
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
@@ -138,6 +143,8 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
return ICE_GTP;
case TNL_GTPC:
return ICE_GTP_NO_PAY;
+ case TNL_PFCP:
+ return ICE_PFCP;
default:
return 0;
}
@@ -157,6 +164,8 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
return ICE_SW_TUN_GTPU;
case TNL_GTPC:
return ICE_SW_TUN_GTPC;
+ case TNL_PFCP:
+ return ICE_SW_TUN_PFCP;
default:
return ICE_NON_TUN;
}
@@ -219,8 +228,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
- if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
- (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
+ if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) {
list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
if (fltr->gtp_pdu_info_masks.pdu_type) {
@@ -237,6 +245,22 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
+ if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) {
+ struct ice_pfcp_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.pfcp_hdr;
+ hdr_m = &list[i].m_u.pfcp_hdr;
+ list[i].type = ICE_PFCP;
+
+ hdr_h->flags = fltr->pfcp_meta_keys.type;
+ hdr_m->flags = fltr->pfcp_meta_masks.type & 0x01;
+
+ hdr_h->seid = fltr->pfcp_meta_keys.seid;
+ hdr_m->seid = fltr->pfcp_meta_masks.seid;
+
+ i++;
+ }
+
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
list[i].type = ice_proto_type_from_ipv4(false);
@@ -363,12 +387,20 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* Always add direction metadata */
ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+ ice_rule_add_src_vsi_metadata(&list[i]);
+ i++;
+ }
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
- headers = &tc_fltr->inner_headers;
- inner = true;
+ /* PFCP is considered non-tunneled - don't swap headers. */
+ if (tc_fltr->tunnel_type != TNL_PFCP) {
+ headers = &tc_fltr->inner_headers;
+ inner = true;
+ }
}
if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
@@ -622,6 +654,8 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
*/
if (netif_is_gtp(tunnel_dev))
return TNL_GTPU;
+ if (netif_is_pfcp(tunnel_dev))
+ return TNL_PFCP;
return TNL_LAST;
}
@@ -630,32 +664,98 @@ bool ice_is_tunnel_supported(struct net_device *dev)
return ice_tc_tun_get_type(dev) != TNL_LAST;
}
-static int
-ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
- struct flow_action_entry *act)
+static bool ice_tc_is_dev_uplink(struct net_device *dev)
+{
+ return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
+}
+
+static int ice_tc_setup_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr,
+ struct net_device *target_dev,
+ enum ice_sw_fwd_act_type action)
{
struct ice_repr *repr;
+ if (action != ICE_FWD_TO_VSI && action != ICE_MIRROR_PACKET) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action to setup provided");
+ return -EINVAL;
+ }
+
+ fltr->action.fltr_act = action;
+
+ if (ice_is_port_repr_netdev(filter_dev) &&
+ ice_is_port_repr_netdev(target_dev)) {
+ repr = ice_netdev_to_repr(target_dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_is_port_repr_netdev(filter_dev) &&
+ ice_tc_is_dev_uplink(target_dev)) {
+ repr = ice_netdev_to_repr(filter_dev);
+
+ fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_tc_is_dev_uplink(filter_dev) &&
+ ice_is_port_repr_netdev(target_dev)) {
+ repr = ice_netdev_to_repr(target_dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unsupported netdevice in switchdev mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
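A summary of the three netdev pairings the helper above accepts (a descriptive aid only, no new logic):

    /*   filter_dev          target_dev        dest_vsi              direction
     *   VF representor  ->  VF representor    target repr src_vsi   EGRESS  (VF to VF)
     *   VF representor  ->  uplink netdev     eswitch uplink_vsi    EGRESS  (VF to uplink)
     *   uplink netdev   ->  VF representor    target repr src_vsi   INGRESS (uplink to VF)
     */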
+
+static int
+ice_tc_setup_drop_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr)
+{
+ fltr->action.fltr_act = ICE_DROP_PACKET;
+
+ if (ice_is_port_repr_netdev(filter_dev)) {
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_tc_is_dev_uplink(filter_dev)) {
+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unsupported netdevice in switchdev mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ int err;
+
switch (act->id) {
case FLOW_ACTION_DROP:
- fltr->action.fltr_act = ICE_DROP_PACKET;
+ err = ice_tc_setup_drop_action(filter_dev, fltr);
+ if (err)
+ return err;
+
break;
case FLOW_ACTION_REDIRECT:
- fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ err = ice_tc_setup_action(filter_dev, fltr,
+ act->dev, ICE_FWD_TO_VSI);
+ if (err)
+ return err;
- if (ice_is_port_repr_netdev(act->dev)) {
- repr = ice_netdev_to_repr(act->dev);
+ break;
- fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
- } else if (netif_is_ice(act->dev) ||
- ice_is_tunnel_supported(act->dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
- return -EINVAL;
- }
+ case FLOW_ACTION_MIRRED:
+ err = ice_tc_setup_action(filter_dev, fltr,
+ act->dev, ICE_MIRROR_PACKET);
+ if (err)
+ return err;
break;
@@ -680,7 +780,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
int ret;
int i;
- if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+ if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
return -EOPNOTSUPP;
}
@@ -696,10 +796,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
goto exit;
}
- /* egress traffic is always redirect to uplink */
- if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
- fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
-
rule_info.sw_act.fltr_act = fltr->action.fltr_act;
if (fltr->action.fltr_act != ICE_DROP_PACKET)
rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
@@ -713,17 +809,26 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.flags_info.act_valid = true;
if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+ /* Uplink to VF */
rule_info.sw_act.flag |= ICE_FLTR_RX;
rule_info.sw_act.src = hw->pf_id;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
- } else {
+ } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
+ /* VF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+ } else {
+ /* VF to VF */
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
}
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
+ rule_info.src_vsi = vsi->idx;
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (ret == -EEXIST) {
@@ -1305,7 +1410,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
}
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
+ (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
struct flow_match_enc_opts match;
flow_rule_match_enc_opts(rule, &match);
@@ -1316,7 +1422,21 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
sizeof(struct gtp_pdu_session_info));
- fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
+ fltr->flags |= ICE_TC_FLWR_FIELD_GTP_OPTS;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
+ fltr->tunnel_type == TNL_PFCP) {
+ struct flow_match_enc_opts match;
+
+ flow_rule_match_enc_opts(rule, &match);
+
+ memcpy(&fltr->pfcp_meta_keys, match.key->data,
+ sizeof(struct pfcp_metadata));
+ memcpy(&fltr->pfcp_meta_masks, match.mask->data,
+ sizeof(struct pfcp_metadata));
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_PFCP_OPTS;
}
return 0;
@@ -1377,15 +1497,22 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return err;
}
- /* header pointers should point to the inner headers, outer
- * header were already set by ice_parse_tunnel_attr
- */
- headers = &fltr->inner_headers;
+ /* PFCP is considered non-tunneled - don't swap headers. */
+ if (fltr->tunnel_type != TNL_PFCP) {
+ /* Header pointers should point to the inner headers,
+ * outer headers were already set by
+ * ice_parse_tunnel_attr().
+ */
+ headers = &fltr->inner_headers;
+ }
} else if (dissector->used_keys &
(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
return -EOPNOTSUPP;
} else {
@@ -1531,6 +1658,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ fltr->extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -1745,16 +1876,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
/**
* ice_parse_tc_flower_actions - Parse the actions for a TC filter
+ * @filter_dev: Pointer to device on which filter is being added
* @vsi: Pointer to VSI
* @cls_flower: Pointer to TC flower offload structure
* @fltr: Pointer to TC flower filter structure
*
* Parse the actions for a TC filter
*/
-static int
-ice_parse_tc_flower_actions(struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower,
- struct ice_tc_flower_fltr *fltr)
+static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
+ struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
struct flow_action *flow_action = &rule->action;
@@ -1769,7 +1901,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
flow_action_for_each(i, act, flow_action) {
if (ice_is_eswitch_mode_switchdev(vsi->back))
- err = ice_eswitch_tc_parse_action(fltr, act);
+ err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
else
err = ice_tc_parse_action(vsi, fltr, act);
if (err)
@@ -1856,7 +1988,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
if (err < 0)
goto err;
- err = ice_parse_tc_flower_actions(vsi, f, fltr);
+ err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
if (err < 0)
goto err;
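
For reference, the rule_info hunk earlier in this file replaces the old two-way ingress/egress split with three forwarding cases (uplink-to-VF, VF-to-uplink, VF-to-VF). A minimal sketch of that decision, condensed into one hypothetical helper; every identifier except example_fill_sw_act() is taken from the hunk itself, and this helper does not exist in the driver.

/* Hypothetical condensation of the direction handling above, not driver code. */
static void example_fill_sw_act(struct ice_adv_rule_info *ri,
				struct ice_tc_flower_fltr *fltr,
				struct ice_vsi *vsi, struct ice_hw *hw)
{
	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
		/* Uplink to VF: receive filter, loop back to the VF VSI */
		ri->sw_act.flag |= ICE_FLTR_RX;
		ri->sw_act.src = hw->pf_id;
		ri->flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
	} else if (fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
		/* VF to uplink: transmit filter, forward out the LAN port */
		ri->sw_act.flag |= ICE_FLTR_TX;
		ri->sw_act.src = vsi->idx;
		ri->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
	} else {
		/* VF to VF: transmit filter, loop back inside the switch */
		ri->sw_act.flag |= ICE_FLTR_TX;
		ri->sw_act.src = vsi->idx;
		ri->flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
	}
}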
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 65d387163a46..d84f153517ec 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -4,6 +4,9 @@
#ifndef _ICE_TC_LIB_H_
#define _ICE_TC_LIB_H_
+#include <linux/bits.h>
+#include <net/pfcp.h>
+
#define ICE_TC_FLWR_FIELD_DST_MAC BIT(0)
#define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1)
#define ICE_TC_FLWR_FIELD_VLAN BIT(2)
@@ -22,7 +25,7 @@
#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
-#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
+#define ICE_TC_FLWR_FIELD_GTP_OPTS BIT(18)
#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20)
#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21)
@@ -34,6 +37,7 @@
#define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27)
#define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28)
#define ICE_TC_FLWR_FIELD_VLAN_TPID BIT(29)
+#define ICE_TC_FLWR_FIELD_PFCP_OPTS BIT(30)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
@@ -161,6 +165,8 @@ struct ice_tc_flower_fltr {
__be32 tenant_id;
struct gtp_pdu_session_info gtp_pdu_info_keys;
struct gtp_pdu_session_info gtp_pdu_info_masks;
+ struct pfcp_metadata pfcp_meta_keys;
+ struct pfcp_metadata pfcp_meta_masks;
u32 flags;
u8 tunnel_type;
struct ice_tc_flower_action action;
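
The new pfcp_meta_keys/pfcp_meta_masks members hold the raw flower enc_opts blob copied in ice_parse_tunnel_attr(). As rough orientation only — the authoritative layout lives in <net/pfcp.h>, and the fields below are an assumption about it — the blob amounts to a PFCP message type plus a session endpoint identifier:

/* Assumed layout of the structure the memcpy()s above fill in; key and mask
 * are the same fixed-size blob copied straight from match.key / match.mask.
 */
struct pfcp_metadata {
	u8 type;	/* PFCP message type (node- vs. session-related) */
	__be64 seid;	/* Session Endpoint Identifier */
} __packed;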
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 52d0a126eb61..8bb743f78fcb 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -513,11 +513,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
- if (rx_ring->vsi->type == ICE_VSI_PF &&
- !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
- if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
- goto err;
return 0;
err:
@@ -557,13 +552,14 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
* @rx_buf: Rx buffer to store the XDP action
+ * @eop_desc: Last descriptor in packet to read metadata from
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static void
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
- struct ice_rx_buf *rx_buf)
+ struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
{
unsigned int ret = ICE_XDP_PASS;
u32 act;
@@ -571,6 +567,8 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (!xdp_prog)
goto exit;
+ ice_xdp_meta_set_desc(xdp, eop_desc);
+
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
@@ -600,9 +598,7 @@ out_failure:
ret = ICE_XDP_CONSUMED;
}
exit:
- rx_buf->act = ret;
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ ice_set_rx_bufs_act(xdp, rx_ring, ret);
}
/**
@@ -890,14 +886,17 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
}
if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
return -ENOMEM;
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
sinfo->xdp_frags_size += size;
+ /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
+ * can pop off frags but the driver has to handle it on its own
+ */
+ rx_ring->nr_frags = sinfo->nr_frags;
if (page_is_pfmemalloc(rx_buf->page))
xdp_buff_set_frag_pfmemalloc(xdp);
@@ -1052,8 +1051,7 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
}
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE);
if (unlikely(!skb))
return NULL;
@@ -1180,8 +1178,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
- u16 vlan_tag = 0;
- u16 rx_ptype;
+ u16 vlan_tci;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, ntc);
@@ -1241,7 +1238,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
+ ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
if (rx_buf->act == ICE_XDP_PASS)
goto construct_skb;
total_rx_bytes += xdp_get_buff_len(xdp);
@@ -1249,6 +1246,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp->data = NULL;
rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
continue;
construct_skb:
if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1264,10 +1262,12 @@ construct_skb:
ICE_XDP_CONSUMED);
xdp->data = NULL;
rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
break;
}
xdp->data = NULL;
rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
@@ -1276,7 +1276,7 @@ construct_skb:
continue;
}
- vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
+ vlan_tci = ice_get_vlan_tci(rx_desc);
/* pad the skb if needed, to make a valid ethernet frame */
if (eth_skb_pad(skb))
@@ -1286,14 +1286,11 @@ construct_skb:
total_rx_bytes += skb->len;
/* populate checksum, VLAN, and protocol */
- rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
- ICE_RX_FLEX_DESC_PTYPE_M;
-
- ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+ ice_process_skb_fields(rx_ring, rx_desc, skb);
ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
/* send completed skb up the stack */
- ice_receive_skb(rx_ring, skb, vlan_tag);
+ ice_receive_skb(rx_ring, skb, vlan_tci);
/* update budget accounting */
total_rx_pkts++;
@@ -1494,9 +1491,9 @@ static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
* be static in non-adaptive mode (user configured)
*/
wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
- ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
- GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
- GLINT_DYN_CTL_WB_ON_ITR_M);
+ FIELD_PREP(GLINT_DYN_CTL_ITR_INDX_M, ICE_ITR_NONE) |
+ FIELD_PREP(GLINT_DYN_CTL_INTENA_MSK_M, 1) |
+ FIELD_PREP(GLINT_DYN_CTL_WB_ON_ITR_M, 1));
q_vector->wb_on_itr = true;
}
@@ -2306,9 +2303,6 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
return;
- if (!tx_ring->ptp_tx)
- return;
-
/* Tx timestamps cannot be sampled when doing TSO */
if (first->tx_flags & ICE_TX_FLAGS_TSO)
return;
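
The GLINT_DYN_CTL hunk in ice_set_wb_on_itr() above swaps open-coded shift-and-mask arithmetic for FIELD_PREP(). A minimal standalone sketch of why that conversion is behavior-preserving; the register name and mask below are illustrative, not the driver's definitions.

#include <linux/bits.h>
#include <linux/bitfield.h>

#define EXAMPLE_ITR_INDX_M	GENMASK(4, 3)	/* a two-bit field at bits 4:3 */

static u32 example_old_way(u32 itr)
{
	return (itr << 3) & EXAMPLE_ITR_INDX_M;		/* manual shift + mask */
}

static u32 example_new_way(u32 itr)
{
	/* FIELD_PREP() shifts the value to the mask's lowest set bit and
	 * masks the result - same value as above, but the field is explicit
	 * and the shift constant cannot drift out of sync with the mask.
	 */
	return FIELD_PREP(EXAMPLE_ITR_INDX_M, itr);
}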
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 166413fc33f4..feba314a3fe4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -257,6 +257,20 @@ enum ice_rx_dtype {
ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};
+struct ice_pkt_ctx {
+ u64 cached_phctime;
+ __be16 vlan_proto;
+};
+
+struct ice_xdp_buff {
+ struct xdp_buff xdp_buff;
+ const union ice_32b_rx_flex_desc *eop_desc;
+ const struct ice_pkt_ctx *pkt_ctx;
+};
+
+/* Required for compatibility with xdp_buffs from xsk_pool */
+static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0);
+
/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
@@ -298,7 +312,6 @@ enum ice_dynamic_itr {
/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
- struct ice_rx_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -310,13 +323,24 @@ struct ice_rx_ring {
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 next_to_alloc;
- /* CL2 - 2nd cacheline starts here */
+
union {
struct ice_rx_buf *rx_buf;
struct xdp_buff **xdp_buf;
};
- struct xdp_buff xdp;
+ /* CL2 - 2nd cacheline starts here */
+ union {
+ struct ice_xdp_buff xdp_ext;
+ struct xdp_buff xdp;
+ };
/* CL3 - 3rd cacheline starts here */
+ union {
+ struct ice_pkt_ctx pkt_ctx;
+ struct {
+ u64 cached_phctime;
+ __be16 vlan_proto;
+ };
+ };
struct bpf_prog *xdp_prog;
u16 rx_offset;
@@ -332,14 +356,16 @@ struct ice_rx_ring {
/* CL4 - 4th cacheline starts here */
struct ice_channel *ch;
struct ice_tx_ring *xdp_ring;
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
+ u32 nr_frags;
dma_addr_t dma; /* physical address of ring */
- u64 cached_phctime;
u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
+#define ICE_RX_FLAGS_MULTIDEV BIT(3)
u8 flags;
/* CL5 - 5th cacheline starts here */
struct xdp_rxq_info xdp_rxq;
@@ -380,7 +406,6 @@ struct ice_tx_ring {
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
- u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;
static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
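
The new struct ice_xdp_buff wraps xdp_buff so the EOP descriptor and packet context ride alongside the buffer handed to the XDP program, and the static_assert() above pins the embedded xdp_buff at offset 0 so a plain xdp_buff pointer (for example one coming back from an xsk_buff_pool) can be viewed as an ice_xdp_buff. A minimal sketch of the round trip under that assumption; example_set_and_read() is hypothetical, the types are the ones declared above.

static void example_set_and_read(struct xdp_buff *xdp,
				 union ice_32b_rx_flex_desc *eop_desc)
{
	struct ice_xdp_buff *xdp_ext;

	/* Driver side: the ring really stores an ice_xdp_buff, so walking
	 * from the embedded xdp_buff back to the wrapper is pointer math.
	 */
	xdp_ext = container_of(xdp, struct ice_xdp_buff, xdp_buff);
	xdp_ext->eop_desc = eop_desc;

	/* Hint-handler side: the xdp_md pointer the kfuncs receive is the
	 * xdp_buff pointer, so casting it straight to ice_xdp_buff is only
	 * valid because the static_assert() keeps xdp_buff at offset 0.
	 */
	xdp_ext = (struct ice_xdp_buff *)(void *)xdp;
	(void)xdp_ext->eop_desc;
}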
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 7e06373e14d9..2719f0e20933 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/filter.h>
+#include <linux/net/intel/libie/rx.h>
#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
@@ -39,52 +40,44 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
}
/**
- * ice_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
+ * ice_get_rx_hash - get RX hash value from descriptor
+ * @rx_desc: specific descriptor
*
- * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
- * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
- * Rx desc.
+ * Returns hash, if present, 0 otherwise.
*/
-static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
+static u32 ice_get_rx_hash(const union ice_32b_rx_flex_desc *rx_desc)
{
- struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);
-
- if (!decoded.known)
- return PKT_HASH_TYPE_NONE;
- if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
- return PKT_HASH_TYPE_L4;
- if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
- return PKT_HASH_TYPE_L3;
- if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
- return PKT_HASH_TYPE_L2;
-
- return PKT_HASH_TYPE_NONE;
+ const struct ice_32b_rx_flex_desc_nic *nic_mdid;
+
+ if (unlikely(rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC))
+ return 0;
+
+ nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ return le32_to_cpu(nic_mdid->rss_hash);
}
/**
- * ice_rx_hash - set the hash value in the skb
+ * ice_rx_hash_to_skb - set the hash value in the skb
* @rx_ring: descriptor ring
* @rx_desc: specific descriptor
* @skb: pointer to current skb
* @rx_ptype: the ptype value from the descriptor
*/
static void
-ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u16 rx_ptype)
+ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
+ const union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u16 rx_ptype)
{
- struct ice_32b_rx_flex_desc_nic *nic_mdid;
+ struct libeth_rx_pt decoded;
u32 hash;
- if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
+ decoded = libie_rx_pt_parse(rx_ptype);
+ if (!libeth_rx_pt_has_hash(rx_ring->netdev, decoded))
return;
- if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
- return;
-
- nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
- hash = le32_to_cpu(nic_mdid->rss_hash);
- skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+ hash = ice_get_rx_hash(rx_desc);
+ if (likely(hash))
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
/**
@@ -100,37 +93,33 @@ static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
- struct ice_rx_ptype_decoded decoded;
+ struct libeth_rx_pt decoded;
u16 rx_status0, rx_status1;
bool ipv4, ipv6;
- rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
- rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
-
- decoded = ice_decode_rx_desc_ptype(ptype);
-
/* Start with CHECKSUM_NONE and by default csum_level = 0 */
skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
- /* check if Rx checksum is enabled */
- if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ decoded = libie_rx_pt_parse(ptype);
+ if (!libeth_rx_pt_has_checksum(ring->netdev, decoded))
return;
+ rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
+ rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
+
/* check if HW has decoded the packet and checksum */
if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
- if (!(decoded.known && decoded.outer_ip))
- return;
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
- ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
+ ring->vsi->back->hw_rx_eipe_error++;
+ return;
+ }
- if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
- BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))))
goto checksum_fail;
if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
@@ -151,19 +140,10 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
- if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
+ if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case ICE_RX_PTYPE_INNER_PROT_TCP:
- case ICE_RX_PTYPE_INNER_PROT_UDP:
- case ICE_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- default:
- break;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
checksum_fail:
@@ -171,11 +151,38 @@ checksum_fail:
}
/**
+ * ice_ptp_rx_hwts_to_skb - Put RX timestamp into skb
+ * @rx_ring: Ring to get the VSI info
+ * @rx_desc: Receive descriptor
+ * @skb: Particular skb to send timestamp with
+ *
+ * The timestamp is in ns, so we must convert the result first.
+ */
+static void
+ice_ptp_rx_hwts_to_skb(struct ice_rx_ring *rx_ring,
+ const union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, &rx_ring->pkt_ctx);
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns);
+}
+
+/**
+ * ice_get_ptype - Read HW packet type from the descriptor
+ * @rx_desc: RX descriptor
+ */
+static u16 ice_get_ptype(const union ice_32b_rx_flex_desc *rx_desc)
+{
+ return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
+ ICE_RX_FLEX_DESC_PTYPE_M;
+}
+
+/**
* ice_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: Rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @ptype: the packet type decoded by hardware
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
@@ -184,38 +191,45 @@ checksum_fail:
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u16 ptype)
+ struct sk_buff *skb)
{
- ice_rx_hash(rx_ring, rx_desc, skb, ptype);
+ u16 ptype = ice_get_ptype(rx_desc);
+
+ ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ if (unlikely(rx_ring->flags & ICE_RX_FLAGS_MULTIDEV)) {
+ struct net_device *netdev = ice_eswitch_get_target(rx_ring,
+ rx_desc);
+
+ if (ice_is_port_repr_netdev(netdev))
+ ice_repr_inc_rx_stats(netdev, skb->len);
+ skb->protocol = eth_type_trans(skb, netdev);
+ } else {
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ }
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
if (rx_ring->ptp_rx)
- ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+ ice_ptp_rx_hwts_to_skb(rx_ring, rx_desc, skb);
}
/**
* ice_receive_skb - Send a completed packet up the stack
* @rx_ring: Rx ring in play
* @skb: packet to send up
- * @vlan_tag: VLAN tag for packet
+ * @vlan_tci: VLAN TCI for packet
*
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (with/without VLAN tag)
*/
void
-ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci)
{
- netdev_features_t features = rx_ring->netdev->features;
- bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
+ if ((vlan_tci & VLAN_VID_MASK) && rx_ring->vlan_proto)
+ __vlan_hwaccel_put_tag(skb, rx_ring->vlan_proto,
+ vlan_tci);
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
@@ -464,3 +478,84 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
spin_unlock(&xdp_ring->tx_lock);
}
}
+
+/**
+ * ice_xdp_rx_hw_ts - HW timestamp XDP hint handler
+ * @ctx: XDP buff pointer
+ * @ts_ns: destination address
+ *
+ * Copy HW timestamp (if available) to the destination address.
+ */
+static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
+{
+ const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+
+ *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc,
+ xdp_ext->pkt_ctx);
+ if (!*ts_ns)
+ return -ENODATA;
+
+ return 0;
+}
+
+/**
+ * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor
+ * @eop_desc: End of Packet descriptor
+ */
+static enum xdp_rss_hash_type
+ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
+{
+ return libie_rx_pt_parse(ice_get_ptype(eop_desc)).hash_type;
+}
+
+/**
+ * ice_xdp_rx_hash - RX hash XDP hint handler
+ * @ctx: XDP buff pointer
+ * @hash: hash destination address
+ * @rss_type: XDP hash type destination address
+ *
+ * Copy RX hash (if available) and its type to the destination address.
+ */
+static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+
+ *hash = ice_get_rx_hash(xdp_ext->eop_desc);
+ *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc);
+ if (!likely(*hash))
+ return -ENODATA;
+
+ return 0;
+}
+
+/**
+ * ice_xdp_rx_vlan_tag - VLAN tag XDP hint handler
+ * @ctx: XDP buff pointer
+ * @vlan_proto: destination address for VLAN protocol
+ * @vlan_tci: destination address for VLAN TCI
+ *
+ * Copy VLAN tag (if it was stripped) and the corresponding protocol
+ * to the destination address.
+ */
+static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
+ u16 *vlan_tci)
+{
+ const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+
+ *vlan_proto = xdp_ext->pkt_ctx->vlan_proto;
+ if (!*vlan_proto)
+ return -ENODATA;
+
+ *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc);
+ if (!*vlan_tci)
+ return -ENODATA;
+
+ return 0;
+}
+
+const struct xdp_metadata_ops ice_xdp_md_ops = {
+ .xmo_rx_timestamp = ice_xdp_rx_hw_ts,
+ .xmo_rx_hash = ice_xdp_rx_hash,
+ .xmo_rx_vlan_tag = ice_xdp_rx_vlan_tag,
+};
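
The xdp_metadata_ops registered above back the kernel's generic XDP Rx metadata kfuncs, so an XDP program loaded on an ice netdev can pull the hash, VLAN tag, and timestamp these xmo_* handlers expose. A consumer-side sketch, assuming a vmlinux.h generated from a kernel that carries these kfunc declarations; nothing here is ice-specific.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;
extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					__be16 *vlan_proto,
					__u16 *vlan_tci) __ksym;

SEC("xdp")
int rx_hints(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type;
	__be16 vlan_proto;
	__u16 vlan_tci;
	__u32 hash;

	/* Each call lands in the matching ice_xdp_rx_*() handler above and
	 * returns -ENODATA when the descriptor carried nothing to report.
	 */
	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
		bpf_printk("hash %x type %d", hash, rss_type);

	if (!bpf_xdp_metadata_rx_vlan_tag(ctx, &vlan_proto, &vlan_tci))
		bpf_printk("vlan proto %x tci %u",
			   bpf_ntohs(vlan_proto), vlan_tci);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";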
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 115969ecdf7b..afcead4baef4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -12,26 +12,39 @@
* act: action to store onto Rx buffers related to XDP buffer parts
*
* Set action that should be taken before putting Rx buffer from first frag
- * to one before last. Last one is handled by caller of this function as it
- * is the EOP frag that is currently being processed. This function is
- * supposed to be called only when XDP buffer contains frags.
+ * to the last.
*/
static inline void
ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
const unsigned int act)
{
- const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
- u32 first = rx_ring->first_desc;
- u32 nr_frags = sinfo->nr_frags;
+ u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
u32 cnt = rx_ring->count;
struct ice_rx_buf *buf;
for (int i = 0; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[first];
+ buf = &rx_ring->rx_buf[idx];
buf->act = act;
- if (++first == cnt)
- first = 0;
+ if (++idx == cnt)
+ idx = 0;
+ }
+
+ /* adjust pagecnt_bias on frags freed by XDP prog */
+ if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
+ u32 delta = rx_ring->nr_frags - sinfo_frags;
+
+ while (delta) {
+ if (idx == 0)
+ idx = cnt - 1;
+ else
+ idx--;
+ buf = &rx_ring->rx_buf[idx];
+ buf->pagecnt_bias--;
+ delta--;
+ }
}
}
@@ -84,7 +97,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
- * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor
+ * ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
* @rx_desc: Rx 32b flex descriptor with RXDID=2
*
* The OS and current PF implementation only support stripping a single VLAN tag
@@ -92,7 +105,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
* one is found return the tag, else return 0 to mean no VLAN tag was found.
*/
static inline u16
-ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc)
+ice_get_vlan_tci(const union ice_32b_rx_flex_desc *rx_desc)
{
u16 stat_err_bits;
@@ -148,7 +161,17 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u16 ptype);
+ struct sk_buff *skb);
void
-ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci);
+
+static inline void
+ice_xdp_meta_set_desc(struct xdp_buff *xdp,
+ union ice_32b_rx_flex_desc *eop_desc)
+{
+ struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
+ xdp_buff);
+
+ xdp_ext->eop_desc = eop_desc;
+}
#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a18ca0ff879f..f0796a93f428 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -17,6 +17,7 @@
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
+#include "ice_fwlog.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -131,6 +132,7 @@ enum ice_mac_type {
ICE_MAC_E810,
ICE_MAC_E830,
ICE_MAC_GENERIC,
+ ICE_MAC_GENERIC_3K_E825,
};
/* Media Types */
@@ -148,7 +150,6 @@ enum ice_vsi_type {
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
- ICE_VSI_SWITCHDEV_CTRL = 7,
};
struct ice_link_status {
@@ -202,6 +203,7 @@ struct ice_phy_info {
enum ice_fltr_ptype {
/* NONE - used for undef/error */
ICE_FLTR_PTYPE_NONF_NONE = 0,
+ ICE_FLTR_PTYPE_NONF_ETH,
ICE_FLTR_PTYPE_NONF_IPV4_UDP,
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
@@ -246,6 +248,7 @@ struct ice_fd_hw_prof {
int cnt;
u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX];
u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER];
+ u64 prof_id[ICE_FD_HW_SEG_MAX];
};
/* Common HW capabilities for SW use */
@@ -293,6 +296,7 @@ struct ice_hw_common_caps {
bool pcie_reset_avoidance;
/* Post update reset restriction */
bool reset_restrict_support;
+ bool tx_sched_topo_comp_mode_en;
};
/* IEEE 1588 TIME_SYNC specific info */
@@ -351,6 +355,7 @@ struct ice_ts_func_info {
#define ICE_TS_TMR0_ENA_M BIT(25)
#define ICE_TS_TMR1_ENA_M BIT(26)
#define ICE_TS_LL_TX_TS_READ_M BIT(28)
+#define ICE_TS_LL_TX_TS_INT_READ_M BIT(29)
struct ice_ts_dev_info {
/* Device specific info */
@@ -364,6 +369,7 @@ struct ice_ts_dev_info {
u8 tmr0_ena;
u8 tmr1_ena;
u8 ts_ll_read;
+ u8 ts_ll_int_read;
};
/* Function specific capabilities */
@@ -377,6 +383,8 @@ struct ice_hw_func_caps {
struct ice_ts_func_info ts_func_info;
};
+#define ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT 0
+
/* Device wide capabilities */
struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
@@ -385,6 +393,11 @@ struct ice_hw_dev_caps {
u32 num_flow_director_fltr; /* Number of FD filters available */
struct ice_ts_dev_info ts_dev_info;
u32 num_funcs;
+ /* bitmap of supported sensors
+ * bit 0 - internal temperature sensor
+ * bit 31:1 - Reserved
+ */
+ u32 supported_sensors;
};
/* MAC info */
@@ -730,24 +743,6 @@ struct ice_switch_info {
DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
-/* FW logging configuration */
-struct ice_fw_log_evnt {
- u8 cfg : 4; /* New event enables to configure */
- u8 cur : 4; /* Current/active event enables */
-};
-
-struct ice_fw_log_cfg {
- u8 cq_en : 1; /* FW logging is enabled via the control queue */
- u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */
- u8 actv_evnts; /* Cumulation of currently enabled log events */
-
-#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S)
-#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S)
-#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S)
-#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S)
- struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX];
-};
-
/* Enum defining the different states of the mailbox snapshot in the
* PF-VF mailbox overflow detection algorithm. The snapshot can be in
* states:
@@ -827,7 +822,7 @@ struct ice_mbx_data {
enum ice_phy_model {
ICE_PHY_UNSUP = -1,
ICE_PHY_E810 = 1,
- ICE_PHY_E822,
+ ICE_PHY_E82X,
};
/* Port hardware description */
@@ -855,6 +850,8 @@ struct ice_hw {
u16 max_burst_size; /* driver sets this value */
+ u8 recp_reuse:1; /* indicates whether FW supports recipe reuse */
+
/* Tx Scheduler values */
u8 num_tx_sched_layers;
u8 num_tx_sched_phys_layers;
@@ -889,7 +886,9 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
- struct ice_fw_log_cfg fw_log;
+ struct ice_fwlog_cfg fwlog_cfg;
+ bool fwlog_supported; /* does hardware support FW logging? */
+ struct ice_fwlog_ring fwlog_ring;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
* register. Used for determining the ITR/INTRL granularity during
@@ -910,10 +909,9 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
-#define ICE_PHY_PER_NAC_E822 1
#define ICE_MAX_QUAD 2
-#define ICE_QUADS_PER_PHY_E822 2
-#define ICE_PORTS_PER_PHY_E822 8
+#define ICE_QUADS_PER_PHY_E82X 2
+#define ICE_PORTS_PER_PHY_E82X 8
#define ICE_PORTS_PER_QUAD 4
#define ICE_PORTS_PER_PHY_E810 4
#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
@@ -1009,7 +1007,6 @@ struct ice_hw_port_stats {
u64 error_bytes; /* errbc */
u64 mac_local_faults; /* mlfc */
u64 mac_remote_faults; /* mrfc */
- u64 rx_len_errors; /* rlec */
u64 link_xon_rx; /* lxonrxc */
u64 link_xoff_rx; /* lxoffrxc */
u64 link_xon_tx; /* lxontxc */
@@ -1048,6 +1045,7 @@ enum ice_sw_fwd_act_type {
ICE_FWD_TO_Q,
ICE_FWD_TO_QGRP,
ICE_DROP_PACKET,
+ ICE_MIRROR_PACKET,
ICE_NOP,
ICE_INVAL_ACT
};
@@ -1078,7 +1076,7 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_OROM_VER_BUILD_SHIFT 8
#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT)
#define ICE_OROM_VER_SHIFT 24
-#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
+#define ICE_OROM_VER_MASK (0xffU << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_1ST_NVM_BANK_PTR 0x42
#define ICE_SR_NVM_BANK_SIZE 0x43
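
The ICE_OROM_VER_MASK change from 0xff to 0xffU above is not cosmetic: with a 32-bit int, shifting 0xff left by 24 pushes a one into the sign bit, which is undefined behavior for signed left shifts (and a common UBSAN "shift" report), while the unsigned form is ordinary modular arithmetic. A tiny sketch with illustrative names:

#define EXAMPLE_OROM_VER_SHIFT	24
#define EXAMPLE_OROM_VER_MASK	(0xffU << EXAMPLE_OROM_VER_SHIFT)	/* 0xff000000, well defined */
/* (0xff << 24) with plain int would overflow INT_MAX and is undefined. */

static inline unsigned int example_orom_ver(unsigned int orom_word)
{
	return (orom_word & EXAMPLE_OROM_VER_MASK) >> EXAMPLE_OROM_VER_SHIFT;
}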
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index aca1f2ea5034..48a8d462d76a 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -248,25 +248,32 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
}
/**
- * ice_vf_recreate_vsi - Release and re-create the VF's VSI
- * @vf: VF to recreate the VSI for
+ * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
+ * @vf: VF to reconfigure the VSI for
*
- * This is only called when a single VF is being reset (i.e. VVF, VFLR, host
- * VF configuration change, etc)
+ * This is called when a single VF is being reset (i.e. VVF, VFLR, host VF
+ * configuration change, etc).
*
- * It releases and then re-creates a new VSI.
+ * It brings the VSI down and then reconfigures it with the hardware.
*/
-static int ice_vf_recreate_vsi(struct ice_vf *vf)
+int ice_vf_reconfig_vsi(struct ice_vf *vf)
{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
struct ice_pf *pf = vf->pf;
int err;
- ice_vf_vsi_release(vf);
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ vsi->flags = ICE_VSI_FLAG_NO_INIT;
- err = vf->vf_ops->create_vsi(vf);
+ ice_vsi_decfg(vsi);
+ ice_fltr_remove_all(vsi);
+
+ err = ice_vsi_cfg(vsi);
if (err) {
dev_err(ice_pf_to_dev(pf),
- "Failed to recreate the VF%u's VSI, error %d\n",
+ "Failed to reconfigure the VF%u's VSI, error %d\n",
vf->vf_id, err);
return err;
}
@@ -300,7 +307,6 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
* vf->lan_vsi_idx
*/
vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
- vf->lan_vsi_num = vsi->vsi_num;
return 0;
}
@@ -760,6 +766,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
+ ice_eswitch_detach(pf, vf);
vf->driver_caps = 0;
ice_vc_set_default_allowlist(vf);
@@ -775,13 +782,11 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
+ ice_eswitch_attach(pf, vf);
+
mutex_unlock(&vf->cfg_lock);
}
- if (ice_is_eswitch_mode_switchdev(pf))
- if (ice_eswitch_rebuild(pf))
- dev_warn(dev, "eswitch rebuild failed\n");
-
ice_flush(hw);
clear_bit(ICE_VF_DIS, pf->state);
@@ -829,12 +834,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
+ struct ice_lag *lag;
struct ice_vsi *vsi;
+ u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
bool rsd;
dev = ice_pf_to_dev(pf);
+ act_prt = ICE_LAG_INVALID_PORT;
+ pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -850,6 +859,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
else
lockdep_assert_held(&vf->cfg_lock);
+ lag = pf->lag;
+ mutex_lock(&pf->lag_mutex);
+ if (lag && lag->bonded && lag->primary) {
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+ lag->upper_netdev)
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ else
+ act_prt = ICE_LAG_INVALID_PORT;
+ }
+
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
@@ -914,7 +934,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_vf_pre_vsi_rebuild(vf);
- if (ice_vf_recreate_vsi(vf)) {
+ if (ice_vf_reconfig_vsi(vf)) {
dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
vf->vf_id);
err = -EFAULT;
@@ -928,12 +948,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
goto out_unlock;
}
- ice_eswitch_update_repr(vsi);
+ ice_eswitch_update_repr(vf->repr_id, vsi);
/* if the VF has been reset allow it to come up again */
ice_mbx_clear_malvf(&vf->mbx_info);
out_unlock:
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
if (flags & ICE_VF_RESET_LOCK)
mutex_unlock(&vf->cfg_lock);
@@ -965,10 +990,13 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
/* assign default capabilities */
vf->spoofchk = true;
- vf->num_vf_qs = vfs->num_qps_per;
ice_vc_set_default_allowlist(vf);
ice_virtchnl_set_dflt_ops(vf);
+ /* set default number of MSI-X */
+ vf->num_msix = vfs->num_msix_per;
+ vf->num_vf_qs = vfs->num_qps_per;
+
/* ctrl_vsi_idx will be set to a valid value only when iAVF
* creates its first fdir rule.
*/
@@ -1213,7 +1241,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
struct ice_vsi *vsi;
params.type = ICE_VSI_CTRL;
- params.pi = ice_vf_get_port_info(vf);
+ params.port_info = ice_vf_get_port_info(vf);
params.vf = vf;
params.flags = ICE_VSI_FLAG_INIT;
@@ -1281,13 +1309,12 @@ int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
}
/**
- * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
* @vf: VF to remove access to VSI for
*/
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
vf->lan_vsi_idx = ICE_NO_VSI;
- vf->lan_vsi_num = ICE_NO_VSI;
}
/**
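
Both ice_reset_vf() above and ice_vc_cfg_qs_msg() later in this diff now bracket their work with the same LAG handling: under pf->lag_mutex, if the bond's active port is not the primary, temporarily move the VF's Tx scheduler nodes to the primary port, do the reconfiguration, then move them back. A minimal condensation of that bracket, grounded in the two hunks; example_with_lag_bracket() and do_work() are hypothetical stand-ins for the bracketed code.

static int example_with_lag_bracket(struct ice_pf *pf, struct ice_vf *vf,
				    int (*do_work)(struct ice_vf *vf))
{
	u8 act_prt = ICE_LAG_INVALID_PORT;
	u8 pri_prt = pf->hw.port_info->lport;
	struct ice_lag *lag = pf->lag;
	int err;

	mutex_lock(&pf->lag_mutex);
	if (lag && lag->bonded && lag->primary) {
		act_prt = lag->active_port;
		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
		    lag->upper_netdev)
			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
		else
			act_prt = ICE_LAG_INVALID_PORT;
	}

	err = do_work(vf);	/* e.g. the VSI queue/VSI reconfiguration */

	if (lag && lag->bonded && lag->primary &&
	    act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
	mutex_unlock(&pf->lag_mutex);

	return err;
}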
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 93c774f2f437..fec16919ec19 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -62,7 +62,6 @@ struct ice_vf_ops {
bool (*poll_reset_status)(struct ice_vf *vf);
void (*clear_reset_trigger)(struct ice_vf *vf);
void (*irq_close)(struct ice_vf *vf);
- int (*create_vsi)(struct ice_vf *vf);
void (*post_vsi_rebuild)(struct ice_vf *vf);
};
@@ -110,11 +109,6 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
- /* VSI indices - actual VSI pointers are maintained in the PF structure
- * When assigned, these will be non-zero, because VSI 0 is always
- * the main LAN VSI for the PF.
- */
- u16 lan_vsi_num; /* ID as used by firmware */
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
@@ -130,7 +124,7 @@ struct ice_vf {
struct ice_mdd_vf_events mdd_tx_events;
DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
- struct ice_repr *repr;
+ unsigned long repr_id;
const struct ice_virtchnl_ops *virtchnl_ops;
const struct ice_vf_ops *vf_ops;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 0c7e77c0a09f..91ba7fe0eaee 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -23,6 +23,7 @@
#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
#endif
+int ice_vf_reconfig_vsi(struct ice_vf *vf);
void ice_initialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index d7b10dc67f03..b3e1bdcb80f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -26,29 +26,31 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
struct ice_vsi_vlan_ops *vlan_ops;
struct ice_pf *pf = vsi->back;
- if (ice_is_dvm_ena(&pf->hw)) {
- vlan_ops = &vsi->outer_vlan_ops;
-
- /* setup outer VLAN ops */
- vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
- /* setup inner VLAN ops */
- vlan_ops = &vsi->inner_vlan_ops;
+ if (ice_is_dvm_ena(&pf->hw)) {
vlan_ops->add_vlan = noop_vlan_arg;
vlan_ops->del_vlan = noop_vlan_arg;
vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- } else {
- vlan_ops = &vsi->inner_vlan_ops;
+ /* setup outer VLAN ops */
+ vlan_ops = &vsi->outer_vlan_ops;
+ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+ } else {
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
}
+
+ /* all Rx traffic should be in the domain of the assigned port VLAN,
+ * so prevent disabling Rx VLAN filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
+
vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
}
@@ -77,6 +79,8 @@ static void ice_port_vlan_off(struct ice_vsi *vsi)
vlan_ops->del_vlan = ice_vsi_del_vlan;
}
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
@@ -141,7 +145,6 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
&vsi->outer_vlan_ops : &vsi->inner_vlan_ops;
vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index cdf17b1e2f25..1c6ce0c4ed4e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vf->driver_caps = *(u32 *)msg;
else
vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
- VIRTCHNL_VF_OFFLOAD_RSS_REG |
VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
@@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
vf->driver_caps);
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
- } else {
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
- else
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
- }
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
@@ -506,7 +499,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
- vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID;
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
@@ -552,27 +545,20 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
*/
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_find_vsi(pf, vsi_id);
-
- return (vsi && (vsi->vf == vf));
+ return vsi_id == ICE_VF_VSI_ID;
}
/**
* ice_vc_isvalid_q_id
- * @vf: pointer to the VF info
- * @vsi_id: VSI ID
+ * @vsi: VSI to check queue ID against
* @qid: VSI relative queue ID
*
* check for the valid queue ID
*/
-static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
{
- struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
/* allocated Tx and Rx queues should be always equal for VF VSI */
- return (vsi && (qid < vsi->alloc_txq));
+ return qid < vsi->alloc_txq;
}
/**
@@ -689,9 +675,7 @@ out:
* a specific virtchnl RSS cfg
* @hw: pointer to the hardware
* @rss_cfg: pointer to the virtchnl RSS cfg
- * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
- * to configure
- * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @hash_cfg: pointer to the HW hash configuration
*
* Return true if all the protocol header and hash fields in the RSS cfg could
* be parsed, else return false
@@ -699,13 +683,23 @@ out:
* This function parses the virtchnl RSS cfg to be the intended
* hash fields and the intended header for RSS configuration
*/
-static bool
-ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
- u32 *addl_hdrs, u64 *hash_flds)
+static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
+ struct virtchnl_rss_cfg *rss_cfg,
+ struct ice_rss_hash_cfg *hash_cfg)
{
const struct ice_vc_hash_field_match_type *hf_list;
const struct ice_vc_hdr_match_type *hdr_list;
int i, hf_list_len, hdr_list_len;
+ u32 *addl_hdrs = &hash_cfg->addl_hdrs;
+ u64 *hash_flds = &hash_cfg->hash_flds;
+
+ /* set outer layer RSS as default */
+ hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hash_cfg->symm = true;
+ else
+ hash_cfg->symm = false;
hf_list = ice_vc_hash_field_list;
hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
@@ -823,8 +817,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
int status;
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
- ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
@@ -832,11 +826,9 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
goto error_param;
}
- ctx->info.q_opt_rss = ((lut_type <<
- ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
- ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- (hash_type &
- ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ ctx->info.q_opt_rss =
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
/* Preserve existing queueing option setting */
ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
@@ -858,18 +850,24 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
kfree(ctx);
} else {
- u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
- u64 hash_flds = ICE_HASH_INVALID;
+ struct ice_rss_hash_cfg cfg;
+
+ /* Only validate the pattern for the non-raw case */
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ cfg.hash_flds = ICE_HASH_INVALID;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
- if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
- &hash_flds)) {
+ if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (add) {
- if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
- addl_hdrs)) {
+ if (ice_add_rss_cfg(hw, vsi, &cfg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
vsi->vsi_num, v_ret);
@@ -877,8 +875,7 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
} else {
int status;
- status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
- addl_hdrs);
+ status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
/* We just ignore -ENOENT, because if two configurations
* share the same profile remove one of them actually
* removes both, since the profile is deleted.
@@ -989,6 +986,51 @@ error_param:
}
/**
+ * ice_vc_config_rss_hfunc
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS Hash function
+ */
+static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ if (ice_set_rss_hfunc(vsi, hfunc))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
+ NULL, 0);
+}
+
+/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1274,7 +1316,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
*/
q_map = vqs->rx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1296,7 +1338,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1401,7 +1443,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1427,7 +1469,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
} else if (q_map) {
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1463,13 +1505,12 @@ error_param:
* ice_cfg_interrupt
* @vf: pointer to the VF info
* @vsi: the VSI being configured
- * @vector_id: vector ID
* @map: vector map for mapping vectors to queues
* @q_vector: structure for interrupt vector
* configure the IRQ to queue map
*/
-static int
-ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+static enum virtchnl_status_code
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
struct virtchnl_vector_map *map,
struct ice_q_vector *q_vector)
{
@@ -1483,13 +1524,14 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
vsi_q_id = vsi_q_id_idx;
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
return VIRTCHNL_STATUS_ERR_PARAM;
q_vector->num_ring_rx++;
q_vector->rx.itr_idx = map->rxitr_idx;
vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
q_vector->rx.itr_idx);
}
@@ -1497,13 +1539,14 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
vsi_q_id = vsi_q_id_idx;
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
return VIRTCHNL_STATUS_ERR_PARAM;
q_vector->num_ring_tx++;
q_vector->tx.itr_idx = map->txitr_idx;
vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
+ ice_cfg_txq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
q_vector->tx.itr_idx);
}
@@ -1523,7 +1566,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
u16 num_q_vectors_mapped, vsi_id, vector_id;
struct virtchnl_irq_map_info *irqmap_info;
struct virtchnl_vector_map *map;
- struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int i;
@@ -1535,7 +1577,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
* there is actually at least a single VF queue vector mapped
*/
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- pf->vfs.num_msix_per < num_q_vectors_mapped ||
+ vf->num_msix < num_q_vectors_mapped ||
!num_q_vectors_mapped) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -1557,7 +1599,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* vector_id is always 0-based for each VF, and can never be
* larger than or equal to the max allowed interrupts per VF
*/
- if (!(vector_id < pf->vfs.num_msix_per) ||
+ if (!(vector_id < vf->num_msix) ||
!ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1578,8 +1620,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
}
/* lookout for the invalid queue index */
- v_ret = (enum virtchnl_status_code)
- ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
+ v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
if (v_ret)
goto error_param;
}
@@ -1603,9 +1644,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
+ struct ice_lag *lag;
struct ice_vsi *vsi;
+ u8 act_prt, pri_prt;
int i = -1, q_idx;
+ lag = pf->lag;
+ mutex_lock(&pf->lag_mutex);
+ act_prt = ICE_LAG_INVALID_PORT;
+ pri_prt = pf->hw.port_info->lport;
+ if (lag && lag->bonded && lag->primary) {
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+ lag->upper_netdev)
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ else
+ act_prt = ICE_LAG_INVALID_PORT;
+ }
+
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
goto error_param;
@@ -1640,7 +1696,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
qpi->txq.headwb_enabled ||
!ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
!ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
- !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+ !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
goto error_param;
}
@@ -1729,6 +1785,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
VIRTCHNL_STATUS_SUCCESS, NULL, 0);
@@ -1743,6 +1804,11 @@ error_param:
vf->vf_id, i);
}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
ice_lag_move_new_vf_nodes(vf);
/* send the response to the VF */
@@ -2610,7 +2676,7 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
}
if (vrh->hena) {
- status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, vrh->hena);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena);
v_ret = ice_err_to_virt_err(status);
}
@@ -3022,7 +3088,7 @@ static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
{
struct ice_vlan vlan = { 0 };
- vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan.prio = FIELD_GET(VLAN_PRIO_MASK, vc_vlan->tci);
vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
vlan.tpid = vc_vlan->tpid;
@@ -3731,6 +3797,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
.config_rss_key = ice_vc_config_rss_key,
.config_rss_lut = ice_vc_config_rss_lut,
+ .config_rss_hfunc = ice_vc_config_rss_hfunc,
.get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
.add_vlan_msg = ice_vc_add_vlan_msg,
@@ -3860,6 +3927,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
.config_rss_key = ice_vc_config_rss_key,
.config_rss_lut = ice_vc_config_rss_lut,
+ .config_rss_hfunc = ice_vc_config_rss_hfunc,
.get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
.add_vlan_msg = ice_vc_add_vlan_msg,
@@ -4042,6 +4110,9 @@ error_handler:
case VIRTCHNL_OP_CONFIG_RSS_LUT:
err = ops->config_rss_lut(vf, msg);
break;
+ case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
+ err = ops->config_rss_hfunc(vf, msg);
+ break;
case VIRTCHNL_OP_GET_STATS:
err = ops->get_stats_msg(vf, msg);
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index cd747718de73..3a4115869153 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -19,6 +19,15 @@
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* VFs only get a single VSI. For ice hardware, the VF does not need to know
+ * its VSI index. However, the virtchnl interface requires a VSI number,
+ * mainly due to legacy hardware.
+ *
+ * Since the VF doesn't need this information, report a static value to the VF
+ * instead of leaking any information about the PF or hardware setup.
+ */
+#define ICE_VF_VSI_ID 1
+
struct ice_virtchnl_ops {
int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
@@ -32,6 +41,7 @@ struct ice_virtchnl_ops {
int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_hfunc)(struct ice_vf *vf, u8 *msg);
int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
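
With the fixed ICE_VF_VSI_ID token above, the VSI number a VF sees on the virtchnl wire no longer maps to a real PF VSI: validation reduces to an equality check and every handler resolves the actual VSI locally. An illustrative handler shape under that model; only ice_vc_isvalid_vsi_id() and ice_get_vf_vsi() are real, the rest is a stand-in.

static int example_vc_handler(struct ice_vf *vf, u16 wire_vsi_id)
{
	struct ice_vsi *vsi;

	/* The wire value is an opaque token: anything but ICE_VF_VSI_ID is
	 * rejected, and nothing about PF VSI numbering leaks to the VF.
	 */
	if (!ice_vc_isvalid_vsi_id(vf, wire_vsi_id))
		return -EINVAL;

	vsi = ice_get_vf_vsi(vf);	/* the real VSI comes from PF state */
	if (!vsi)
		return -EINVAL;

	return 0;
}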
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 7d547fa616fa..d796dbd2a440 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -13,8 +13,6 @@
* - opcodes needed by VF when caps are activated
*
* Caps that don't use new opcodes (no opcodes should be allowed):
- * - VIRTCHNL_VF_OFFLOAD_RSS_AQ
- * - VIRTCHNL_VF_OFFLOAD_RSS_REG
* - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
* - VIRTCHNL_VF_OFFLOAD_CRC
* - VIRTCHNL_VF_OFFLOAD_RX_POLLING
@@ -68,6 +66,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = {
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
+ VIRTCHNL_OP_CONFIG_RSS_HFUNC,
};
/* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 24b23b7ef04a..8e4ff3af86c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -10,19 +10,6 @@
#define to_fltr_conf_from_desc(p) \
container_of(p, struct virtchnl_fdir_fltr_conf, input)
-#define ICE_FLOW_PROF_TYPE_S 0
-#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
-#define ICE_FLOW_PROF_VSI_S 32
-#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
-
-/* Flow profile ID format:
- * [0:31] - flow type, flow + tun_offs
- * [32:63] - VSI index
- */
-#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
- ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
- (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
-
#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
@@ -107,9 +94,6 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
return -EINVAL;
- if (vsi_id != vf->lan_vsi_num)
- return -EINVAL;
-
if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
return -EINVAL;
@@ -493,6 +477,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
return;
vf_prof = fdir->fdir_prof[flow];
+ prof_id = vf_prof->prof_id[tun];
vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
@@ -503,9 +488,6 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
if (!fdir->prof_entry_cnt[flow][tun])
return;
- prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
- flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
-
for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
if (vf_prof->entry_h[i][tun]) {
u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
@@ -647,7 +629,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
struct ice_hw *hw;
u64 entry1_h = 0;
u64 entry2_h = 0;
- u64 prof_id;
int ret;
pf = vf->pf;
@@ -681,18 +662,15 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
ice_vc_fdir_rem_prof(vf, flow, tun);
}
- prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
- tun ? ICE_FLTR_PTYPE_MAX : 0);
-
- ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- tun + 1, &prof);
+ ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
+ tun + 1, false, &prof);
if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id);
goto err_exit;
}
- ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
if (ret) {
@@ -701,7 +679,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
goto err_prof;
}
- ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
if (ret) {
@@ -725,14 +703,16 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
vf_prof->cnt++;
fdir->prof_entry_cnt[flow][tun]++;
+ vf_prof->prof_id[tun] = prof->id;
+
return 0;
err_entry_1:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
- ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
+ ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
- ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
err_exit:
return ret;
}
@@ -1480,16 +1460,15 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
int ret;
stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
- if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
- ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
+ if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
+ ICE_FXD_FLTR_WB_QW1_DD_YES) {
*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
ret = -EINVAL;
goto err_exit;
}
- prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
- ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
+ prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
dev_err(dev, "VF %d: Desc show add, but ctx not",
@@ -1508,8 +1487,7 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
goto err_exit;
}
- error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
- ICE_FXD_FLTR_WB_QW1_FAIL_S;
+ error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
@@ -1524,8 +1502,7 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
goto err_exit;
}
- error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
- ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
+ error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 76266e709a39..2e9ad27cb9d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -131,6 +131,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
+ u8 *ivf;
int err;
/* do not allow modifying VLAN stripping when a port VLAN is configured
@@ -143,19 +144,24 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
if (!ctxt)
return -ENOMEM;
+ ivf = &ctxt->info.inner_vlan_flags;
+
/* Here we are configuring what the VSI should do with the VLAN tag in
* the Rx packet. We can either leave the tag in the packet or put it in
* the Rx descriptor.
*/
- if (ena)
+ if (ena) {
/* Strip VLAN tag from Rx packet and put it in the desc */
- ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
- else
+ *ivf = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
+ ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH);
+ } else {
/* Disable stripping. Leave tag in packet */
- ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
+ *ivf = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
+ }
/* Allow all packets untagged/tagged */
- ctxt->info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
+ *ivf |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
@@ -481,10 +487,11 @@ int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid)
ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
ctxt->info.outer_vlan_flags |=
- ((ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
- ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
- ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
- ICE_AQ_VSI_OUTER_TAG_TYPE_M));
+ /* we want EMODE_SHOW_BOTH, but that value is zero, so the line
+ * above clears it well enough that we don't need to try to set
+ * zero here, so just do the tag type
+ */
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type);
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
@@ -589,11 +596,9 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid)
ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M |
ICE_AQ_VSI_OUTER_TAG_TYPE_M);
ctxt->info.outer_vlan_flags |=
- ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M) |
- ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
- ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL) |
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type);
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
@@ -642,9 +647,8 @@ int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi)
ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
ctxt->info.outer_vlan_flags |=
ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
- ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
@@ -702,8 +706,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
ctxt->info.outer_vlan_flags =
(ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW <<
ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
- ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
- ICE_AQ_VSI_OUTER_TAG_TYPE_M) |
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type) |
ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED <<
ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) |
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
index 4a6c850d83ac..7aae7fdcfcdb 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -72,7 +72,6 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_PF:
- case ICE_VSI_SWITCHDEV_CTRL:
ice_pf_vsi_init_vlan_ops(vsi);
break;
case ICE_VSI_VF:
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 99954508184f..aa81d1162b81 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -179,6 +179,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
return -EBUSY;
usleep_range(1000, 2000);
}
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
@@ -195,13 +199,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
- ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
ice_qp_reset_stats(vsi, q_idx);
@@ -217,53 +218,39 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
*/
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
- u16 size = __struct_size(qg_buf);
struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
int err;
- if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
- return -EINVAL;
-
- qg_buf->num_txqs = 1;
-
- tx_ring = vsi->tx_rings[q_idx];
- rx_ring = vsi->rx_rings[q_idx];
- q_vector = rx_ring->q_vector;
-
- err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
if (err)
return err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
- memset(qg_buf, 0, size);
- qg_buf->num_txqs = 1;
- err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
+ err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
if (err)
return err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
- err = ice_vsi_cfg_rxq(rx_ring);
+ err = ice_vsi_cfg_single_rxq(vsi, q_idx);
if (err)
return err;
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
ice_qvec_cfg_msix(vsi, q_vector);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
if (err)
return err;
- clear_bit(ICE_CFG_BUSY, vsi->state);
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ clear_bit(ICE_CFG_BUSY, vsi->state);
return 0;
}
@@ -458,6 +445,11 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
+ /* Put private info that changes on a per-packet basis
+ * into xdp_buff_xsk->cb.
+ */
+ ice_xdp_meta_set_desc(*xdp, rx_desc);
+
rx_desc++;
xdp++;
}
@@ -563,8 +555,7 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
}
net_prefetch(xdp->data_meta);
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
if (unlikely(!skb))
return NULL;
@@ -820,7 +811,8 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start), 0, size);
+ virt_to_page(xdp->data_hard_start),
+ XDP_PACKET_HEADROOM, size);
sinfo->xdp_frags_size += size;
xsk_buff_add_frag(xdp);
@@ -863,8 +855,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
struct xdp_buff *xdp;
struct sk_buff *skb;
u16 stat_err_bits;
- u16 vlan_tag = 0;
- u16 rx_ptype;
+ u16 vlan_tci;
rx_desc = ICE_RX_DESC(rx_ring, ntc);
@@ -891,7 +882,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
if (!first) {
first = xdp;
- xdp_buff_clear_frags_flag(first);
} else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
break;
}
@@ -942,13 +932,10 @@ construct_skb:
total_rx_bytes += skb->len;
total_rx_packets++;
- vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
-
- rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
- ICE_RX_FLEX_DESC_PTYPE_M;
+ vlan_tci = ice_get_vlan_tci(rx_desc);
- ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
- ice_receive_skb(rx_ring, skb, vlan_tag);
+ ice_process_skb_fields(rx_ring, rx_desc, skb);
+ ice_receive_skb(rx_ring, skb, vlan_tci);
}
rx_ring->next_to_clean = ntc;
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index bee73353b56a..e7a036538246 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -15,7 +15,7 @@ struct idpf_vport_max_q;
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/sctp.h>
-#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <net/gro.h>
#include <linux/dim.h>
@@ -37,8 +37,6 @@ struct idpf_vport_max_q;
#define IDPF_MB_MAX_ERR 20
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
-#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000
-#define IDPF_WAIT_FOR_EVENT_TIMEO 60000
#define IDPF_MAX_WAIT 500
@@ -66,14 +64,12 @@ struct idpf_mac_filter {
/**
* enum idpf_state - State machine to handle bring up
- * @__IDPF_STARTUP: Start the state machine
* @__IDPF_VER_CHECK: Negotiate virtchnl version
* @__IDPF_GET_CAPS: Negotiate capabilities
* @__IDPF_INIT_SW: Init based on given capabilities
* @__IDPF_STATE_LAST: Must be last, used to determine size
*/
enum idpf_state {
- __IDPF_STARTUP,
__IDPF_VER_CHECK,
__IDPF_GET_CAPS,
__IDPF_INIT_SW,
@@ -87,6 +83,7 @@ enum idpf_state {
* @IDPF_HR_RESET_IN_PROG: Reset in progress
* @IDPF_REMOVE_IN_PROG: Driver remove in progress
* @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
+ * @IDPF_VC_CORE_INIT: virtchnl core has been init
* @IDPF_FLAGS_NBITS: Must be last
*/
enum idpf_flags {
@@ -95,6 +92,7 @@ enum idpf_flags {
IDPF_HR_RESET_IN_PROG,
IDPF_REMOVE_IN_PROG,
IDPF_MB_INTR_MODE,
+ IDPF_VC_CORE_INIT,
IDPF_FLAGS_NBITS,
};
@@ -209,71 +207,6 @@ struct idpf_dev_ops {
struct idpf_reg_ops reg_ops;
};
-/* These macros allow us to generate an enum and a matching char * array of
- * stringified enums that are always in sync. Checkpatch issues a bogus warning
- * about this being a complex macro; but it's wrong, these are never used as a
- * statement and instead only used to define the enum and array.
- */
-#define IDPF_FOREACH_VPORT_VC_STATE(STATE) \
- STATE(IDPF_VC_CREATE_VPORT) \
- STATE(IDPF_VC_CREATE_VPORT_ERR) \
- STATE(IDPF_VC_ENA_VPORT) \
- STATE(IDPF_VC_ENA_VPORT_ERR) \
- STATE(IDPF_VC_DIS_VPORT) \
- STATE(IDPF_VC_DIS_VPORT_ERR) \
- STATE(IDPF_VC_DESTROY_VPORT) \
- STATE(IDPF_VC_DESTROY_VPORT_ERR) \
- STATE(IDPF_VC_CONFIG_TXQ) \
- STATE(IDPF_VC_CONFIG_TXQ_ERR) \
- STATE(IDPF_VC_CONFIG_RXQ) \
- STATE(IDPF_VC_CONFIG_RXQ_ERR) \
- STATE(IDPF_VC_ENA_QUEUES) \
- STATE(IDPF_VC_ENA_QUEUES_ERR) \
- STATE(IDPF_VC_DIS_QUEUES) \
- STATE(IDPF_VC_DIS_QUEUES_ERR) \
- STATE(IDPF_VC_MAP_IRQ) \
- STATE(IDPF_VC_MAP_IRQ_ERR) \
- STATE(IDPF_VC_UNMAP_IRQ) \
- STATE(IDPF_VC_UNMAP_IRQ_ERR) \
- STATE(IDPF_VC_ADD_QUEUES) \
- STATE(IDPF_VC_ADD_QUEUES_ERR) \
- STATE(IDPF_VC_DEL_QUEUES) \
- STATE(IDPF_VC_DEL_QUEUES_ERR) \
- STATE(IDPF_VC_ALLOC_VECTORS) \
- STATE(IDPF_VC_ALLOC_VECTORS_ERR) \
- STATE(IDPF_VC_DEALLOC_VECTORS) \
- STATE(IDPF_VC_DEALLOC_VECTORS_ERR) \
- STATE(IDPF_VC_SET_SRIOV_VFS) \
- STATE(IDPF_VC_SET_SRIOV_VFS_ERR) \
- STATE(IDPF_VC_GET_RSS_LUT) \
- STATE(IDPF_VC_GET_RSS_LUT_ERR) \
- STATE(IDPF_VC_SET_RSS_LUT) \
- STATE(IDPF_VC_SET_RSS_LUT_ERR) \
- STATE(IDPF_VC_GET_RSS_KEY) \
- STATE(IDPF_VC_GET_RSS_KEY_ERR) \
- STATE(IDPF_VC_SET_RSS_KEY) \
- STATE(IDPF_VC_SET_RSS_KEY_ERR) \
- STATE(IDPF_VC_GET_STATS) \
- STATE(IDPF_VC_GET_STATS_ERR) \
- STATE(IDPF_VC_ADD_MAC_ADDR) \
- STATE(IDPF_VC_ADD_MAC_ADDR_ERR) \
- STATE(IDPF_VC_DEL_MAC_ADDR) \
- STATE(IDPF_VC_DEL_MAC_ADDR_ERR) \
- STATE(IDPF_VC_GET_PTYPE_INFO) \
- STATE(IDPF_VC_GET_PTYPE_INFO_ERR) \
- STATE(IDPF_VC_LOOPBACK_STATE) \
- STATE(IDPF_VC_LOOPBACK_STATE_ERR) \
- STATE(IDPF_VC_NBITS)
-
-#define IDPF_GEN_ENUM(ENUM) ENUM,
-#define IDPF_GEN_STRING(STRING) #STRING,
-
-enum idpf_vport_vc_state {
- IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM)
-};
-
-extern const char * const idpf_vport_vc_state_str[];
-
/**
* enum idpf_vport_reset_cause - Vport soft reset causes
* @IDPF_SR_Q_CHANGE: Soft reset queue change
@@ -358,11 +291,7 @@ struct idpf_port_stats {
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
* @link_speed_mbps: Link speed in mbps
- * @vc_msg: Virtchnl message buffer
- * @vc_state: Virtchnl message state
- * @vchnl_wq: Wait queue for virtchnl messages
* @sw_marker_wq: workqueue for marker packets
- * @vc_buf_lock: Lock to protect virtchnl buffer
*/
struct idpf_vport {
u16 num_txq;
@@ -408,21 +337,18 @@ struct idpf_vport {
bool link_up;
u32 link_speed_mbps;
- char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
- DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
-
- wait_queue_head_t vchnl_wq;
wait_queue_head_t sw_marker_wq;
- struct mutex vc_buf_lock;
};
/**
* enum idpf_user_flags
+ * @__IDPF_USER_FLAG_HSPLIT: header split state
* @__IDPF_PROMISC_UC: Unicast promiscuous mode
* @__IDPF_PROMISC_MC: Multicast promiscuous mode
* @__IDPF_USER_FLAGS_NBITS: Must be last
*/
enum idpf_user_flags {
+ __IDPF_USER_FLAG_HSPLIT = 0U,
__IDPF_PROMISC_UC = 32,
__IDPF_PROMISC_MC,
@@ -474,15 +400,11 @@ struct idpf_vport_user_config_data {
* enum idpf_vport_config_flags - Vport config flags
* @IDPF_VPORT_REG_NETDEV: Register netdev
* @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
- * @IDPF_VPORT_ADD_MAC_REQ: Asynchronous add ether address in flight
- * @IDPF_VPORT_DEL_MAC_REQ: Asynchronous delete ether address in flight
* @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
*/
enum idpf_vport_config_flags {
IDPF_VPORT_REG_NETDEV,
IDPF_VPORT_UP_REQUESTED,
- IDPF_VPORT_ADD_MAC_REQ,
- IDPF_VPORT_DEL_MAC_REQ,
IDPF_VPORT_CONFIG_FLAGS_NBITS,
};
@@ -553,11 +475,13 @@ struct idpf_vector_lifo {
struct idpf_vport_config {
struct idpf_vport_user_config_data user_config;
struct idpf_vport_max_q max_q;
- void *req_qs_chunks;
+ struct virtchnl2_add_queues *req_qs_chunks;
spinlock_t mac_filter_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};
+struct idpf_vc_xn_manager;
+
/**
* struct idpf_adapter - Device data struct generated on probe
* @pdev: PCI device struct given on probe
@@ -599,9 +523,7 @@ struct idpf_vport_config {
* @stats_task: Periodic statistics retrieval task
* @stats_wq: Workqueue for statistics task
* @caps: Negotiated capabilities with device
- * @vchnl_wq: Wait queue for virtchnl messages
- * @vc_state: Virtchnl message state
- * @vc_msg: Virtchnl message buffer
+ * @vcxn_mngr: Virtchnl transaction manager
* @dev_ops: See idpf_dev_ops
* @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
* to VFs but is used to initialize them
@@ -657,10 +579,8 @@ struct idpf_adapter {
struct delayed_work stats_task;
struct workqueue_struct *stats_wq;
struct virtchnl2_get_capabilities caps;
+ struct idpf_vc_xn_manager *vcxn_mngr;
- wait_queue_head_t vchnl_wq;
- DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
- char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
struct idpf_dev_ops dev_ops;
int num_vfs;
bool crc_enable;
@@ -901,68 +821,21 @@ void idpf_mbx_task(struct work_struct *work);
void idpf_vc_event_task(struct work_struct *work);
void idpf_dev_ops_init(struct idpf_adapter *adapter);
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
-int idpf_vport_adjust_qs(struct idpf_vport *vport);
-int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
-void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
-int idpf_vc_core_init(struct idpf_adapter *adapter);
-void idpf_vc_core_deinit(struct idpf_adapter *adapter);
int idpf_intr_req(struct idpf_adapter *adapter);
void idpf_intr_rel(struct idpf_adapter *adapter);
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
- struct idpf_vec_regs *reg_vals);
u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter);
-int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
- u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_reset_cause reset_cause);
-int idpf_send_enable_vport_msg(struct idpf_vport *vport);
-int idpf_send_disable_vport_msg(struct idpf_vport *vport);
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
-int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
-int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
void idpf_deinit_task(struct idpf_adapter *adapter);
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
u16 *q_vector_idxs,
struct idpf_vector_info *vec_info);
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
-int idpf_send_get_stats_msg(struct idpf_vport *vport);
-int idpf_get_vec_ids(struct idpf_adapter *adapter,
- u16 *vecids, int num_vecids,
- struct virtchnl2_vector_chunks *chunks);
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
- void *msg, int msg_size);
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg);
void idpf_set_ethtool_ops(struct net_device *netdev);
-int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
- struct idpf_netdev_priv *np,
- bool add, bool async);
-int idpf_set_promiscuous(struct idpf_adapter *adapter,
- struct idpf_vport_user_config_data *config_data,
- u32 vport_id);
-int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
-u32 idpf_get_vport_id(struct idpf_vport *vport);
-int idpf_vport_queue_ids_init(struct idpf_vport *vport);
-int idpf_queue_reg_init(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
-int idpf_send_enable_queues_msg(struct idpf_vport *vport);
-int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-int idpf_check_supported_desc_ids(struct idpf_vport *vport);
void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
u16 itr, bool tx);
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
-int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
+u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
+bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
+
#endif /* !_IDPF_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
index c7f43d2fcd13..4849590a5591 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -516,6 +516,8 @@ post_buffs_out:
 /* Wrap to end of ring since current ntp is 0 */
cq->next_to_post = cq->ring_size - 1;
+ dma_wmb();
+
wr32(hw, cq->reg.tail, cq->next_to_post);
}
@@ -546,11 +548,6 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
int err = 0;
u16 i;
- if (*num_q_msg == 0)
- return 0;
- else if (*num_q_msg > cq->ring_size)
- return -EBADR;
-
/* take the lock before we start messing with the ring */
mutex_lock(&cq->cq_lock);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
index 8dee098bbfb0..e8e046ef2f0d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -69,6 +69,11 @@ struct idpf_ctlq_msg {
u8 context[IDPF_INDIRECT_CTX_SIZE];
struct idpf_dma_mem *payload;
} indirect;
+ struct {
+ u32 rsvd;
+ u16 data;
+ u16 flags;
+ } sw_cookie;
} ctx;
};
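The new sw_cookie member carries a 16-bit value that the virtchnl transaction code later in this patch splits into an index and a salt (IDPF_VC_XN_IDX_M / IDPF_VC_XN_SALT_M). A hedged sketch of how such a cookie could be packed and unpacked; the helper names are hypothetical:

#include <linux/bitfield.h>

/* Masks as defined in idpf_virtchnl.c further down in this patch;
 * the pack/unpack helpers are illustrative only.
 */
#define IDPF_VC_XN_IDX_M	GENMASK(7, 0)
#define IDPF_VC_XN_SALT_M	GENMASK(15, 8)

static u16 example_xn_cookie_pack(u8 idx, u8 salt)
{
	return FIELD_PREP(IDPF_VC_XN_IDX_M, idx) |
	       FIELD_PREP(IDPF_VC_XN_SALT_M, salt);
}

static void example_xn_cookie_unpack(u16 cookie, u8 *idx, u8 *salt)
{
	*idx = FIELD_GET(IDPF_VC_XN_IDX_M, cookie);
	*salt = FIELD_GET(IDPF_VC_XN_SALT_M, cookie);
}
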
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 34ad1ac46b78..3df9935685e9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_lan_pf_regs.h"
+#include "idpf_virtchnl.h"
#define IDPF_PF_ITR_IDX_SPACING 0x4
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 52ea38669f85..986d429d1175 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -75,14 +75,12 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
/**
* idpf_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function in use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Always returns 0.
*/
-static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int idpf_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_rss_data *rss_data;
@@ -103,15 +101,14 @@ static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (np->state != __IDPF_VPORT_UP)
goto unlock_mutex;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (key)
- memcpy(key, rss_data->rss_key, rss_data->rss_key_size);
+ if (rxfh->key)
+ memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < rss_data->rss_lut_size; i++)
- indir[i] = rss_data->rss_lut[i];
+ rxfh->indir[i] = rss_data->rss_lut[i];
}
unlock_mutex:
@@ -123,15 +120,15 @@ unlock_mutex:
/**
* idpf_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function to use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
*/
-static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int idpf_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_rss_data *rss_data;
@@ -154,17 +151,18 @@ static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir,
if (np->state != __IDPF_VPORT_UP)
goto unlock_mutex;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP) {
err = -EOPNOTSUPP;
goto unlock_mutex;
}
- if (key)
- memcpy(rss_data->rss_key, key, rss_data->rss_key_size);
+ if (rxfh->key)
+ memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);
- if (indir) {
+ if (rxfh->indir) {
for (lut = 0; lut < rss_data->rss_lut_size; lut++)
- rss_data->rss_lut[lut] = indir[lut];
+ rss_data->rss_lut[lut] = rxfh->indir[lut];
}
err = idpf_config_rss(vport);
@@ -320,6 +318,8 @@ static void idpf_get_ringparam(struct net_device *netdev,
ring->rx_pending = vport->rxq_desc_count;
ring->tx_pending = vport->txq_desc_count;
+ kring->tcp_data_split = idpf_vport_get_hsplit(vport);
+
idpf_vport_ctrl_unlock(netdev);
}
@@ -379,6 +379,14 @@ static int idpf_set_ringparam(struct net_device *netdev,
new_rx_count == vport->rxq_desc_count)
goto unlock_mutex;
+ if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
+ NL_SET_ERR_MSG_MOD(ext_ack,
+ "setting TCP data split is not supported");
+ err = -EOPNOTSUPP;
+
+ goto unlock_mutex;
+ }
+
config_data = &vport->adapter->vport_config[idx]->user_config;
config_data->num_req_txq_desc = new_tx_count;
config_data->num_req_rxq_desc = new_rx_count;
@@ -532,7 +540,7 @@ static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
unsigned int i;
for (i = 0; i < size; i++)
- ethtool_sprintf(p, "%s", stats[i].stat_string);
+ ethtool_puts(p, stats[i].stat_string);
}
/**
@@ -1334,6 +1342,7 @@ static int idpf_get_link_ksettings(struct net_device *netdev,
static const struct ethtool_ops idpf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_msglevel = idpf_get_msglevel,
.set_msglevel = idpf_set_msglevel,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 19809b0ddcd9..52ceda6306a3 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2,14 +2,11 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
static const struct net_device_ops idpf_netdev_ops_splitq;
static const struct net_device_ops idpf_netdev_ops_singleq;
-const char * const idpf_vport_vc_state_str[] = {
- IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
-};
-
/**
* idpf_init_vector_stack - Fill the MSIX vector stack with vector index
* @adapter: private data struct
@@ -82,19 +79,12 @@ static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
*/
void idpf_intr_rel(struct idpf_adapter *adapter)
{
- int err;
-
if (!adapter->msix_entries)
return;
idpf_mb_intr_rel_irq(adapter);
pci_free_irq_vectors(adapter->pdev);
-
- err = idpf_send_dealloc_vectors_msg(adapter);
- if (err)
- dev_err(&adapter->pdev->dev,
- "Failed to deallocate vectors: %d\n", err);
-
+ idpf_send_dealloc_vectors_msg(adapter);
idpf_deinit_vector_stack(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -783,6 +773,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
+ netdev->dev_port = idx;
+
/* configure default MTU size */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = vport->max_mtu;
@@ -973,7 +965,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
struct idpf_rss_data *rss_data;
struct idpf_vport_max_q max_q;
u16 idx = vport->idx;
- int i;
vport_config = adapter->vport_config[vport->idx];
idpf_deinit_rss(vport);
@@ -983,20 +974,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
idpf_send_destroy_vport_msg(vport);
- /* Set all bits as we dont know on which vc_state the vport vhnl_wq
- * is waiting on and wakeup the virtchnl workqueue even if it is
- * waiting for the response as we are going down
- */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- set_bit(i, vport->vc_state);
- wake_up(&vport->vchnl_wq);
-
- mutex_destroy(&vport->vc_buf_lock);
-
- /* Clear all the bits */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- clear_bit(i, vport->vc_state);
-
/* Release all max queues allocated to the adapter's pool */
max_q.max_rxq = vport_config->max_q.max_rxq;
max_q.max_txq = vport_config->max_q.max_txq;
@@ -1058,6 +1035,71 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
}
/**
+ * idpf_is_hsplit_supported - check whether the header split is supported
+ * @vport: virtual port to check the capability for
+ *
+ * Return: true if it's supported by the HW/FW, false if not.
+ */
+static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
+{
+ return idpf_is_queue_model_split(vport->rxq_model) &&
+ idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
+ IDPF_CAP_HSPLIT);
+}
+
+/**
+ * idpf_vport_get_hsplit - get the current header split feature state
+ * @vport: virtual port to query the state for
+ *
+ * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported,
+ * ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled,
+ * ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active.
+ */
+u8 idpf_vport_get_hsplit(const struct idpf_vport *vport)
+{
+ const struct idpf_vport_user_config_data *config;
+
+ if (!idpf_is_hsplit_supported(vport))
+ return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+
+ config = &vport->adapter->vport_config[vport->idx]->user_config;
+
+ return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ?
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+ ETHTOOL_TCP_DATA_SPLIT_DISABLED;
+}
+
+/**
+ * idpf_vport_set_hsplit - enable or disable header split on a given vport
+ * @vport: virtual port to configure
+ * @val: Ethtool flag controlling the header split state
+ *
+ * Return: true on success, false if not supported by the HW.
+ */
+bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val)
+{
+ struct idpf_vport_user_config_data *config;
+
+ if (!idpf_is_hsplit_supported(vport))
+ return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+
+ config = &vport->adapter->vport_config[vport->idx]->user_config;
+
+ switch (val) {
+ case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN:
+ /* Default is to enable */
+ case ETHTOOL_TCP_DATA_SPLIT_ENABLED:
+ __set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
+ return true;
+ case ETHTOOL_TCP_DATA_SPLIT_DISABLED:
+ __clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* idpf_vport_alloc - Allocates the next available struct vport in the adapter
* @adapter: board private structure
* @max_q: vport max queue info
@@ -1186,7 +1228,7 @@ void idpf_mbx_task(struct work_struct *work)
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
msecs_to_jiffies(300));
- idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
+ idpf_recv_mb_msg(adapter);
}
/**
@@ -1476,9 +1518,7 @@ void idpf_init_task(struct work_struct *work)
vport_config = adapter->vport_config[index];
init_waitqueue_head(&vport->sw_marker_wq);
- init_waitqueue_head(&vport->vchnl_wq);
- mutex_init(&vport->vc_buf_lock);
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
@@ -1756,6 +1796,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
goto unlock_mutex;
}
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+
/* Initialize the state machine, also allocate memory and request
* resources
*/
@@ -1835,7 +1877,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
* mess with. Nothing below should use those variables from new_vport
* and should instead always refer to them in vport if they need to.
*/
- memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state));
+ memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
@@ -1884,7 +1926,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
/* Same comment as above regarding avoiding copying the wait_queues and
* mutexes applies here. We do not want to mess with those if possible.
*/
- memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state));
+ memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Since idpf_vport_queues_alloc was called with new_port, the queue
* back pointers are currently pointing to the local new_vport. Reset
@@ -2192,7 +2234,7 @@ static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index e1febc74cefd..f784eea044bd 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_devids.h"
+#include "idpf_virtchnl.h"
#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
@@ -30,6 +31,7 @@ static void idpf_remove(struct pci_dev *pdev)
idpf_sriov_configure(pdev, 0);
idpf_vc_core_deinit(adapter);
+
/* Be a good citizen and leave the device clean on exit */
adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
idpf_deinit_dflt_mbx(adapter);
@@ -66,6 +68,8 @@ destroy_wqs:
adapter->vport_config = NULL;
kfree(adapter->netdevs);
adapter->netdevs = NULL;
+ kfree(adapter->vcxn_mngr);
+ adapter->vcxn_mngr = NULL;
mutex_destroy(&adapter->vport_ctrl_lock);
mutex_destroy(&adapter->vector_lock);
@@ -229,8 +233,6 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&adapter->queue_lock);
mutex_init(&adapter->vc_buf_lock);
- init_waitqueue_head(&adapter->vchnl_wq);
-
INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 81288a17da2a..27b93592c4ba 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -328,10 +328,9 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
if (offload->tso_segs) {
qw1 |= IDPF_TX_CTX_DESC_TSO << IDPF_TXD_CTX_QW1_CMD_S;
- qw1 |= ((u64)offload->tso_len << IDPF_TXD_CTX_QW1_TSO_LEN_S) &
- IDPF_TXD_CTX_QW1_TSO_LEN_M;
- qw1 |= ((u64)offload->mss << IDPF_TXD_CTX_QW1_MSS_S) &
- IDPF_TXD_CTX_QW1_MSS_M;
+ qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_TSO_LEN_M,
+ offload->tso_len);
+ qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss);
u64_stats_update_begin(&txq->stats_sync);
u64_stats_inc(&txq->q_stats.tx.lso_pkts);
@@ -1044,7 +1043,6 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
}
idpf_rx_sync_for_cpu(rx_buf, fields.size);
- skb = rx_q->skb;
if (skb)
idpf_rx_add_frag(rx_buf, skb, fields.size);
else
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 5e1ef70d54fe..285da2177ee4 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
/**
* idpf_buf_lifo_push - push a buffer pointer onto stack
@@ -396,7 +397,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
if (!rxq)
return;
- if (!bufq && idpf_is_queue_model_split(q_model) && rxq->skb) {
+ if (rxq->skb) {
dev_kfree_skb_any(rxq->skb);
rxq->skb = NULL;
}
@@ -505,9 +506,9 @@ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
- ((buf_id << IDPF_RX_BI_BUFID_S) & IDPF_RX_BI_BUFID_M) |
- (!!(test_bit(__IDPF_Q_GEN_CHK, refillq->flags)) <<
- IDPF_RX_BI_GEN_S);
+ FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
+ FIELD_PREP(IDPF_RX_BI_GEN_M,
+ test_bit(__IDPF_Q_GEN_CHK, refillq->flags));
if (unlikely(++nta == refillq->desc_count)) {
nta = 0;
@@ -1240,12 +1241,15 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
struct idpf_adapter *adapter = vport->adapter;
struct idpf_queue *q;
int i, k, err = 0;
+ bool hs;
vport->rxq_grps = kcalloc(vport->num_rxq_grp,
sizeof(struct idpf_rxq_group), GFP_KERNEL);
if (!vport->rxq_grps)
return -ENOMEM;
+ hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
int j;
@@ -1298,9 +1302,8 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
q->rx_buf_size = vport->bufq_size[j];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
- if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
- IDPF_CAP_HSPLIT) &&
- idpf_is_queue_model_split(vport->rxq_model)) {
+
+ if (hs) {
q->rx_hsplit_en = true;
q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
}
@@ -1344,9 +1347,7 @@ skip_splitq_rx_init:
rx_qgrp->splitq.rxq_sets[j]->refillq1 =
&rx_qgrp->splitq.bufq_sets[1].refillqs[j];
- if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
- IDPF_CAP_HSPLIT) &&
- idpf_is_queue_model_split(vport->rxq_model)) {
+ if (hs) {
q->rx_hsplit_en = true;
q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
}
@@ -1825,14 +1826,14 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
u16 gen;
/* if the descriptor isn't done, no work yet to do */
- gen = (le16_to_cpu(tx_desc->qid_comptype_gen) &
- IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
+ gen = le16_get_bits(tx_desc->qid_comptype_gen,
+ IDPF_TXD_COMPLQ_GEN_M);
if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
break;
/* Find necessary info of TX queue to clean buffers */
- rel_tx_qid = (le16_to_cpu(tx_desc->qid_comptype_gen) &
- IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
+ rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
+ IDPF_TXD_COMPLQ_QID_M);
if (rel_tx_qid >= complq->txq_grp->num_txq ||
!complq->txq_grp->txqs[rel_tx_qid]) {
dev_err(&complq->vport->adapter->pdev->dev,
@@ -1842,9 +1843,8 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
tx_q = complq->txq_grp->txqs[rel_tx_qid];
/* Determine completion type */
- ctype = (le16_to_cpu(tx_desc->qid_comptype_gen) &
- IDPF_TXD_COMPLQ_COMPL_TYPE_M) >>
- IDPF_TXD_COMPLQ_COMPL_TYPE_S;
+ ctype = le16_get_bits(tx_desc->qid_comptype_gen,
+ IDPF_TXD_COMPLQ_COMPL_TYPE_M);
switch (ctype) {
case IDPF_TXD_COMPLT_RE:
hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
@@ -1945,11 +1945,10 @@ void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
u16 td_cmd, u16 size)
{
desc->q.qw1.cmd_dtype =
- cpu_to_le16(params->dtype & IDPF_FLEX_TXD_QW1_DTYPE_M);
+ le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
desc->q.qw1.cmd_dtype |=
- cpu_to_le16((td_cmd << IDPF_FLEX_TXD_QW1_CMD_S) &
- IDPF_FLEX_TXD_QW1_CMD_M);
- desc->q.qw1.buf_size = cpu_to_le16((u16)size);
+ le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
+ desc->q.qw1.buf_size = cpu_to_le16(size);
desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
}
@@ -2365,7 +2364,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
*/
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
{
- const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const struct skb_shared_info *shinfo;
union {
struct iphdr *v4;
struct ipv6hdr *v6;
@@ -2379,13 +2378,15 @@ int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
u32 paylen, l4_start;
int err;
- if (!shinfo->gso_size)
+ if (!skb_is_gso(skb))
return 0;
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
+ shinfo = skb_shinfo(skb);
+
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
@@ -2841,8 +2842,9 @@ static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_n
qword1);
csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
qword0);
- csum->raw_csum_inv = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M,
- le16_to_cpu(rx_desc->ptype_err_fflags0));
+ csum->raw_csum_inv =
+ le16_get_bits(rx_desc->ptype_err_fflags0,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
}
@@ -2936,8 +2938,10 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
struct idpf_rx_ptype_decoded decoded;
u16 rx_ptype;
- rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M,
- le16_to_cpu(rx_desc->ptype_err_fflags0));
+ rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
+
+ skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
/* If we don't know the ptype we can't do anything else with it. Just
@@ -2949,10 +2953,8 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
/* process RSS/hash */
idpf_rx_hash(rxq, skb, rx_desc, &decoded);
- skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
-
- if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M,
- le16_to_cpu(rx_desc->hdrlen_flags)))
+ if (le16_get_bits(rx_desc->hdrlen_flags,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits);
@@ -3003,8 +3005,7 @@ struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
/* prefetch first cache line of first page */
net_prefetch(va);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE,
- GFP_ATOMIC);
+ skb = napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE);
if (unlikely(!skb)) {
idpf_rx_put_page(rx_buf);
@@ -3058,7 +3059,7 @@ static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq,
struct sk_buff *skb;
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC);
+ skb = napi_alloc_skb(&rxq->q_vector->napi, size);
if (unlikely(!skb))
return NULL;
@@ -3146,8 +3147,8 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
dma_rmb();
/* if the descriptor isn't done, no work yet to do */
- gen_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
- gen_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M, gen_id);
+ gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
break;
@@ -3162,9 +3163,8 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
continue;
}
- pkt_len = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
- pkt_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M,
- pkt_len);
+ pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
rx_desc->status_err0_qw1);
@@ -3181,14 +3181,12 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
goto bypass_hsplit;
}
- hdr_len = le16_to_cpu(rx_desc->hdrlen_flags);
- hdr_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M,
- hdr_len);
+ hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M);
bypass_hsplit:
- bufq_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
- bufq_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M,
- bufq_id);
+ bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
if (!bufq_id)
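The idpf_txrx.c hunks above switch from FIELD_GET() on a le16_to_cpu()-converted word to the combined le16_get_bits()/le16_encode_bits() helpers. A minimal sketch of that equivalence, using a hypothetical descriptor field:

#include <linux/bitfield.h>

/* Hypothetical field in a little-endian 16-bit descriptor word;
 * the mask and function names are illustrative only.
 */
#define EXAMPLE_DESC_FIELD_M	GENMASK(14, 10)

static u16 example_desc_field_get(__le16 raw)
{
	/* Equivalent to FIELD_GET(EXAMPLE_DESC_FIELD_M, le16_to_cpu(raw)) */
	return le16_get_bits(raw, EXAMPLE_DESC_FIELD_M);
}

static __le16 example_desc_field_set(u16 val)
{
	/* Equivalent to cpu_to_le16(FIELD_PREP(EXAMPLE_DESC_FIELD_M, val)) */
	return le16_encode_bits(val, EXAMPLE_DESC_FIELD_M);
}
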
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index df76493faa75..3d046b81e507 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -8,6 +8,8 @@
#include <net/tcp.h>
#include <net/netdev_queues.h>
+#include "virtchnl2_lan_desc.h"
+
#define IDPF_LARGE_MAX_Q 256
#define IDPF_MAX_Q 16
#define IDPF_MIN_Q 2
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 8ade4e3a9fe1..629cb5cb7c9f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_lan_vf_regs.h"
+#include "idpf_virtchnl.h"
#define IDPF_VF_ITR_IDX_SPACING 0x40
@@ -137,7 +138,7 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
/* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
if (trig_cause == IDPF_HR_FUNC_RESET &&
!test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
- idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL);
+ idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 2c1b051fdc0d..a5f9b7a5effe 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2,46 +2,192 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
+
+#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
+#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
+#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
+#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
+#define IDPF_VC_XN_RING_LEN U8_MAX
+
+/**
+ * enum idpf_vc_xn_state - Virtchnl transaction status
+ * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
+ * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
+ * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
+ * buffer updated
+ * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
+ * was an error, buffer not updated
+ * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
+ * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
+ * return context; a callback may be provided to handle
+ * return
+ */
+enum idpf_vc_xn_state {
+ IDPF_VC_XN_IDLE = 1,
+ IDPF_VC_XN_WAITING,
+ IDPF_VC_XN_COMPLETED_SUCCESS,
+ IDPF_VC_XN_COMPLETED_FAILED,
+ IDPF_VC_XN_SHUTDOWN,
+ IDPF_VC_XN_ASYNC,
+};
+
+struct idpf_vc_xn;
+/* Callback for asynchronous messages */
+typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ const struct idpf_ctlq_msg *);
+
+/**
+ * struct idpf_vc_xn - Data structure representing virtchnl transactions
+ * @completed: virtchnl event loop uses that to signal when a reply is
+ * available, uses kernel completion API
+ * @state: virtchnl event loop stores the data below, protected by the
+ * completion's lock.
+ * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
+ * truncated on its way to the receiver thread according to
+ * reply_buf.iov_len.
+ * @reply: Reference to the buffer(s) where the reply data should be written
+ * to. May be 0-length (then NULL address permitted) if the reply data
+ * should be ignored.
+ * @async_handler: if sent asynchronously, a callback can be provided to handle
+ * the reply when it's received
+ * @vc_op: corresponding opcode sent with this transaction
+ * @idx: index used as retrieval on reply receive, used for cookie
+ * @salt: changed every message to make unique, used for cookie
+ */
+struct idpf_vc_xn {
+ struct completion completed;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+ async_vc_cb async_handler;
+ u32 vc_op;
+ u8 idx;
+ u8 salt;
+};
+
+/**
+ * struct idpf_vc_xn_params - Parameters for executing transaction
+ * @send_buf: kvec for send buffer
+ * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
+ * @timeout_ms: timeout to wait for reply
+ * @async: send message asynchronously, will not wait on completion
+ * @async_handler: If sent asynchronously, optional callback handler. The user
+ * must be careful when using async handlers as the memory for
+ * the recv_buf _cannot_ be on stack if this is async.
+ * @vc_op: virtchnl op to send
+ */
+struct idpf_vc_xn_params {
+ struct kvec send_buf;
+ struct kvec recv_buf;
+ int timeout_ms;
+ bool async;
+ async_vc_cb async_handler;
+ u32 vc_op;
+};
+
+/**
+ * struct idpf_vc_xn_manager - Manager for tracking transactions
+ * @ring: backing and lookup for transactions
+ * @free_xn_bm: bitmap for free transactions
+ * @xn_bm_lock: make bitmap access synchronous where necessary
+ * @salt: used to make cookie unique every message
+ */
+struct idpf_vc_xn_manager {
+ struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
+ DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
+ spinlock_t xn_bm_lock;
+ u8 salt;
+};
+
+/**
+ * idpf_vid_to_vport - Translate vport id to vport pointer
+ * @adapter: private data struct
+ * @v_id: vport id to translate
+ *
+ * Returns vport matching v_id, NULL if not found.
+ */
+static
+struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
+{
+ u16 num_max_vports = idpf_get_max_vports(adapter);
+ int i;
+
+ for (i = 0; i < num_max_vports; i++)
+ if (adapter->vport_ids[i] == v_id)
+ return adapter->vports[i];
+
+ return NULL;
+}
+
+/**
+ * idpf_handle_event_link - Handle link event message
+ * @adapter: private data struct
+ * @v2e: virtchnl event message
+ */
+static void idpf_handle_event_link(struct idpf_adapter *adapter,
+ const struct virtchnl2_event *v2e)
+{
+ struct idpf_netdev_priv *np;
+ struct idpf_vport *vport;
+
+ vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
+ if (!vport) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
+ v2e->vport_id);
+ return;
+ }
+ np = netdev_priv(vport->netdev);
+
+ vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+
+ if (vport->link_up == v2e->link_status)
+ return;
+
+ vport->link_up = v2e->link_status;
+
+ if (np->state != __IDPF_VPORT_UP)
+ return;
+
+ if (vport->link_up) {
+ netif_tx_start_all_queues(vport->netdev);
+ netif_carrier_on(vport->netdev);
+ } else {
+ netif_tx_stop_all_queues(vport->netdev);
+ netif_carrier_off(vport->netdev);
+ }
+}
/**
* idpf_recv_event_msg - Receive virtchnl event message
- * @vport: virtual port structure
+ * @adapter: Driver specific private structure
* @ctlq_msg: message to copy from
*
* Receive virtchnl event message
*/
-static void idpf_recv_event_msg(struct idpf_vport *vport,
+static void idpf_recv_event_msg(struct idpf_adapter *adapter,
struct idpf_ctlq_msg *ctlq_msg)
{
- struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ int payload_size = ctlq_msg->ctx.indirect.payload->size;
struct virtchnl2_event *v2e;
- bool link_status;
u32 event;
+ if (payload_size < sizeof(*v2e)) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode,
+ payload_size);
+ return;
+ }
+
v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
event = le32_to_cpu(v2e->event);
switch (event) {
case VIRTCHNL2_EVENT_LINK_CHANGE:
- vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
- link_status = v2e->link_status;
-
- if (vport->link_up == link_status)
- break;
-
- vport->link_up = link_status;
- if (np->state == __IDPF_VPORT_UP) {
- if (vport->link_up) {
- netif_carrier_on(vport->netdev);
- netif_tx_start_all_queues(vport->netdev);
- } else {
- netif_tx_stop_all_queues(vport->netdev);
- netif_carrier_off(vport->netdev);
- }
- }
- break;
+ idpf_handle_event_link(adapter, v2e);
+ return;
default:
- dev_err(&vport->adapter->pdev->dev,
+ dev_err(&adapter->pdev->dev,
"Unknown event %d from PF\n", event);
break;
}
@@ -93,13 +239,14 @@ err_kfree:
* @op: virtchnl opcode
* @msg_size: size of the payload
* @msg: pointer to buffer holding the payload
+ * @cookie: unique SW generated cookie per message
*
* Will prepare the control queue message and initiates the send api
*
* Returns 0 on success, negative on failure
*/
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg)
+ u16 msg_size, u8 *msg, u16 cookie)
{
struct idpf_ctlq_msg *ctlq_msg;
struct idpf_dma_mem *dma_mem;
@@ -139,8 +286,12 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
err = -ENOMEM;
goto dma_alloc_error;
}
- memcpy(dma_mem->va, msg, msg_size);
+
+ /* It's possible we're just sending an opcode but no buffer */
+ if (msg && msg_size)
+ memcpy(dma_mem->va, msg, msg_size);
ctlq_msg->ctx.indirect.payload = dma_mem;
+ ctlq_msg->ctx.sw_cookie.data = cookie;
err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
if (err)
@@ -159,592 +310,432 @@ dma_mem_error:
return err;
}
-/**
- * idpf_find_vport - Find vport pointer from control queue message
- * @adapter: driver specific private structure
- * @vport: address of vport pointer to copy the vport from adapters vport list
- * @ctlq_msg: control queue message
+/* API for virtchnl "transaction" support ("xn" for short).
*
- * Return 0 on success, error value on failure. Also this function does check
- * for the opcodes which expect to receive payload and return error value if
- * it is not the case.
+ * We are reusing the completion lock to serialize accesses to the
+ * transaction state for simplicity, but it could be its own separate lock
+ * as well. For now, this API is only used from within a workqueue context;
+ * raw_spin_lock() is enough.
*/
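+
+/* A minimal sketch of the flow the helpers below implement: a sender claims a
+ * free transaction, encodes its index and salt into the message cookie, sends
+ * the mailbox message and sleeps on the completion; the receive path later
+ * matches the reply back to the transaction through that cookie:
+ *
+ *	xn = idpf_vc_xn_pop_free(vcxn_mngr);
+ *	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
+ *		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
+ *	idpf_send_mb_msg(adapter, op, len, buf, cookie);
+ *	wait_for_completion_timeout(&xn->completed, timeout);
+ *	...reply handled by idpf_vc_xn_forward_reply()...
+ *	idpf_vc_xn_push_free(vcxn_mngr, xn);
+ */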
-static int idpf_find_vport(struct idpf_adapter *adapter,
- struct idpf_vport **vport,
- struct idpf_ctlq_msg *ctlq_msg)
-{
- bool no_op = false, vid_found = false;
- int i, err = 0;
- char *vc_msg;
- u32 v_id;
+/**
+ * idpf_vc_xn_lock - Request exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+#define idpf_vc_xn_lock(xn) \
+ raw_spin_lock(&(xn)->completed.wait.lock)
- vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL);
- if (!vc_msg)
- return -ENOMEM;
+/**
+ * idpf_vc_xn_unlock - Release exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+#define idpf_vc_xn_unlock(xn) \
+ raw_spin_unlock(&(xn)->completed.wait.lock)
- if (ctlq_msg->data_len) {
- size_t payload_size = ctlq_msg->ctx.indirect.payload->size;
+/**
+ * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
+ * reset the transaction state.
+ * @xn: struct idpf_vc_xn to update
+ */
+static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
+{
+ xn->reply.iov_base = NULL;
+ xn->reply.iov_len = 0;
- if (!payload_size) {
- dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n");
- kfree(vc_msg);
+ if (xn->state != IDPF_VC_XN_SHUTDOWN)
+ xn->state = IDPF_VC_XN_IDLE;
+}
- return -EINVAL;
- }
+/**
+ * idpf_vc_xn_init - Initialize virtchnl transaction object
+ * @vcxn_mngr: pointer to vc transaction manager struct
+ */
+static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
+{
+ int i;
- memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN));
- }
-
- switch (ctlq_msg->cookie.mbx.chnl_opcode) {
- case VIRTCHNL2_OP_VERSION:
- case VIRTCHNL2_OP_GET_CAPS:
- case VIRTCHNL2_OP_CREATE_VPORT:
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- goto free_vc_msg;
- case VIRTCHNL2_OP_ENABLE_VPORT:
- case VIRTCHNL2_OP_DISABLE_VPORT:
- case VIRTCHNL2_OP_DESTROY_VPORT:
- v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- case VIRTCHNL2_OP_DEL_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- case VIRTCHNL2_OP_SET_RSS_LUT:
- v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- case VIRTCHNL2_OP_SET_RSS_KEY:
- v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_EVENT:
- v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_LOOPBACK:
- v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
- v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ADD_MAC_ADDR:
- case VIRTCHNL2_OP_DEL_MAC_ADDR:
- v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id);
- break;
- default:
- no_op = true;
- break;
- }
+ spin_lock_init(&vcxn_mngr->xn_bm_lock);
- if (no_op)
- goto free_vc_msg;
+ for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
+ struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
- for (i = 0; i < idpf_get_max_vports(adapter); i++) {
- if (adapter->vport_ids[i] == v_id) {
- vid_found = true;
- break;
- }
+ xn->state = IDPF_VC_XN_IDLE;
+ xn->idx = i;
+ idpf_vc_xn_release_bufs(xn);
+ init_completion(&xn->completed);
}
- if (vid_found)
- *vport = adapter->vports[i];
- else
- err = -EINVAL;
-
-free_vc_msg:
- kfree(vc_msg);
-
- return err;
+ bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
}
/**
- * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer.
- * @adapter: driver specific private structure
- * @vport: virtual port structure
- * @ctlq_msg: msg to copy from
- * @err_enum: err bit to set on error
+ * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
+ * @vcxn_mngr: pointer to vc transaction manager struct
*
- * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success,
- * negative on failure.
+ * All waiting threads will be woken up and their transactions aborted. Further
+ * operations on that object will fail.
*/
-static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- struct idpf_ctlq_msg *ctlq_msg,
- enum idpf_vport_vc_state err_enum)
+static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
- if (ctlq_msg->cookie.mbx.chnl_retval) {
- if (vport)
- set_bit(err_enum, vport->vc_state);
- else
- set_bit(err_enum, adapter->vc_state);
+ int i;
- return -EINVAL;
- }
+ spin_lock_bh(&vcxn_mngr->xn_bm_lock);
+ bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
+ spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
- if (vport)
- memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(int, ctlq_msg->ctx.indirect.payload->size,
- IDPF_CTLQ_MAX_BUF_LEN));
- else
- memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(int, ctlq_msg->ctx.indirect.payload->size,
- IDPF_CTLQ_MAX_BUF_LEN));
+ for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
+ struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
- return 0;
+ idpf_vc_xn_lock(xn);
+ xn->state = IDPF_VC_XN_SHUTDOWN;
+ idpf_vc_xn_release_bufs(xn);
+ idpf_vc_xn_unlock(xn);
+ complete_all(&xn->completed);
+ }
}
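+
+/* Any thread sleeping in idpf_vc_xn_exec() at this point is woken by the
+ * complete_all() above, sees IDPF_VC_XN_SHUTDOWN as the transaction state and
+ * fails its call with -ENXIO instead of touching the released buffers.
+ */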
/**
- * idpf_recv_vchnl_op - helper function with common logic when handling the
- * reception of VIRTCHNL OPs.
- * @adapter: driver specific private structure
- * @vport: virtual port structure
- * @ctlq_msg: msg to copy from
- * @state: state bit used on timeout check
- * @err_state: err bit to set on error
+ * idpf_vc_xn_pop_free - Pop a free transaction from free list
+ * @vcxn_mngr: transaction manager to pop from
+ *
+ * Returns NULL if no free transactions are available
*/
-static void idpf_recv_vchnl_op(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- struct idpf_ctlq_msg *ctlq_msg,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_state)
+static
+struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
- wait_queue_head_t *vchnl_wq;
- int err;
+ struct idpf_vc_xn *xn = NULL;
+ unsigned long free_idx;
- if (vport)
- vchnl_wq = &vport->vchnl_wq;
- else
- vchnl_wq = &adapter->vchnl_wq;
+ spin_lock_bh(&vcxn_mngr->xn_bm_lock);
+ free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
+ if (free_idx == IDPF_VC_XN_RING_LEN)
+ goto do_unlock;
- err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state);
- if (wq_has_sleeper(vchnl_wq)) {
- if (vport)
- set_bit(state, vport->vc_state);
- else
- set_bit(state, adapter->vc_state);
+ clear_bit(free_idx, vcxn_mngr->free_xn_bm);
+ xn = &vcxn_mngr->ring[free_idx];
+ xn->salt = vcxn_mngr->salt++;
- wake_up(vchnl_wq);
- } else {
- if (!err) {
- dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n",
- ctlq_msg->cookie.mbx.chnl_opcode);
- } else {
- /* Clear the errors since there is no sleeper to pass
- * them on
- */
- if (vport)
- clear_bit(err_state, vport->vc_state);
- else
- clear_bit(err_state, adapter->vc_state);
- }
- }
+do_unlock:
+ spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
+
+ return xn;
}
/**
- * idpf_recv_mb_msg - Receive message over mailbox
- * @adapter: Driver specific private structure
- * @op: virtchannel operation code
- * @msg: Received message holding buffer
- * @msg_size: message size
- *
- * Will receive control queue message and posts the receive buffer. Returns 0
- * on success and negative on failure.
+ * idpf_vc_xn_push_free - Push a free transaction to free list
+ * @vcxn_mngr: transaction manager to push to
+ * @xn: transaction to push
*/
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
- void *msg, int msg_size)
+static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
+ struct idpf_vc_xn *xn)
{
- struct idpf_vport *vport = NULL;
- struct idpf_ctlq_msg ctlq_msg;
- struct idpf_dma_mem *dma_mem;
- bool work_done = false;
- int num_retry = 2000;
- u16 num_q_msg;
- int err;
-
- while (1) {
- struct idpf_vport_config *vport_config;
- int payload_size = 0;
-
- /* Try to get one message */
- num_q_msg = 1;
- dma_mem = NULL;
- err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg);
- /* If no message then decide if we have to retry based on
- * opcode
- */
- if (err || !num_q_msg) {
- /* Increasing num_retry to consider the delayed
- * responses because of large number of VF's mailbox
- * messages. If the mailbox message is received from
- * the other side, we come out of the sleep cycle
- * immediately else we wait for more time.
- */
- if (!op || !num_retry--)
- break;
- if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
- err = -EIO;
- break;
- }
- msleep(20);
- continue;
- }
+ idpf_vc_xn_release_bufs(xn);
+ set_bit(xn->idx, vcxn_mngr->free_xn_bm);
+}
- /* If we are here a message is received. Check if we are looking
- * for a specific message based on opcode. If it is different
- * ignore and post buffers
+/**
+ * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @params: parameters for this particular transaction including
+ * -vc_op: virtchannel operation to send
+ * -send_buf: kvec iov for send buf and len
+ * -recv_buf: kvec iov for recv buf and len (ignored if NULL)
+ * -timeout_ms: timeout waiting for a reply (milliseconds)
+ * -async: don't wait for message reply, will lose caller context
+ * -async_handler: callback to handle async replies
+ *
+ * @returns >= 0 for success, the size of the initial reply (may or may not be
+ * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
+ * error.
+ */
+static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params)
+{
+ const struct kvec *send_buf = &params->send_buf;
+ struct idpf_vc_xn *xn;
+ ssize_t retval;
+ u16 cookie;
+
+ xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
+ /* no free transactions available */
+ if (!xn)
+ return -ENOSPC;
+
+ idpf_vc_xn_lock(xn);
+ if (xn->state == IDPF_VC_XN_SHUTDOWN) {
+ retval = -ENXIO;
+ goto only_unlock;
+ } else if (xn->state != IDPF_VC_XN_IDLE) {
+ /* We're just going to clobber this transaction even though
+ * it's not IDLE. If we don't reuse it we could theoretically
+ * eventually leak all the free transactions and not be able to
+ * send any messages. At least this way we make an attempt to
+ * remain functional even though something really bad is
+ * happening that's corrupting what was supposed to be free
+ * transactions.
*/
- if (op && ctlq_msg.cookie.mbx.chnl_opcode != op)
- goto post_buffs;
+ WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
+ xn->idx, xn->vc_op);
+ }
- err = idpf_find_vport(adapter, &vport, &ctlq_msg);
- if (err)
- goto post_buffs;
+ xn->reply = params->recv_buf;
+ xn->reply_sz = 0;
+ xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
+ xn->vc_op = params->vc_op;
+ xn->async_handler = params->async_handler;
+ idpf_vc_xn_unlock(xn);
- if (ctlq_msg.data_len)
- payload_size = ctlq_msg.ctx.indirect.payload->size;
+ if (!params->async)
+ reinit_completion(&xn->completed);
+ cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
+ FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
- /* All conditions are met. Either a message requested is
- * received or we received a message to be processed
- */
- switch (ctlq_msg.cookie.mbx.chnl_opcode) {
- case VIRTCHNL2_OP_VERSION:
- case VIRTCHNL2_OP_GET_CAPS:
- if (ctlq_msg.cookie.mbx.chnl_retval) {
- dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n",
- ctlq_msg.cookie.mbx.chnl_opcode,
- ctlq_msg.cookie.mbx.chnl_retval);
- err = -EBADMSG;
- } else if (msg) {
- memcpy(msg, ctlq_msg.ctx.indirect.payload->va,
- min_t(int, payload_size, msg_size));
- }
- work_done = true;
- break;
- case VIRTCHNL2_OP_CREATE_VPORT:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_CREATE_VPORT,
- IDPF_VC_CREATE_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_ENABLE_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ENA_VPORT,
- IDPF_VC_ENA_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_DISABLE_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DIS_VPORT,
- IDPF_VC_DIS_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_DESTROY_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DESTROY_VPORT,
- IDPF_VC_DESTROY_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_CONFIG_TXQ,
- IDPF_VC_CONFIG_TXQ_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_CONFIG_RXQ,
- IDPF_VC_CONFIG_RXQ_ERR);
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ENA_QUEUES,
- IDPF_VC_ENA_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DIS_QUEUES,
- IDPF_VC_DIS_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ADD_QUEUES,
- IDPF_VC_ADD_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_DEL_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DEL_QUEUES,
- IDPF_VC_DEL_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_MAP_IRQ,
- IDPF_VC_MAP_IRQ_ERR);
- break;
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_UNMAP_IRQ,
- IDPF_VC_UNMAP_IRQ_ERR);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_STATS,
- IDPF_VC_GET_STATS_ERR);
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_RSS_LUT,
- IDPF_VC_GET_RSS_LUT_ERR);
- break;
- case VIRTCHNL2_OP_SET_RSS_LUT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_SET_RSS_LUT,
- IDPF_VC_SET_RSS_LUT_ERR);
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_RSS_KEY,
- IDPF_VC_GET_RSS_KEY_ERR);
- break;
- case VIRTCHNL2_OP_SET_RSS_KEY:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_SET_RSS_KEY,
- IDPF_VC_SET_RSS_KEY_ERR);
- break;
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_SET_SRIOV_VFS,
- IDPF_VC_SET_SRIOV_VFS_ERR);
- break;
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_ALLOC_VECTORS,
- IDPF_VC_ALLOC_VECTORS_ERR);
- break;
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_DEALLOC_VECTORS,
- IDPF_VC_DEALLOC_VECTORS_ERR);
- break;
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_GET_PTYPE_INFO,
- IDPF_VC_GET_PTYPE_INFO_ERR);
- break;
- case VIRTCHNL2_OP_LOOPBACK:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_LOOPBACK_STATE,
- IDPF_VC_LOOPBACK_STATE_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
- /* This message can only be sent asynchronously. As
- * such we'll have lost the context in which it was
- * called and thus can only really report if it looks
- * like an error occurred. Don't bother setting ERR bit
- * or waking chnl_wq since no work queue will be waiting
- * to read the message.
- */
- if (ctlq_msg.cookie.mbx.chnl_retval) {
- dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- }
- break;
- case VIRTCHNL2_OP_ADD_MAC_ADDR:
- vport_config = adapter->vport_config[vport->idx];
- if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ,
- vport_config->flags)) {
- /* Message was sent asynchronously. We don't
- * normally print errors here, instead
- * prefer to handle errors in the function
- * calling wait_for_event. However, if
- * asynchronous, the context in which the
- * message was sent is lost. We can't really do
- * anything about at it this point, but we
- * should at a minimum indicate that it looks
- * like something went wrong. Also don't bother
- * setting ERR bit or waking vchnl_wq since no
- * one will be waiting to read the async
- * message.
- */
- if (ctlq_msg.cookie.mbx.chnl_retval)
- dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- break;
- }
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ADD_MAC_ADDR,
- IDPF_VC_ADD_MAC_ADDR_ERR);
- break;
- case VIRTCHNL2_OP_DEL_MAC_ADDR:
- vport_config = adapter->vport_config[vport->idx];
- if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ,
- vport_config->flags)) {
- /* Message was sent asynchronously like the
- * VIRTCHNL2_OP_ADD_MAC_ADDR
- */
- if (ctlq_msg.cookie.mbx.chnl_retval)
- dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- break;
- }
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DEL_MAC_ADDR,
- IDPF_VC_DEL_MAC_ADDR_ERR);
- break;
- case VIRTCHNL2_OP_EVENT:
- idpf_recv_event_msg(vport, &ctlq_msg);
- break;
- default:
- dev_warn(&adapter->pdev->dev,
- "Unhandled virtchnl response %d\n",
- ctlq_msg.cookie.mbx.chnl_opcode);
- break;
- }
+ retval = idpf_send_mb_msg(adapter, params->vc_op,
+ send_buf->iov_len, send_buf->iov_base,
+ cookie);
+ if (retval) {
+ idpf_vc_xn_lock(xn);
+ goto release_and_unlock;
+ }
-post_buffs:
- if (ctlq_msg.data_len)
- dma_mem = ctlq_msg.ctx.indirect.payload;
- else
- num_q_msg = 0;
+ if (params->async)
+ return 0;
- err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq,
- &num_q_msg, &dma_mem);
- /* If post failed clear the only buffer we supplied */
- if (err && dma_mem)
- dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
- dma_mem->va, dma_mem->pa);
+ wait_for_completion_timeout(&xn->completed,
+ msecs_to_jiffies(params->timeout_ms));
- /* Applies only if we are looking for a specific opcode */
- if (work_done)
- break;
+ /* No need to check the return value; we check the final state of the
+ * transaction below. The transaction may effectively get more time than
+ * the specified timeout if we get preempted here, right after
+ * wait_for_completion_timeout() returns. This should be a non-issue,
+ * however.
+ */
+ idpf_vc_xn_lock(xn);
+ switch (xn->state) {
+ case IDPF_VC_XN_SHUTDOWN:
+ retval = -ENXIO;
+ goto only_unlock;
+ case IDPF_VC_XN_WAITING:
+ dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
+ params->vc_op, params->timeout_ms);
+ retval = -ETIME;
+ break;
+ case IDPF_VC_XN_COMPLETED_SUCCESS:
+ retval = xn->reply_sz;
+ break;
+ case IDPF_VC_XN_COMPLETED_FAILED:
+ dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
+ params->vc_op);
+ retval = -EIO;
+ break;
+ default:
+ /* Invalid state. */
+ WARN_ON_ONCE(1);
+ retval = -EIO;
+ break;
}
- return err;
+release_and_unlock:
+ idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+ /* If we receive a VC reply after here, it will be dropped. */
+only_unlock:
+ idpf_vc_xn_unlock(xn);
+
+ return retval;
}
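+
+/* The cookie travels in ctlq_msg->ctx.sw_cookie.data as a u16. Assuming
+ * IDPF_VC_XN_IDX_M covers the low bits and IDPF_VC_XN_SALT_M the remaining
+ * high bits of that value, a transaction with idx 5 and salt 0x2a round-trips
+ * as:
+ *
+ *	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, 0x2a) |
+ *		 FIELD_PREP(IDPF_VC_XN_IDX_M, 5);
+ *	FIELD_GET(IDPF_VC_XN_IDX_M, cookie) == 5
+ *	FIELD_GET(IDPF_VC_XN_SALT_M, cookie) == 0x2a
+ *
+ * A reply whose salt no longer matches the slot's current salt is stale and
+ * is rejected by idpf_vc_xn_forward_reply() below.
+ */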
/**
- * __idpf_wait_for_event - wrapper function for wait on virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
- * @timeout: Max time to wait
+ * idpf_vc_xn_forward_async - Handle async reply receives
+ * @adapter: private data struct
+ * @xn: transaction to handle
+ * @ctlq_msg: corresponding ctlq_msg
*
- * Checks if state is set upon expiry of timeout. Returns 0 on success,
- * negative on failure.
+ * For async sends we're going to lose the caller's context, so if an
+ * async_handler was provided, it can deal with the reply; otherwise we just
+ * check and report if there is an error.
*/
-static int __idpf_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check,
- int timeout)
+static int
+idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
{
- int time_to_wait, num_waits;
- wait_queue_head_t *vchnl_wq;
- unsigned long *vc_state;
+ int err = 0;
- time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT);
- num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT);
+ if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+ xn->reply_sz = 0;
+ err = -EINVAL;
+ goto release_bufs;
+ }
- if (vport) {
- vchnl_wq = &vport->vchnl_wq;
- vc_state = vport->vc_state;
- } else {
- vchnl_wq = &adapter->vchnl_wq;
- vc_state = adapter->vc_state;
+ if (xn->async_handler) {
+ err = xn->async_handler(adapter, xn, ctlq_msg);
+ goto release_bufs;
+ }
+
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ xn->reply_sz = 0;
+ dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EINVAL;
}
- while (num_waits) {
- int event;
+release_bufs:
+ idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+
+ return err;
+}
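+
+/* A hypothetical handler matching the way async_handler is invoked above (the
+ * actual callback type is declared alongside the transaction structures):
+ *
+ *	static int idpf_sample_async_handler(struct idpf_adapter *adapter,
+ *					     struct idpf_vc_xn *xn,
+ *					     const struct idpf_ctlq_msg *msg)
+ *	{
+ *		if (msg->cookie.mbx.chnl_retval)
+ *			dev_err_ratelimited(&adapter->pdev->dev,
+ *					    "Async op %d failed: %d\n",
+ *					    msg->cookie.mbx.chnl_opcode,
+ *					    msg->cookie.mbx.chnl_retval);
+ *		return 0;
+ *	}
+ *
+ * Such a handler runs from the mailbox receive path, so it must not itself
+ * wait for another virtchnl reply.
+ */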
+
+/**
+ * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @ctlq_msg: controlq message to send back to the receiving thread
+ */
+static int
+idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ const void *payload = NULL;
+ size_t payload_size = 0;
+ struct idpf_vc_xn *xn;
+ u16 msg_info;
+ int err = 0;
+ u16 xn_idx;
+ u16 salt;
+
+ msg_info = ctlq_msg->ctx.sw_cookie.data;
+ xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
+ if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
+ xn_idx);
+ return -EINVAL;
+ }
+ xn = &adapter->vcxn_mngr->ring[xn_idx];
+ salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
+ if (xn->salt != salt) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
+ xn->salt, salt);
+ return -EINVAL;
+ }
- /* If we are here and a reset is detected do not wait but
- * return. Reset timing is out of drivers control. So
- * while we are cleaning resources as part of reset if the
- * underlying HW mailbox is gone, wait on mailbox messages
- * is not meaningful
+ idpf_vc_xn_lock(xn);
+ switch (xn->state) {
+ case IDPF_VC_XN_WAITING:
+ /* success */
+ break;
+ case IDPF_VC_XN_IDLE:
+ dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ case IDPF_VC_XN_SHUTDOWN:
+ /* ENXIO is a bit special here as the recv msg loop uses it to
+ * know if it should stop trying to clean the ring if we lost
+ * the virtchnl. We need to stop playing with registers and
+ * yield.
*/
- if (idpf_is_reset_detected(adapter))
- return 0;
+ err = -ENXIO;
+ goto out_unlock;
+ case IDPF_VC_XN_ASYNC:
+ err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
+ idpf_vc_xn_unlock(xn);
+ return err;
+ default:
+ dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EBUSY;
+ goto out_unlock;
+ }
- event = wait_event_timeout(*vchnl_wq,
- test_and_clear_bit(state, vc_state),
- msecs_to_jiffies(time_to_wait));
- if (event) {
- if (test_and_clear_bit(err_check, vc_state)) {
- dev_err(&adapter->pdev->dev, "VC response error %s\n",
- idpf_vport_vc_state_str[err_check]);
+ if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+ xn->reply_sz = 0;
+ xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+ err = -EINVAL;
+ goto out_unlock;
+ }
- return -EINVAL;
- }
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ xn->reply_sz = 0;
+ xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+ err = -EINVAL;
+ goto out_unlock;
+ }
- return 0;
- }
- num_waits--;
+ if (ctlq_msg->data_len) {
+ payload = ctlq_msg->ctx.indirect.payload->va;
+ payload_size = ctlq_msg->ctx.indirect.payload->size;
}
- /* Timeout occurred */
- dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n",
- idpf_vport_vc_state_str[state]);
+ xn->reply_sz = payload_size;
+ xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
- return -ETIMEDOUT;
+ if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
+ memcpy(xn->reply.iov_base, payload,
+ min_t(size_t, xn->reply.iov_len, payload_size));
+
+out_unlock:
+ idpf_vc_xn_unlock(xn);
+ /* we _cannot_ hold lock while calling complete */
+ complete(&xn->completed);
+
+ return err;
}
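+
+/* Note the asymmetry above: xn->reply_sz records the full payload size the
+ * device returned, while the memcpy() into the caller's buffer is clamped to
+ * xn->reply.iov_len. Callers of idpf_vc_xn_exec() therefore compare the
+ * returned size against what they expect (e.g. reply_sz < sizeof(vvi)) rather
+ * than assuming their buffer was completely filled.
+ */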
/**
- * idpf_min_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
+ * idpf_recv_mb_msg - Receive message over mailbox
+ * @adapter: Driver specific private structure
*
- * Returns 0 on success, negative on failure.
* Will receive control queue messages and post the receive buffers. Returns 0
+ * on success and negative on failure.
*/
-static int idpf_min_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check)
+int idpf_recv_mb_msg(struct idpf_adapter *adapter)
{
- return __idpf_wait_for_event(adapter, vport, state, err_check,
- IDPF_WAIT_FOR_EVENT_TIMEO_MIN);
-}
+ struct idpf_ctlq_msg ctlq_msg;
+ struct idpf_dma_mem *dma_mem;
+ int post_err, err;
+ u16 num_recv;
-/**
- * idpf_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout after 500ms
- * @err_check: check if this specific error bit is set
- *
- * Returns 0 on success, negative on failure.
- */
-static int idpf_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check)
-{
- /* Increasing the timeout in __IDPF_INIT_SW flow to consider large
- * number of VF's mailbox message responses. When a message is received
- * on mailbox, this thread is woken up by the idpf_recv_mb_msg before
- * the timeout expires. Only in the error case i.e. if no message is
- * received on mailbox, we wait for the complete timeout which is
- * less likely to happen.
- */
- return __idpf_wait_for_event(adapter, vport, state, err_check,
- IDPF_WAIT_FOR_EVENT_TIMEO);
+ while (1) {
+ /* This will get <= num_recv messages and report how many were
+ * actually received in num_recv.
+ */
+ num_recv = 1;
+ err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
+ if (err || !num_recv)
+ break;
+
+ if (ctlq_msg.data_len) {
+ dma_mem = ctlq_msg.ctx.indirect.payload;
+ } else {
+ dma_mem = NULL;
+ num_recv = 0;
+ }
+
+ if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
+ idpf_recv_event_msg(adapter, &ctlq_msg);
+ else
+ err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
+
+ post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
+ adapter->hw.arq,
+ &num_recv, &dma_mem);
+
+ /* If post failed clear the only buffer we supplied */
+ if (post_err) {
+ if (dma_mem)
+ dmam_free_coherent(&adapter->pdev->dev,
+ dma_mem->size, dma_mem->va,
+ dma_mem->pa);
+ break;
+ }
+
+ /* virtchnl is trying to shut down, stop cleaning */
+ if (err == -ENXIO)
+ break;
+ }
+
+ return err;
}
/**
@@ -785,7 +776,11 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
*/
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_version_info vvi;
+ ssize_t reply_sz;
+ u32 major, minor;
+ int err = 0;
if (adapter->virt_ver_maj) {
vvi.major = cpu_to_le32(adapter->virt_ver_maj);
@@ -795,43 +790,29 @@ static int idpf_send_ver_msg(struct idpf_adapter *adapter)
vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
}
- return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi),
- (u8 *)&vvi);
-}
-
-/**
- * idpf_recv_ver_msg - Receive virtchnl version message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need
- * to send version message again, otherwise negative on failure.
- */
-static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
-{
- struct virtchnl2_version_info vvi;
- u32 major, minor;
- int err;
+ xn_params.vc_op = VIRTCHNL2_OP_VERSION;
+ xn_params.send_buf.iov_base = &vvi;
+ xn_params.send_buf.iov_len = sizeof(vvi);
+ xn_params.recv_buf = xn_params.send_buf;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi,
- sizeof(vvi));
- if (err)
- return err;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(vvi))
+ return -EIO;
major = le32_to_cpu(vvi.major);
minor = le32_to_cpu(vvi.minor);
if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
- dev_warn(&adapter->pdev->dev,
- "Virtchnl major version (%d) greater than supported\n",
- major);
-
+ dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
return -EINVAL;
}
if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
minor > IDPF_VIRTCHNL_VERSION_MINOR)
- dev_warn(&adapter->pdev->dev,
- "Virtchnl minor version (%d) didn't match\n", minor);
+ dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");
/* If we have a mismatch, resend version to update receiver on what
* version we will use.
@@ -856,7 +837,9 @@ static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
*/
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
- struct virtchnl2_get_capabilities caps = { };
+ struct virtchnl2_get_capabilities caps = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
caps.csum_caps =
cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
@@ -913,21 +896,20 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_PROMISC |
VIRTCHNL2_CAP_LOOPBACK);
- return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps),
- (u8 *)&caps);
-}
+ xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
+ xn_params.send_buf.iov_base = &caps;
+ xn_params.send_buf.iov_len = sizeof(caps);
+ xn_params.recv_buf.iov_base = &adapter->caps;
+ xn_params.recv_buf.iov_len = sizeof(adapter->caps);
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
-/**
- * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl get capabilities message. Returns 0 on success, negative on
- * failure.
- */
-static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter)
-{
- return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps,
- sizeof(struct virtchnl2_get_capabilities));
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(adapter->caps))
+ return -EIO;
+
+ return 0;
}
/**
@@ -1254,8 +1236,10 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q)
{
struct virtchnl2_create_vport *vport_msg;
+ struct idpf_vc_xn_params xn_params = {};
u16 idx = adapter->next_vport;
int err, buf_size;
+ ssize_t reply_sz;
buf_size = sizeof(struct virtchnl2_create_vport);
if (!adapter->vport_params_reqd[idx]) {
@@ -1286,35 +1270,38 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
return err;
}
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, buf_size,
- (u8 *)vport_msg);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT,
- IDPF_VC_CREATE_VPORT_ERR);
- if (err) {
- dev_err(&adapter->pdev->dev, "Failed to receive create vport message");
-
- goto rel_lock;
- }
-
if (!adapter->vport_params_recvd[idx]) {
adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
GFP_KERNEL);
if (!adapter->vport_params_recvd[idx]) {
err = -ENOMEM;
- goto rel_lock;
+ goto free_vport_params;
}
}
- vport_msg = adapter->vport_params_recvd[idx];
- memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+ xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
+ xn_params.send_buf.iov_base = vport_msg;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0) {
+ err = reply_sz;
+ goto free_vport_params;
+ }
+ if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
+ err = -EIO;
+ goto free_vport_params;
+ }
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
+ return 0;
+
+free_vport_params:
+ kfree(adapter->vport_params_recvd[idx]);
+ adapter->vport_params_recvd[idx] = NULL;
+ kfree(adapter->vport_params_reqd[idx]);
+ adapter->vport_params_reqd[idx] = NULL;
return err;
}
@@ -1366,26 +1353,19 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
*/
int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT,
- IDPF_VC_DESTROY_VPORT_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1397,26 +1377,19 @@ rel_lock:
*/
int idpf_send_enable_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT,
- IDPF_VC_ENA_VPORT_ERR);
+ xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1428,26 +1401,19 @@ rel_lock:
*/
int idpf_send_disable_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT,
- IDPF_VC_DIS_VPORT_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1459,11 +1425,13 @@ rel_lock:
*/
static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
{
- struct virtchnl2_config_tx_queues *ctq;
+ struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
+ struct virtchnl2_txq_info *qi __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
int totqs, num_msgs, num_chunks;
- struct virtchnl2_txq_info *qi;
- int err = 0, i, k = 0;
+ ssize_t reply_sz;
+ int i, k = 0;
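+ /* The __free(kfree) annotations above (linux/cleanup.h) free ctq and qi
+ * automatically when they go out of scope, which is why the error paths
+ * below can simply return without explicit kfree() calls.
+ */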
totqs = vport->num_txq + vport->num_complq;
qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
@@ -1524,10 +1492,8 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
}
/* Make sure accounting agrees */
- if (k != totqs) {
- err = -EINVAL;
- goto error;
- }
+ if (k != totqs)
+ return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
* sending a control queue message buffer that is too large
@@ -1541,12 +1507,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
buf_sz = struct_size(ctq, qinfo, num_chunks);
ctq = kzalloc(buf_sz, GFP_KERNEL);
- if (!ctq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!ctq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
for (i = 0, k = 0; i < num_msgs; i++) {
memset(ctq, 0, buf_sz);
@@ -1554,17 +1519,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
ctq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(vport->adapter,
- VIRTCHNL2_OP_CONFIG_TX_QUEUES,
- buf_sz, (u8 *)ctq);
- if (err)
- goto mbx_error;
-
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_CONFIG_TXQ,
- IDPF_VC_CONFIG_TXQ_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = ctq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
totqs -= num_chunks;
@@ -1573,13 +1532,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
buf_sz = struct_size(ctq, qinfo, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(ctq);
-error:
- kfree(qi);
-
- return err;
+ return 0;
}
/**
@@ -1591,11 +1544,13 @@ error:
*/
static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
{
- struct virtchnl2_config_rx_queues *crq;
+ struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
+ struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
int totqs, num_msgs, num_chunks;
- struct virtchnl2_rxq_info *qi;
- int err = 0, i, k = 0;
+ ssize_t reply_sz;
+ int i, k = 0;
totqs = vport->num_rxq + vport->num_bufq;
qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
@@ -1676,10 +1631,8 @@ common_qi_fields:
}
/* Make sure accounting agrees */
- if (k != totqs) {
- err = -EINVAL;
- goto error;
- }
+ if (k != totqs)
+ return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
* sending a control queue message buffer that is too large
@@ -1693,12 +1646,11 @@ common_qi_fields:
buf_sz = struct_size(crq, qinfo, num_chunks);
crq = kzalloc(buf_sz, GFP_KERNEL);
- if (!crq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!crq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
for (i = 0, k = 0; i < num_msgs; i++) {
memset(crq, 0, buf_sz);
@@ -1706,17 +1658,11 @@ common_qi_fields:
crq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(vport->adapter,
- VIRTCHNL2_OP_CONFIG_RX_QUEUES,
- buf_sz, (u8 *)crq);
- if (err)
- goto mbx_error;
-
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_CONFIG_RXQ,
- IDPF_VC_CONFIG_RXQ_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = crq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
totqs -= num_chunks;
@@ -1725,42 +1671,28 @@ common_qi_fields:
buf_sz = struct_size(crq, qinfo, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(crq);
-error:
- kfree(qi);
-
- return err;
+ return 0;
}
/**
* idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
* queues message
* @vport: virtual port data structure
- * @vc_op: virtchnl op code to send
+ * @ena: if true enable, false disable
*
* Send enable or disable queues virtchnl message. Returns 0 on success,
* negative on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
{
+ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
+ struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_queue_chunks *qcs;
- struct virtchnl2_queue_chunk *qc;
u32 config_sz, chunk_sz, buf_sz;
- int i, j, k = 0, err = 0;
-
- /* validate virtchnl op */
- switch (vc_op) {
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- break;
- default:
- return -EINVAL;
- }
+ ssize_t reply_sz;
+ int i, j, k = 0;
num_txq = vport->num_txq + vport->num_complq;
num_rxq = vport->num_rxq + vport->num_bufq;
@@ -1779,10 +1711,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
}
- if (vport->num_txq != k) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_txq != k)
+ return -EINVAL;
if (!idpf_is_queue_model_split(vport->txq_model))
goto setup_rx;
@@ -1794,10 +1724,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
- if (vport->num_complq != (k - vport->num_txq)) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_complq != (k - vport->num_txq))
+ return -EINVAL;
setup_rx:
for (i = 0; i < vport->num_rxq_grp; i++) {
@@ -1823,10 +1751,8 @@ setup_rx:
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
}
- if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
+ return -EINVAL;
if (!idpf_is_queue_model_split(vport->rxq_model))
goto send_msg;
@@ -1845,10 +1771,8 @@ setup_rx:
}
if (vport->num_bufq != k - (vport->num_txq +
vport->num_complq +
- vport->num_rxq)) {
- err = -EINVAL;
- goto error;
- }
+ vport->num_rxq))
+ return -EINVAL;
send_msg:
/* Chunk up the queue info into multiple messages */
@@ -1861,12 +1785,16 @@ send_msg:
buf_sz = struct_size(eq, chunks.chunks, num_chunks);
eq = kzalloc(buf_sz, GFP_KERNEL);
- if (!eq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!eq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ if (ena) {
+ xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ } else {
+ xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ }
for (i = 0, k = 0; i < num_msgs; i++) {
memset(eq, 0, buf_sz);
@@ -1875,20 +1803,11 @@ send_msg:
qcs = &eq->chunks;
memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq);
- if (err)
- goto mbx_error;
-
- if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES)
- err = idpf_wait_for_event(adapter, vport,
- IDPF_VC_ENA_QUEUES,
- IDPF_VC_ENA_QUEUES_ERR);
- else
- err = idpf_min_wait_for_event(adapter, vport,
- IDPF_VC_DIS_QUEUES,
- IDPF_VC_DIS_QUEUES_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = eq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
num_q -= num_chunks;
@@ -1897,13 +1816,7 @@ send_msg:
buf_sz = struct_size(eq, chunks.chunks, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(eq);
-error:
- kfree(qc);
-
- return err;
+ return 0;
}
/**
@@ -1917,12 +1830,13 @@ error:
*/
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_queue_vector_maps *vqvm;
- struct virtchnl2_queue_vector *vqv;
+ struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
+ struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
u32 num_msgs, num_chunks, num_q;
- int i, j, k = 0, err = 0;
+ ssize_t reply_sz;
+ int i, j, k = 0;
num_q = vport->num_txq + vport->num_rxq;
@@ -1952,10 +1866,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
}
- if (vport->num_txq != k) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_txq != k)
+ return -EINVAL;
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
@@ -1982,15 +1894,11 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
if (idpf_is_queue_model_split(vport->txq_model)) {
- if (vport->num_rxq != k - vport->num_complq) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - vport->num_complq)
+ return -EINVAL;
} else {
- if (vport->num_rxq != k - vport->num_txq) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - vport->num_txq)
+ return -EINVAL;
}
/* Chunk up the vector info into multiple messages */
@@ -2003,39 +1911,28 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
buf_sz = struct_size(vqvm, qv_maps, num_chunks);
vqvm = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqvm) {
- err = -ENOMEM;
- goto error;
- }
+ if (!vqvm)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ if (map) {
+ xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ } else {
+ xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ }
for (i = 0, k = 0; i < num_msgs; i++) {
memset(vqvm, 0, buf_sz);
+ xn_params.send_buf.iov_base = vqvm;
+ xn_params.send_buf.iov_len = buf_sz;
vqvm->vport_id = cpu_to_le32(vport->vport_id);
vqvm->num_qv_maps = cpu_to_le16(num_chunks);
memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
- if (map) {
- err = idpf_send_mb_msg(adapter,
- VIRTCHNL2_OP_MAP_QUEUE_VECTOR,
- buf_sz, (u8 *)vqvm);
- if (!err)
- err = idpf_wait_for_event(adapter, vport,
- IDPF_VC_MAP_IRQ,
- IDPF_VC_MAP_IRQ_ERR);
- } else {
- err = idpf_send_mb_msg(adapter,
- VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
- buf_sz, (u8 *)vqvm);
- if (!err)
- err =
- idpf_min_wait_for_event(adapter, vport,
- IDPF_VC_UNMAP_IRQ,
- IDPF_VC_UNMAP_IRQ_ERR);
- }
- if (err)
- goto mbx_error;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
num_q -= num_chunks;
@@ -2044,13 +1941,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
buf_sz = struct_size(vqvm, qv_maps, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(vqvm);
-error:
- kfree(vqv);
-
- return err;
+ return 0;
}
/**
@@ -2062,7 +1953,7 @@ error:
*/
int idpf_send_enable_queues_msg(struct idpf_vport *vport)
{
- return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES);
+ return idpf_send_ena_dis_queues_msg(vport, true);
}
/**
@@ -2076,7 +1967,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
int err, i;
- err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
+ err = idpf_send_ena_dis_queues_msg(vport, false);
if (err)
return err;
@@ -2087,8 +1978,10 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
/* schedule the napi to receive all the marker packets */
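+ /* napi_schedule() only raises the NET_RX softirq; wrapping the loop in
+ * local_bh_disable()/local_bh_enable() lets local_bh_enable() run the
+ * raised softirq promptly from this process context instead of leaving it
+ * pending until a later interrupt or ksoftirqd run.
+ */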
+ local_bh_disable();
for (i = 0; i < vport->num_q_vectors; i++)
napi_schedule(&vport->q_vectors[i].napi);
+ local_bh_enable();
return idpf_wait_for_marker_event(vport);
}
@@ -2122,22 +2015,21 @@ static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchun
*/
int idpf_send_delete_queues_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
struct virtchnl2_create_vport *vport_params;
struct virtchnl2_queue_reg_chunks *chunks;
- struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
u16 vport_idx = vport->idx;
- int buf_size, err;
+ ssize_t reply_sz;
u16 num_chunks;
+ int buf_size;
- vport_config = adapter->vport_config[vport_idx];
+ vport_config = vport->adapter->vport_config[vport_idx];
if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
+ chunks = &vport_config->req_qs_chunks->chunks;
} else {
- vport_params = adapter->vport_params_recvd[vport_idx];
+ vport_params = vport->adapter->vport_params_recvd[vport_idx];
chunks = &vport_params->chunks;
}
@@ -2154,21 +2046,13 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
num_chunks);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES,
- buf_size, (u8 *)eq);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES,
- IDPF_VC_DEL_QUEUES_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(eq);
+ xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = eq;
+ xn_params.send_buf.iov_len = buf_size;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2203,14 +2087,21 @@ int idpf_send_config_queues_msg(struct idpf_vport *vport)
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
- struct virtchnl2_add_queues aq = { };
- struct virtchnl2_add_queues *vc_msg;
+ struct virtchnl2_add_queues aq = {};
u16 vport_idx = vport->idx;
- int size, err;
+ ssize_t reply_sz;
+ int size;
+
+ vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!vc_msg)
+ return -ENOMEM;
- vport_config = adapter->vport_config[vport_idx];
+ vport_config = vport->adapter->vport_config[vport_idx];
+ kfree(vport_config->req_qs_chunks);
+ vport_config->req_qs_chunks = NULL;
aq.vport_id = cpu_to_le32(vport->vport_id);
aq.num_tx_q = cpu_to_le16(num_tx_q);
@@ -2218,47 +2109,33 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
aq.num_rx_q = cpu_to_le16(num_rx_q);
aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
- mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES,
- sizeof(struct virtchnl2_add_queues), (u8 *)&aq);
- if (err)
- goto rel_lock;
-
- /* We want vport to be const to prevent incidental code changes making
- * changes to the vport config. We're making a special exception here
- * to discard const to use the virtchnl.
- */
- err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport,
- IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR);
- if (err)
- goto rel_lock;
-
- kfree(vport_config->req_qs_chunks);
- vport_config->req_qs_chunks = NULL;
+ xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &aq;
+ xn_params.send_buf.iov_len = sizeof(aq);
+ xn_params.recv_buf.iov_base = vc_msg;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg;
/* compare vc_msg num queues with vport num queues */
if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
- le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) {
- err = -EINVAL;
- goto rel_lock;
- }
+ le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
+ return -EINVAL;
size = struct_size(vc_msg, chunks.chunks,
le16_to_cpu(vc_msg->chunks.num_chunks));
- vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
- if (!vport_config->req_qs_chunks) {
- err = -ENOMEM;
- goto rel_lock;
- }
+ if (reply_sz < size)
+ return -EIO;
-rel_lock:
- mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock);
+ vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
+ if (!vport_config->req_qs_chunks)
+ return -ENOMEM;
- return err;
+ return 0;
}
/**
@@ -2270,53 +2147,49 @@ rel_lock:
*/
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
{
- struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec;
- struct virtchnl2_alloc_vectors ac = { };
+ struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
+ struct virtchnl2_alloc_vectors ac = {};
+ ssize_t reply_sz;
u16 num_vchunks;
- int size, err;
+ int size;
ac.num_vectors = cpu_to_le16(num_vectors);
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS,
- sizeof(ac), (u8 *)&ac);
- if (err)
- goto rel_lock;
+ rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcvd_vec)
+ return -ENOMEM;
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS,
- IDPF_VC_ALLOC_VECTORS_ERR);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
+ xn_params.send_buf.iov_base = &ac;
+ xn_params.send_buf.iov_len = sizeof(ac);
+ xn_params.recv_buf.iov_base = rcvd_vec;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg;
num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
-
size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
- if (size > sizeof(adapter->vc_msg)) {
- err = -EINVAL;
- goto rel_lock;
- }
+ if (reply_sz < size)
+ return -EIO;
+
+ if (size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
kfree(adapter->req_vec_chunks);
- adapter->req_vec_chunks = NULL;
- adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL);
- if (!adapter->req_vec_chunks) {
- err = -ENOMEM;
- goto rel_lock;
- }
+ adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
+ if (!adapter->req_vec_chunks)
+ return -ENOMEM;
- alloc_vec = adapter->req_vec_chunks;
- if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) {
+ if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
- err = -EINVAL;
+ return -EINVAL;
}
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2329,29 +2202,24 @@ int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
{
struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
- int buf_size, err;
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+ int buf_size;
buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size,
- (u8 *)vcs);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS,
- IDPF_VC_DEALLOC_VECTORS_ERR);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
+ xn_params.send_buf.iov_base = vcs;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2374,25 +2242,18 @@ static int idpf_get_max_vfs(struct idpf_adapter *adapter)
*/
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
{
- struct virtchnl2_sriov_vfs_info svi = { };
- int err;
+ struct virtchnl2_sriov_vfs_info svi = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
svi.num_vfs = cpu_to_le16(num_vfs);
+ xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &svi;
+ xn_params.send_buf.iov_len = sizeof(svi);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS,
- sizeof(svi), (u8 *)&svi);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS,
- IDPF_VC_SET_SRIOV_VFS_ERR);
-
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2405,10 +2266,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct rtnl_link_stats64 *netstats = &np->netstats;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_vport_stats stats_msg = { };
- struct virtchnl2_vport_stats *stats;
- int err;
+ struct virtchnl2_vport_stats stats_msg = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+
/* Don't send get_stats message if the link is down */
if (np->state <= __IDPF_VPORT_DOWN)
@@ -2416,46 +2277,38 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
stats_msg.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
+ xn_params.send_buf.iov_base = &stats_msg;
+ xn_params.send_buf.iov_len = sizeof(stats_msg);
+ xn_params.recv_buf = xn_params.send_buf;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS,
- sizeof(struct virtchnl2_vport_stats),
- (u8 *)&stats_msg);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS,
- IDPF_VC_GET_STATS_ERR);
- if (err)
- goto rel_lock;
-
- stats = (struct virtchnl2_vport_stats *)vport->vc_msg;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(stats_msg))
+ return -EIO;
spin_lock_bh(&np->stats_lock);
- netstats->rx_packets = le64_to_cpu(stats->rx_unicast) +
- le64_to_cpu(stats->rx_multicast) +
- le64_to_cpu(stats->rx_broadcast);
- netstats->rx_bytes = le64_to_cpu(stats->rx_bytes);
- netstats->rx_dropped = le64_to_cpu(stats->rx_discards);
- netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop);
- netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length);
-
- netstats->tx_packets = le64_to_cpu(stats->tx_unicast) +
- le64_to_cpu(stats->tx_multicast) +
- le64_to_cpu(stats->tx_broadcast);
- netstats->tx_bytes = le64_to_cpu(stats->tx_bytes);
- netstats->tx_errors = le64_to_cpu(stats->tx_errors);
- netstats->tx_dropped = le64_to_cpu(stats->tx_discards);
-
- vport->port_stats.vport_stats = *stats;
+ netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
+ le64_to_cpu(stats_msg.rx_multicast) +
+ le64_to_cpu(stats_msg.rx_broadcast);
+ netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
+ le64_to_cpu(stats_msg.tx_multicast) +
+ le64_to_cpu(stats_msg.tx_broadcast);
+ netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
+ netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
+ netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
+ netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
+ netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
+ netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
+
+ vport->port_stats.vport_stats = stats_msg;
spin_unlock_bh(&np->stats_lock);
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2467,70 +2320,70 @@ rel_lock:
*/
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_rss_lut *recv_rl;
+ struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
+ struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_rss_data *rss_data;
- struct virtchnl2_rss_lut *rl;
int buf_size, lut_buf_size;
- int i, err;
+ ssize_t reply_sz;
+ int i;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
rl = kzalloc(buf_size, GFP_KERNEL);
if (!rl)
return -ENOMEM;
rl->vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
- if (!get) {
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = rl;
+ xn_params.send_buf.iov_len = buf_size;
+
+ if (get) {
+ recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!recv_rl)
+ return -ENOMEM;
+ xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
+ xn_params.recv_buf.iov_base = recv_rl;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ } else {
rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
for (i = 0; i < rss_data->rss_lut_size; i++)
rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT,
- buf_size, (u8 *)rl);
- if (err)
- goto free_mem;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT,
- IDPF_VC_SET_RSS_LUT_ERR);
-
- goto free_mem;
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
}
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (!get)
+ return 0;
+ if (reply_sz < sizeof(struct virtchnl2_rss_lut))
+ return -EIO;
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT,
- buf_size, (u8 *)rl);
- if (err)
- goto free_mem;
+ lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
+ if (reply_sz < lut_buf_size)
+ return -EIO;
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT,
- IDPF_VC_GET_RSS_LUT_ERR);
- if (err)
- goto free_mem;
-
- recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg;
+ /* size didn't change, we can reuse existing lut buf */
if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
goto do_memcpy;
rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
kfree(rss_data->rss_lut);
- lut_buf_size = rss_data->rss_lut_size * sizeof(u32);
rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
if (!rss_data->rss_lut) {
rss_data->rss_lut_size = 0;
- err = -ENOMEM;
- goto free_mem;
+ return -ENOMEM;
}
do_memcpy:
- memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size);
-free_mem:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(rl);
+ memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);
- return err;
+ return 0;
}
/**
@@ -2542,68 +2395,70 @@ free_mem:
*/
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_rss_key *recv_rk;
+ struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
+ struct virtchnl2_rss_key *rk __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_rss_data *rss_data;
- struct virtchnl2_rss_key *rk;
- int i, buf_size, err;
+ ssize_t reply_sz;
+ int i, buf_size;
+ u16 key_size;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
rk = kzalloc(buf_size, GFP_KERNEL);
if (!rk)
return -ENOMEM;
rk->vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
-
+ xn_params.send_buf.iov_base = rk;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
if (get) {
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY,
- buf_size, (u8 *)rk);
- if (err)
- goto error;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY,
- IDPF_VC_GET_RSS_KEY_ERR);
- if (err)
- goto error;
-
- recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg;
- if (rss_data->rss_key_size !=
- le16_to_cpu(recv_rk->key_len)) {
- rss_data->rss_key_size =
- min_t(u16, NETDEV_RSS_KEY_LEN,
- le16_to_cpu(recv_rk->key_len));
- kfree(rss_data->rss_key);
- rss_data->rss_key = kzalloc(rss_data->rss_key_size,
- GFP_KERNEL);
- if (!rss_data->rss_key) {
- rss_data->rss_key_size = 0;
- err = -ENOMEM;
- goto error;
- }
- }
- memcpy(rss_data->rss_key, recv_rk->key_flex,
- rss_data->rss_key_size);
+ recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!recv_rk)
+ return -ENOMEM;
+
+ xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
+ xn_params.recv_buf.iov_base = recv_rk;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
} else {
rk->key_len = cpu_to_le16(rss_data->rss_key_size);
for (i = 0; i < rss_data->rss_key_size; i++)
rk->key_flex[i] = rss_data->rss_key[i];
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY,
- buf_size, (u8 *)rk);
- if (err)
- goto error;
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
+ }
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (!get)
+ return 0;
+ if (reply_sz < sizeof(struct virtchnl2_rss_key))
+ return -EIO;
+
+ key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
+ le16_to_cpu(recv_rk->key_len));
+ if (reply_sz < key_size)
+ return -EIO;
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY,
- IDPF_VC_SET_RSS_KEY_ERR);
+ /* key len didn't change, reuse existing buf */
+ if (rss_data->rss_key_size == key_size)
+ goto do_memcpy;
+
+ rss_data->rss_key_size = key_size;
+ kfree(rss_data->rss_key);
+ rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
+ if (!rss_data->rss_key) {
+ rss_data->rss_key_size = 0;
+ return -ENOMEM;
}
-error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(rk);
+do_memcpy:
+ memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);
- return err;
+ return 0;
}
/**
@@ -2655,13 +2510,15 @@ static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
*/
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
+ struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
+ struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
- struct virtchnl2_get_ptype_info get_ptype_info;
int max_ptype, ptypes_recvd = 0, ptype_offset;
struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_get_ptype_info *ptype_info;
+ struct idpf_vc_xn_params xn_params = {};
u16 next_ptype_id = 0;
- int err = 0, i, j, k;
+ ssize_t reply_sz;
+ int i, j, k;
if (idpf_is_queue_model_split(vport->rxq_model))
max_ptype = IDPF_RX_MAX_PTYPE;
@@ -2670,43 +2527,44 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
+ if (!get_ptype_info)
+ return -ENOMEM;
+
ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
if (!ptype_info)
return -ENOMEM;
- mutex_lock(&adapter->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
+ xn_params.send_buf.iov_base = get_ptype_info;
+ xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
+ xn_params.recv_buf.iov_base = ptype_info;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
while (next_ptype_id < max_ptype) {
- get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id);
+ get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);
if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
- get_ptype_info.num_ptypes =
+ get_ptype_info->num_ptypes =
cpu_to_le16(max_ptype - next_ptype_id);
else
- get_ptype_info.num_ptypes =
+ get_ptype_info->num_ptypes =
cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
- sizeof(struct virtchnl2_get_ptype_info),
- (u8 *)&get_ptype_info);
- if (err)
- goto vc_buf_unlock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO,
- IDPF_VC_GET_PTYPE_INFO_ERR);
- if (err)
- goto vc_buf_unlock;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+ if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN)
+ return -EIO;
ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
- if (ptypes_recvd > max_ptype) {
- err = -EINVAL;
- goto vc_buf_unlock;
- }
+ if (ptypes_recvd > max_ptype)
+ return -EINVAL;
- next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) +
- le16_to_cpu(get_ptype_info.num_ptypes);
+ next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
+ le16_to_cpu(get_ptype_info->num_ptypes);
ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
@@ -2719,17 +2577,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
((u8 *)ptype_info + ptype_offset);
ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
- if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) {
- err = -EINVAL;
- goto vc_buf_unlock;
- }
+ if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
/* 0xFFFF indicates end of ptypes */
if (le16_to_cpu(ptype->ptype_id_10) ==
- IDPF_INVALID_PTYPE_ID) {
- err = 0;
- goto vc_buf_unlock;
- }
+ IDPF_INVALID_PTYPE_ID)
+ return 0;
if (idpf_is_queue_model_split(vport->rxq_model))
k = le16_to_cpu(ptype->ptype_id_10);
@@ -2857,11 +2711,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
}
}
-vc_buf_unlock:
- mutex_unlock(&adapter->vc_buf_lock);
- kfree(ptype_info);
-
- return err;
+ return 0;
}
/**
@@ -2873,27 +2723,20 @@ vc_buf_unlock:
*/
int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_loopback loopback;
- int err;
+ ssize_t reply_sz;
loopback.vport_id = cpu_to_le32(vport->vport_id);
loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK,
- sizeof(loopback), (u8 *)&loopback);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &loopback;
+ xn_params.send_buf.iov_len = sizeof(loopback);
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_LOOPBACK_STATE,
- IDPF_VC_LOOPBACK_STATE_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2958,7 +2801,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
return -ENOENT;
}
- adapter->state = __IDPF_STARTUP;
+ adapter->state = __IDPF_VER_CHECK;
return 0;
}
@@ -3055,35 +2898,42 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
u16 num_max_vports;
int err = 0;
+ if (!adapter->vcxn_mngr) {
+ adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
+ if (!adapter->vcxn_mngr) {
+ err = -ENOMEM;
+ goto init_failed;
+ }
+ }
+ idpf_vc_xn_init(adapter->vcxn_mngr);
+
while (adapter->state != __IDPF_INIT_SW) {
switch (adapter->state) {
- case __IDPF_STARTUP:
- if (idpf_send_ver_msg(adapter))
- goto init_failed;
- adapter->state = __IDPF_VER_CHECK;
- goto restart;
case __IDPF_VER_CHECK:
- err = idpf_recv_ver_msg(adapter);
- if (err == -EIO) {
- return err;
- } else if (err == -EAGAIN) {
- adapter->state = __IDPF_STARTUP;
+ err = idpf_send_ver_msg(adapter);
+ switch (err) {
+ case 0:
+ /* success, move state machine forward */
+ adapter->state = __IDPF_GET_CAPS;
+ fallthrough;
+ case -EAGAIN:
goto restart;
- } else if (err) {
+ default:
+ /* Something bad happened, try again but only a
+ * few times.
+ */
goto init_failed;
}
- if (idpf_send_get_caps_msg(adapter))
- goto init_failed;
- adapter->state = __IDPF_GET_CAPS;
- goto restart;
case __IDPF_GET_CAPS:
- if (idpf_recv_get_caps_msg(adapter))
+ err = idpf_send_get_caps_msg(adapter);
+ if (err)
goto init_failed;
adapter->state = __IDPF_INIT_SW;
break;
default:
dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
adapter->state);
+ err = -EINVAL;
goto init_failed;
}
break;
@@ -3142,7 +2992,9 @@ restart:
queue_delayed_work(adapter->init_wq, &adapter->init_task,
msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
- goto no_err;
+ set_bit(IDPF_VC_CORE_INIT, adapter->flags);
+
+ return 0;
err_intr_req:
cancel_delayed_work_sync(&adapter->serv_task);
@@ -3151,7 +3003,6 @@ err_intr_req:
err_netdev_alloc:
kfree(adapter->vports);
adapter->vports = NULL;
-no_err:
return err;
init_failed:
@@ -3168,7 +3019,9 @@ init_failed:
* register writes might not have taken effect. Retry to initialize
* the mailbox again
*/
- adapter->state = __IDPF_STARTUP;
+ adapter->state = __IDPF_VER_CHECK;
+ if (adapter->vcxn_mngr)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_dflt_mbx(adapter);
set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
@@ -3184,29 +3037,22 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
- int i;
+ if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
+ return;
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
- /* Set all bits as we dont know on which vc_state the vhnl_wq is
- * waiting on and wakeup the virtchnl workqueue even if it is waiting
- * for the response as we are going down
- */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- set_bit(i, adapter->vc_state);
- wake_up(&adapter->vchnl_wq);
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
idpf_vport_params_buf_rel(adapter);
- /* Clear all the bits */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- clear_bit(i, adapter->vc_state);
-
kfree(adapter->vports);
adapter->vports = NULL;
+
+ clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
}
/**
@@ -3285,6 +3131,8 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS);
+ idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
+
idpf_vport_init_num_qs(vport, vport_msg);
idpf_vport_calc_num_q_desc(vport);
idpf_vport_calc_num_q_groups(vport);
@@ -3620,6 +3468,75 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
}
/**
+ * idpf_mac_filter_async_handler - Async callback for mac filters
+ * @adapter: private data struct
+ * @xn: transaction for message
+ * @ctlq_msg: received message
+ *
+ * In some scenarios the driver can't sleep and wait for a reply (e.g. when
+ * the stack is holding rtnl_lock) when adding a new mac filter, which makes
+ * errors returned in the reply hard to handle. The best we can ultimately
+ * do is remove the filter from our list of mac filters and report the
+ * error.
+ */
+static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
+ struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct virtchnl2_mac_addr_list *ma_list;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_mac_addr *mac_addr;
+ struct idpf_mac_filter *f, *tmp;
+ struct list_head *ma_list_head;
+ struct idpf_vport *vport;
+ u16 num_entries;
+ int i;
+
+ /* on success we're done; we're only here if something bad happened */
+ if (!ctlq_msg->cookie.mbx.chnl_retval)
+ return 0;
+
+ /* make sure at least struct is there */
+ if (xn->reply_sz < sizeof(*ma_list))
+ goto invalid_payload;
+
+ ma_list = ctlq_msg->ctx.indirect.payload->va;
+ mac_addr = ma_list->mac_addr_list;
+ num_entries = le16_to_cpu(ma_list->num_mac_addr);
+ /* we should have received a buffer at least this big */
+ if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
+ goto invalid_payload;
+
+ vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
+ if (!vport)
+ goto invalid_payload;
+
+ vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)];
+ ma_list_head = &vport_config->user_config.mac_filter_list;
+
+ /* We can't do much to reconcile bad filters at this point; however, we
+ * should at least remove them from our list one way or the other so we
+ * have some idea which good filters we still have.
+ */
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+ list_for_each_entry_safe(f, tmp, ma_list_head, list)
+ for (i = 0; i < num_entries; i++)
+ if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
+ list_del(&f->list);
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+ dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
+ xn->vc_op);
+
+ return 0;
+
+invalid_payload:
+ dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
+ xn->vc_op, xn->reply_sz);
+
+ return -EINVAL;
+}
+
+/**
* idpf_add_del_mac_filters - Add/del mac filters
* @vport: Virtual port data structure
* @np: Netdev private structure
@@ -3632,17 +3549,21 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
struct idpf_netdev_priv *np,
bool add, bool async)
{
- struct virtchnl2_mac_addr_list *ma_list = NULL;
+ struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
+ struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
struct idpf_adapter *adapter = np->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
- enum idpf_vport_config_flags mac_flag;
- struct pci_dev *pdev = adapter->pdev;
- enum idpf_vport_vc_state vc, vc_err;
- struct virtchnl2_mac_addr *mac_addr;
- struct idpf_mac_filter *f, *tmp;
u32 num_msgs, total_filters = 0;
- int i = 0, k, err = 0;
- u32 vop;
+ struct idpf_mac_filter *f;
+ ssize_t reply_sz;
+ int i = 0, k;
+
+ xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
+ VIRTCHNL2_OP_DEL_MAC_ADDR;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.async = async;
+ xn_params.async_handler = idpf_mac_filter_async_handler;
vport_config = adapter->vport_config[np->vport_idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
@@ -3666,13 +3587,13 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
GFP_ATOMIC);
if (!mac_addr) {
- err = -ENOMEM;
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- goto error;
+
+ return -ENOMEM;
}
- list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list,
- list) {
+ list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
+ list) {
if (add && f->add) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
i++;
@@ -3691,26 +3612,11 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- if (add) {
- vop = VIRTCHNL2_OP_ADD_MAC_ADDR;
- vc = IDPF_VC_ADD_MAC_ADDR;
- vc_err = IDPF_VC_ADD_MAC_ADDR_ERR;
- mac_flag = IDPF_VPORT_ADD_MAC_REQ;
- } else {
- vop = VIRTCHNL2_OP_DEL_MAC_ADDR;
- vc = IDPF_VC_DEL_MAC_ADDR;
- vc_err = IDPF_VC_DEL_MAC_ADDR_ERR;
- mac_flag = IDPF_VPORT_DEL_MAC_REQ;
- }
-
/* Chunk up the filters into multiple messages to avoid
* sending a control queue message buffer that is too large
*/
num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
- if (!async)
- mutex_lock(&vport->vc_buf_lock);
-
for (i = 0, k = 0; i < num_msgs; i++) {
u32 entries_size, buf_size, num_entries;
@@ -3722,10 +3628,8 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
kfree(ma_list);
ma_list = kzalloc(buf_size, GFP_ATOMIC);
- if (!ma_list) {
- err = -ENOMEM;
- goto list_prep_error;
- }
+ if (!ma_list)
+ return -ENOMEM;
} else {
memset(ma_list, 0, buf_size);
}
@@ -3734,34 +3638,17 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
ma_list->num_mac_addr = cpu_to_le16(num_entries);
memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
- if (async)
- set_bit(mac_flag, vport_config->flags);
-
- err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list);
- if (err)
- goto mbx_error;
-
- if (!async) {
- err = idpf_wait_for_event(adapter, vport, vc, vc_err);
- if (err)
- goto mbx_error;
- }
+ xn_params.send_buf.iov_base = ma_list;
+ xn_params.send_buf.iov_len = buf_size;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_entries;
total_filters -= num_entries;
}
-mbx_error:
- if (!async)
- mutex_unlock(&vport->vc_buf_lock);
- kfree(ma_list);
-list_prep_error:
- kfree(mac_addr);
-error:
- if (err)
- dev_err(&pdev->dev, "Failed to add or del mac filters %d", err);
-
- return err;
+ return 0;
}
/**
@@ -3778,9 +3665,10 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
struct idpf_vport_user_config_data *config_data,
u32 vport_id)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_promisc_info vpi;
+ ssize_t reply_sz;
u16 flags = 0;
- int err;
if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
flags |= VIRTCHNL2_UNICAST_PROMISC;
@@ -3790,9 +3678,13 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
vpi.vport_id = cpu_to_le32(vport_id);
vpi.flags = cpu_to_le16(flags);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE,
- sizeof(struct virtchnl2_promisc_info),
- (u8 *)&vpi);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &vpi;
+ xn_params.send_buf.iov_len = sizeof(vpi);
+ /* setting promiscuous is only ever done asynchronously */
+ xn_params.async = true;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
new file mode 100644
index 000000000000..83da5d8da56b
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _IDPF_VIRTCHNL_H_
+#define _IDPF_VIRTCHNL_H_
+
+struct idpf_adapter;
+struct idpf_netdev_priv;
+struct idpf_vec_regs;
+struct idpf_vport;
+struct idpf_vport_max_q;
+struct idpf_vport_user_config_data;
+
+int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
+void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
+int idpf_vc_core_init(struct idpf_adapter *adapter);
+void idpf_vc_core_deinit(struct idpf_adapter *adapter);
+
+int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+ struct idpf_vec_regs *reg_vals);
+int idpf_queue_reg_init(struct idpf_vport *vport);
+int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+
+int idpf_recv_mb_msg(struct idpf_adapter *adapter);
+int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
+ u16 msg_size, u8 *msg, u16 cookie);
+
+void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
+u32 idpf_get_vport_id(struct idpf_vport *vport);
+int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
+int idpf_send_enable_vport_msg(struct idpf_vport *vport);
+int idpf_send_disable_vport_msg(struct idpf_vport *vport);
+
+int idpf_vport_adjust_qs(struct idpf_vport *vport);
+int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
+ u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
+int idpf_send_delete_queues_msg(struct idpf_vport *vport);
+int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+int idpf_send_disable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport);
+
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
+int idpf_get_vec_ids(struct idpf_adapter *adapter,
+ u16 *vecids, int num_vecids,
+ struct virtchnl2_vector_chunks *chunks);
+int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
+int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
+
+int idpf_add_del_mac_filters(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ bool add, bool async);
+int idpf_set_promiscuous(struct idpf_adapter *adapter,
+ struct idpf_vport_user_config_data *config_data,
+ u32 vport_id);
+int idpf_check_supported_desc_ids(struct idpf_vport *vport);
+int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
+int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
+int idpf_send_get_stats_msg(struct idpf_vport *vport);
+int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
+int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
+int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+
+#endif /* _IDPF_VIRTCHNL_H_ */
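Every conversion in this series follows the same calling convention: the caller fills a struct idpf_vc_xn_params with the opcode, send/receive buffers and timeout, then calls idpf_vc_xn_exec(), which returns the reply length or a negative error. The minimal sketch below restates that pattern; it is not part of the patch, the opcode and payload type are placeholders borrowed from the hunks above, and the idpf driver headers are assumed to be in scope.

/* Sketch only: the transaction pattern used throughout this series. */
static int example_send_simple_msg(struct idpf_adapter *adapter, u32 vport_id)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_loopback req = {};		/* placeholder payload */
	ssize_t reply_sz;

	req.vport_id = cpu_to_le32(vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;	/* placeholder opcode */
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = &req;
	xn_params.send_buf.iov_len = sizeof(req);
	/* recv_buf can be left empty when the reply payload isn't needed */

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	/* negative value: error; otherwise: number of reply bytes received */
	return reply_sz < 0 ? reply_sz : 0;
}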
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 07e72c72d156..63deb120359c 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -4,6 +4,8 @@
#ifndef _VIRTCHNL2_H_
#define _VIRTCHNL2_H_
+#include <linux/if_ether.h>
+
/* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or
* VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
* and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
@@ -17,8 +19,6 @@
* must remain unchanged over time, so we specify explicit values for all enums.
*/
-#include "virtchnl2_lan_desc.h"
-
/* This macro is used to generate compilation errors if a structure
* is not exactly the correct length.
*/
@@ -555,7 +555,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
struct virtchnl2_queue_reg_chunks {
__le16 num_chunks;
u8 pad[6];
- struct virtchnl2_queue_reg_chunk chunks[];
+ struct virtchnl2_queue_reg_chunk chunks[] __counted_by_le(num_chunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
@@ -703,7 +703,7 @@ struct virtchnl2_config_tx_queues {
__le32 vport_id;
__le16 num_qinfo;
u8 pad[10];
- struct virtchnl2_txq_info qinfo[];
+ struct virtchnl2_txq_info qinfo[] __counted_by_le(num_qinfo);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues);
@@ -782,7 +782,7 @@ struct virtchnl2_config_rx_queues {
__le32 vport_id;
__le16 num_qinfo;
u8 pad[18];
- struct virtchnl2_rxq_info qinfo[];
+ struct virtchnl2_rxq_info qinfo[] __counted_by_le(num_qinfo);
};
VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues);
@@ -868,7 +868,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
struct virtchnl2_vector_chunks {
__le16 num_vchunks;
u8 pad[14];
- struct virtchnl2_vector_chunk vchunks[];
+ struct virtchnl2_vector_chunk vchunks[] __counted_by_le(num_vchunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks);
@@ -912,7 +912,7 @@ struct virtchnl2_rss_lut {
__le16 lut_entries_start;
__le16 lut_entries;
u8 pad[4];
- __le32 lut[];
+ __le32 lut[] __counted_by_le(lut_entries);
};
VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut);
@@ -977,8 +977,8 @@ struct virtchnl2_ptype {
u8 ptype_id_8;
u8 proto_id_count;
__le16 pad;
- __le16 proto_id[];
-};
+ __le16 proto_id[] __counted_by(proto_id_count);
+} __packed __aligned(2);
VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
/**
@@ -1104,9 +1104,9 @@ struct virtchnl2_rss_key {
__le32 vport_id;
__le16 key_len;
u8 pad;
- __DECLARE_FLEX_ARRAY(u8, key_flex);
-};
-VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key);
+ u8 key_flex[] __counted_by_le(key_len);
+} __packed;
+VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key);
/**
* struct virtchnl2_queue_chunk - chunk of contiguous queues
@@ -1131,7 +1131,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
struct virtchnl2_queue_chunks {
__le16 num_chunks;
u8 pad[6];
- struct virtchnl2_queue_chunk chunks[];
+ struct virtchnl2_queue_chunk chunks[] __counted_by_le(num_chunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks);
@@ -1195,7 +1195,7 @@ struct virtchnl2_queue_vector_maps {
__le32 vport_id;
__le16 num_qv_maps;
u8 pad[10];
- struct virtchnl2_queue_vector qv_maps[];
+ struct virtchnl2_queue_vector qv_maps[] __counted_by_le(num_qv_maps);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps);
@@ -1247,7 +1247,7 @@ struct virtchnl2_mac_addr_list {
__le32 vport_id;
__le16 num_mac_addr;
u8 pad[2];
- struct virtchnl2_mac_addr mac_addr_list[];
+ struct virtchnl2_mac_addr mac_addr_list[] __counted_by_le(num_mac_addr);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list);
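The flexible-array members above gain __counted_by_le() annotations, which tell the compiler (and FORTIFY/UBSAN bounds checks, where enabled) that the array's element count lives in the named little-endian struct member. A hedged sketch of how such an annotated structure is typically allocated and filled; the structure and field names here are hypothetical, not taken from virtchnl2.h:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Sketch only: a flex array annotated with __counted_by_le(). */
struct example_list {
	__le32 vport_id;
	__le16 num_entries;
	u8 pad[2];
	__le32 entries[] __counted_by_le(num_entries);
};

static struct example_list *example_alloc_list(u16 n)
{
	struct example_list *list;
	u16 i;

	list = kzalloc(struct_size(list, entries, n), GFP_KERNEL);
	if (!list)
		return NULL;

	/* the count member must be set before the array is indexed */
	list->num_entries = cpu_to_le16(n);
	for (i = 0; i < n; i++)
		list->entries[i] = cpu_to_le32(i);

	return list;
}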
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 8d6e44ee1895..64dfc362d1dc 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -222,8 +222,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
}
/* set lan id */
- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
- E1000_STATUS_FUNC_SHIFT;
+ hw->bus.func = FIELD_GET(E1000_STATUS_FUNC_MASK, rd32(E1000_STATUS));
/* Set phy->phy_addr and phy->id. */
ret_val = igb_get_phy_id_82575(hw);
@@ -262,8 +261,8 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
if (ret_val)
goto out;
- data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
- E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+ data = FIELD_GET(E1000_M88E1112_MAC_CTRL_1_MODE_MASK,
+ data);
if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
data == E1000_M88E1112_AUTO_COPPER_BASEX)
hw->mac.ops.check_for_link =
@@ -330,8 +329,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
u32 eecd = rd32(E1000_EECD);
u16 size;
- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ size = FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@@ -2798,7 +2796,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
- if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ if (FIELD_GET(NVM_ETS_TYPE_MASK, ets_cfg)
!= NVM_ETS_TYPE_EMC)
return E1000_NOT_IMPLEMENTED;
@@ -2808,10 +2806,8 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
for (i = 1; i < num_sensors; i++) {
hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
- sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
- NVM_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
- NVM_ETS_DATA_LOC_SHIFT);
+ sensor_index = FIELD_GET(NVM_ETS_DATA_INDEX_MASK, ets_sensor);
+ sensor_location = FIELD_GET(NVM_ETS_DATA_LOC_MASK, ets_sensor);
if (sensor_location != 0)
hw->phy.ops.read_i2c_byte(hw,
@@ -2859,20 +2855,17 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
- if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ if (FIELD_GET(NVM_ETS_TYPE_MASK, ets_cfg)
!= NVM_ETS_TYPE_EMC)
return E1000_NOT_IMPLEMENTED;
- low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
- NVM_ETS_LTHRES_DELTA_SHIFT);
+ low_thresh_delta = FIELD_GET(NVM_ETS_LTHRES_DELTA_MASK, ets_cfg);
num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
for (i = 1; i <= num_sensors; i++) {
hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
- sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
- NVM_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
- NVM_ETS_DATA_LOC_SHIFT);
+ sensor_index = FIELD_GET(NVM_ETS_DATA_INDEX_MASK, ets_sensor);
+ sensor_location = FIELD_GET(NVM_ETS_DATA_LOC_MASK, ets_sensor);
therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
hw->phy.ops.write_i2c_byte(hw,
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index b9b9d35494d2..503b239868e8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -5,9 +5,9 @@
* e1000_i211
*/
-#include <linux/types.h>
+#include <linux/bitfield.h>
#include <linux/if_ether.h>
-
+#include <linux/types.h>
#include "e1000_hw.h"
#include "e1000_i210.h"
@@ -473,7 +473,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
/* Check if we have second version location used */
else if ((i == 1) &&
((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
- version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ version = FIELD_GET(E1000_INVM_VER_FIELD_ONE, *record);
status = 0;
break;
}
@@ -483,8 +483,8 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
(i != 1))) {
- version = (*next_record & E1000_INVM_VER_FIELD_TWO)
- >> 13;
+ version = FIELD_GET(E1000_INVM_VER_FIELD_TWO,
+ *next_record);
status = 0;
break;
}
@@ -493,15 +493,15 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
*/
else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
((*record & 0x3) == 0)) {
- version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ version = FIELD_GET(E1000_INVM_VER_FIELD_ONE, *record);
status = 0;
break;
}
}
if (!status) {
- invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
- >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_major = FIELD_GET(E1000_INVM_MAJOR_MASK,
+ version);
invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
}
/* Read Image Type */
@@ -520,7 +520,8 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
((((*record & 0x3) != 0) && (i != 1)))) {
invm_ver->invm_img_type =
- (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+ FIELD_GET(E1000_INVM_IMGTYPE_FIELD,
+ *next_record);
status = 0;
break;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index caf91c6f52b4..fa3dfafd2bb1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
#include <linux/if_ether.h>
#include <linux/delay.h>
#include <linux/pci.h>
@@ -50,13 +51,12 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
break;
}
- bus->width = (enum e1000_bus_width)((pcie_link_status &
- PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT);
+ bus->width = (enum e1000_bus_width)FIELD_GET(PCI_EXP_LNKSTA_NLW,
+ pcie_link_status);
}
reg = rd32(E1000_STATUS);
- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+ bus->func = FIELD_GET(E1000_STATUS_FUNC_MASK, reg);
return 0;
}
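The igb changes above replace open-coded "(reg & MASK) >> SHIFT" extraction with FIELD_GET() from <linux/bitfield.h>, which derives the shift from the mask at compile time, so the matching *_SHIFT defines become unnecessary. A small illustration of the equivalence with a made-up mask:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_FUNC_MASK	GENMASK(3, 2)	/* hypothetical 2-bit field */

static u32 example_extract_func(u32 status)
{
	/*
	 * Open-coded form removed by the patch:
	 *	(status & EXAMPLE_FUNC_MASK) >> 2
	 * FIELD_GET() computes that shift from the mask's lowest set bit.
	 */
	return FIELD_GET(EXAMPLE_FUNC_MASK, status);
}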
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index fa136e6e9328..2dcd64d6dec3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */
-#include <linux/if_ether.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
-
+#include <linux/if_ether.h>
#include "e1000_mac.h"
#include "e1000_nvm.h"
@@ -708,10 +708,10 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
*/
if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
- >> NVM_MAJOR_SHIFT;
- fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
- >> NVM_MINOR_SHIFT;
+ fw_vers->eep_major = FIELD_GET(NVM_MAJOR_MASK,
+ fw_version);
+ fw_vers->eep_minor = FIELD_GET(NVM_MINOR_MASK,
+ fw_version);
fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
goto etrack_id;
}
@@ -753,15 +753,13 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
return;
}
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
- >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_major = FIELD_GET(NVM_MAJOR_MASK, fw_version);
/* check for old style version format in newer images*/
if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
} else {
- eeprom_verl = (fw_version & NVM_MINOR_MASK)
- >> NVM_MINOR_SHIFT;
+ eeprom_verl = FIELD_GET(NVM_MINOR_MASK, fw_version);
}
/* Convert minor value to hex before assigning to output struct
* Val to be converted will not be higher than 99, per tool output
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index a018000f7db9..cd65008c7ef5 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */
-#include <linux/if_ether.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
-
+#include <linux/if_ether.h>
#include "e1000_mac.h"
#include "e1000_phy.h"
@@ -255,7 +255,7 @@ s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
}
/* Need to byte-swap the 16-bit value. */
- *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+ *data = ((i2ccmd >> 8) & 0x00FF) | FIELD_PREP(0xFF00, i2ccmd);
return 0;
}
@@ -282,7 +282,7 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
}
/* Swap the data bytes for the I2C interface */
- phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+ phy_data_swapped = ((data >> 8) & 0x00FF) | FIELD_PREP(0xFF00, data);
/* Set up Op-code, Phy Address, and register address in the I2CCMD
* register. The MAC will take care of interfacing with the
@@ -1682,8 +1682,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
if (ret_val)
goto out;
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
@@ -1796,8 +1795,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
if (ret_val)
goto out;
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
@@ -2578,8 +2576,7 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw)
if (ret_val)
goto out;
- length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
- I82580_DSTATUS_CABLE_LENGTH_SHIFT;
+ length = FIELD_GET(I82580_DSTATUS_CABLE_LENGTH, phy_data);
if (length == E1000_CABLE_LENGTH_UNDEFINED)
ret_val = -E1000_ERR_PHY;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index a2b759531cb7..3c2dc7bdebb5 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -637,7 +637,7 @@ struct igb_adapter {
struct timespec64 period;
} perout[IGB_N_PEROUT];
- char fw_version[32];
+ char fw_version[48];
#ifdef CONFIG_IGB_HWMON
struct hwmon_buff *igb_hwmon_buff;
bool ets;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 16d2a55d5e17..61d72250c0ed 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2356,11 +2356,9 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
break;
case ETH_SS_STATS:
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- igb_gstrings_stats[i].stat_string);
+ ethtool_puts(&p, igb_gstrings_stats[i].stat_string);
for (i = 0; i < IGB_NETDEV_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- igb_gstrings_net_stats[i].stat_string);
+ ethtool_puts(&p, igb_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -2434,7 +2432,7 @@ static int igb_get_ts_info(struct net_device *dev,
}
}
-#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK cpu_to_be16(FIELD_MAX(U16_MAX))
static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
@@ -2713,8 +2711,7 @@ static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter,
etqf |= (etype & E1000_ETQF_ETYPE_MASK);
etqf &= ~E1000_ETQF_QUEUE_MASK;
- etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT)
- & E1000_ETQF_QUEUE_MASK);
+ etqf |= FIELD_PREP(E1000_ETQF_QUEUE_MASK, input->action);
etqf |= E1000_ETQF_QUEUE_ENABLE;
wr32(E1000_ETQF(i), etqf);
@@ -2733,8 +2730,8 @@ static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter,
u32 vlapqf;
vlapqf = rd32(E1000_VLAPQF);
- vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK)
- >> VLAN_PRIO_SHIFT;
+ vlan_priority = FIELD_GET(VLAN_PRIO_MASK,
+ ntohs(input->filter.vlan_tci));
queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK;
/* check whether this vlan prio is already set */
@@ -2817,7 +2814,7 @@ static void igb_clear_vlan_prio_filter(struct igb_adapter *adapter,
u8 vlan_priority;
u32 vlapqf;
- vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan_priority = FIELD_GET(VLAN_PRIO_MASK, vlan_tci);
vlapqf = rd32(E1000_VLAPQF);
vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority);
@@ -3030,7 +3027,7 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
-static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int igb_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -3041,11 +3038,13 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
- edata->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->supported);
if (!hw->dev_spec._82575.eee_disable)
- edata->advertised =
- mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised,
+ adapter->eee_advert);
/* The IPCNFG and EEER registers are not supported on I354. */
if (hw->mac.type == e1000_i354) {
@@ -3071,7 +3070,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (ret_val)
return -ENODATA;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
break;
case e1000_i354:
case e1000_i210:
@@ -3082,7 +3081,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (ret_val)
return -ENODATA;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
break;
default:
@@ -3102,18 +3101,20 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
edata->eee_enabled = false;
edata->eee_active = false;
edata->tx_lpi_enabled = false;
- edata->advertised &= ~edata->advertised;
+ linkmode_zero(edata->advertised);
}
return 0;
}
static int igb_set_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct e1000_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
bool adv1g_eee = true, adv100m_eee = true;
s32 ret_val;
@@ -3121,7 +3122,7 @@ static int igb_set_eee(struct net_device *netdev,
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
- memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+ memset(&eee_curr, 0, sizeof(struct ethtool_keee));
ret_val = igb_get_eee(netdev, &eee_curr);
if (ret_val)
@@ -3141,14 +3142,21 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- if (!edata->advertised || (edata->advertised &
- ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+ if (linkmode_andnot(tmp, edata->advertised, supported)) {
dev_err(&adapter->pdev->dev,
"EEE Advertisement supports only 100Tx and/or 100T full duplex\n");
return -EINVAL;
}
- adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL);
- adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL);
+ adv100m_eee = linkmode_test_bit(
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->advertised);
+ adv1g_eee = linkmode_test_bit(
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised);
} else if (!edata->eee_enabled) {
dev_err(&adapter->pdev->dev,
@@ -3156,7 +3164,7 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
adapter->flags |= IGB_FLAG_EEE;
@@ -3264,36 +3272,22 @@ static int igb_get_module_eeprom(struct net_device *netdev,
return 0;
}
-static int igb_ethtool_begin(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- pm_runtime_get_sync(&adapter->pdev->dev);
- return 0;
-}
-
-static void igb_ethtool_complete(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- pm_runtime_put(&adapter->pdev->dev);
-}
-
static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
{
return IGB_RETA_SIZE;
}
-static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int igb_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
for (i = 0; i < IGB_RETA_SIZE; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ rxfh->indir[i] = adapter->rss_indir_tbl[i];
return 0;
}
@@ -3333,8 +3327,9 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
}
}
-static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int igb_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -3342,10 +3337,11 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
u32 num_queues;
/* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
num_queues = adapter->rss_queues;
@@ -3362,12 +3358,12 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
/* Verify user input. */
for (i = 0; i < IGB_RETA_SIZE; i++)
- if (indir[i] >= num_queues)
+ if (rxfh->indir[i] >= num_queues)
return -EINVAL;
for (i = 0; i < IGB_RETA_SIZE; i++)
- adapter->rss_indir_tbl[i] = indir[i];
+ adapter->rss_indir_tbl[i] = rxfh->indir[i];
igb_write_rss_indir_tbl(adapter);
@@ -3499,8 +3495,6 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_channels = igb_set_channels,
.get_priv_flags = igb_get_priv_flags,
.set_priv_flags = igb_set_priv_flags,
- .begin = igb_ethtool_begin,
- .complete = igb_ethtool_complete,
.get_link_ksettings = igb_get_link_ksettings,
.set_link_ksettings = igb_set_link_ksettings,
};
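The ethtool_eee to ethtool_keee conversion above turns the advertisement words into linkmode bitmaps, so supported and advertised modes are built and checked with the linkmode helpers rather than u32 masks. A brief sketch of the validation idiom the patch uses; the function name is illustrative only:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/linkmode.h>

static int example_validate_eee_advert(const struct ethtool_keee *keee)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
	__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};

	/* build the set of modes the device can advertise for EEE */
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);

	/* reject any requested mode outside the supported set */
	if (linkmode_andnot(tmp, keee->advertised, supported))
		return -EINVAL;

	return 0;
}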
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b2295caa2f0a..fce2930ae6af 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -106,8 +106,6 @@ static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
-static int igb_probe(struct pci_dev *, const struct pci_device_id *);
-static void igb_remove(struct pci_dev *pdev);
static void igb_init_queue_configuration(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
@@ -178,20 +176,6 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_disable_sriov(struct pci_dev *dev, bool reinit);
#endif
-static int igb_suspend(struct device *);
-static int igb_resume(struct device *);
-static int igb_runtime_suspend(struct device *dev);
-static int igb_runtime_resume(struct device *dev);
-static int igb_runtime_idle(struct device *dev);
-#ifdef CONFIG_PM
-static const struct dev_pm_ops igb_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
- SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
- igb_runtime_idle)
-};
-#endif
-static void igb_shutdown(struct pci_dev *);
-static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
@@ -202,7 +186,7 @@ static struct notifier_block dca_notifier = {
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
-module_param(max_vfs, uint, 0);
+module_param(max_vfs, uint, 0444);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
@@ -219,19 +203,6 @@ static const struct pci_error_handlers igb_err_handler = {
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
-static struct pci_driver igb_driver = {
- .name = igb_driver_name,
- .id_table = igb_pci_tbl,
- .probe = igb_probe,
- .remove = igb_remove,
-#ifdef CONFIG_PM
- .driver.pm = &igb_pm_ops,
-#endif
- .shutdown = igb_shutdown,
- .sriov_configure = igb_pci_sriov_configure,
- .err_handler = &igb_err_handler
-};
-
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL v2");
@@ -647,6 +618,8 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
return adapter->netdev;
}
+static struct pci_driver igb_driver;
+
/**
* igb_init_module - Driver Registration Routine
*
@@ -2538,7 +2511,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -2624,6 +2597,9 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@@ -3069,7 +3045,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_fw_version fw;
- char *lbuf;
igb_get_fw_version(hw, &fw);
@@ -3077,34 +3052,36 @@ void igb_set_fw_version(struct igb_adapter *adapter)
case e1000_i210:
case e1000_i211:
if (!(igb_get_flash_presence_i210(hw))) {
- lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor,
- fw.invm_img_type);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
break;
}
fallthrough;
default:
/* if option rom is valid, display its version too */
if (fw.or_valid) {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d",
- fw.eep_major, fw.eep_minor,
- fw.etrack_id, fw.or_major, fw.or_build,
- fw.or_patch);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
/* no option rom */
} else if (fw.etrack_id != 0X0000) {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor,
- fw.etrack_id);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
} else {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major,
- fw.eep_minor, fw.eep_build);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
}
break;
}
-
- /* the truncate happens here if it doesn't fit */
- strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version));
- kfree(lbuf);
}
/**
@@ -6667,7 +6644,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igb_up(adapter);
@@ -6984,44 +6961,31 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 ack = 0, tsicr = rd32(E1000_TSICR);
+ u32 tsicr = rd32(E1000_TSICR);
struct ptp_clock_event event;
if (tsicr & TSINTR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= TSINTR_SYS_WRAP;
}
if (tsicr & E1000_TSICR_TXTS) {
/* retrieve hardware timestamp */
schedule_work(&adapter->ptp_tx_work);
- ack |= E1000_TSICR_TXTS;
}
- if (tsicr & TSINTR_TT0) {
+ if (tsicr & TSINTR_TT0)
igb_perout(adapter, 0);
- ack |= TSINTR_TT0;
- }
- if (tsicr & TSINTR_TT1) {
+ if (tsicr & TSINTR_TT1)
igb_perout(adapter, 1);
- ack |= TSINTR_TT1;
- }
- if (tsicr & TSINTR_AUTT0) {
+ if (tsicr & TSINTR_AUTT0)
igb_extts(adapter, 0);
- ack |= TSINTR_AUTT0;
- }
- if (tsicr & TSINTR_AUTT1) {
+ if (tsicr & TSINTR_AUTT1)
igb_extts(adapter, 1);
- ack |= TSINTR_AUTT1;
- }
-
- /* acknowledge the interrupts */
- wr32(E1000_TSICR, ack);
}
static irqreturn_t igb_msix_other(int irq, void *data)
@@ -7295,7 +7259,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
u32 *msgbuf, u32 vf)
{
- int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ int n = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]);
u16 *hash_list = (u16 *)&msgbuf[1];
struct vf_data_storage *vf_data = &adapter->vf_data[vf];
int i;
@@ -7555,7 +7519,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
- int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ int add = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]);
int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
int ret;
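
The FIELD_GET()/FIELD_PREP() conversions in the hunks above and below rely on <linux/bitfield.h> deriving the shift from the mask itself, so the matching *_SHIFT defines become unnecessary; sibling helpers such as le16_get_bits() do the same for __le-typed descriptor fields. A minimal sketch of the equivalence (the mask value here is illustrative, not the real define):

    #include <linux/bits.h>
    #include <linux/bitfield.h>

    #define EXAMPLE_MSGINFO_MASK	GENMASK(23, 16)	/* illustrative mask */

    static u32 example_get_msginfo(u32 msgbuf0)
    {
            /* Equivalent to (msgbuf0 & MASK) >> 16; the shift is derived
             * from the lowest set bit of the constant mask.
             */
            return FIELD_GET(EXAMPLE_MSGINFO_MASK, msgbuf0);
    }
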
@@ -9465,12 +9429,12 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int __maybe_unused igb_suspend(struct device *dev)
+static int igb_suspend(struct device *dev)
{
return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
-static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
+static int __igb_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9526,12 +9490,12 @@ static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
return err;
}
-static int __maybe_unused igb_resume(struct device *dev)
+static int igb_resume(struct device *dev)
{
return __igb_resume(dev, false);
}
-static int __maybe_unused igb_runtime_idle(struct device *dev)
+static int igb_runtime_idle(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -9542,12 +9506,12 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
return -EBUSY;
}
-static int __maybe_unused igb_runtime_suspend(struct device *dev)
+static int igb_runtime_suspend(struct device *dev)
{
return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}
-static int __maybe_unused igb_runtime_resume(struct device *dev)
+static int igb_runtime_resume(struct device *dev)
{
return __igb_resume(dev, true);
}
@@ -9810,8 +9774,7 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
tx_rate;
bcnrc_val = E1000_RTTBCNRC_RS_ENA;
- bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
- E1000_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= FIELD_PREP(E1000_RTTBCNRC_RF_INT_MASK, rf_int);
bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
} else {
bcnrc_val = 0;
@@ -10000,8 +9963,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
hwm = 64 * (pba - 6);
reg = rd32(E1000_FCRTC);
reg &= ~E1000_FCRTC_RTH_COAL_MASK;
- reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
- & E1000_FCRTC_RTH_COAL_MASK);
+ reg |= FIELD_PREP(E1000_FCRTC_RTH_COAL_MASK, hwm);
wr32(E1000_FCRTC, reg);
/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
@@ -10010,8 +9972,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
dmac_thr = pba - 10;
reg = rd32(E1000_DMACR);
reg &= ~E1000_DMACR_DMACTHR_MASK;
- reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
- & E1000_DMACR_DMACTHR_MASK);
+ reg |= FIELD_PREP(E1000_DMACR_DMACTHR_MASK, dmac_thr);
/* transition to L0x or L1 if available..*/
reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
@@ -10172,4 +10133,20 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter)
spin_unlock(&adapter->nfc_lock);
}
+
+static _DEFINE_DEV_PM_OPS(igb_pm_ops, igb_suspend, igb_resume,
+ igb_runtime_suspend, igb_runtime_resume,
+ igb_runtime_idle);
+
+static struct pci_driver igb_driver = {
+ .name = igb_driver_name,
+ .id_table = igb_pci_tbl,
+ .probe = igb_probe,
+ .remove = igb_remove,
+ .driver.pm = pm_ptr(&igb_pm_ops),
+ .shutdown = igb_shutdown,
+ .sriov_configure = igb_pci_sriov_configure,
+ .err_handler = &igb_err_handler
+};
+
/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 319c544b9f04..f94570556120 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -957,7 +957,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
/* adjust timestamp for the TX latency based on link speed */
- if (adapter->hw.mac.type == e1000_i210) {
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_TX_LATENCY_10;
@@ -1003,6 +1003,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
ktime_t *timestamp)
{
struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps ts;
__le64 *regval = (__le64 *)va;
int adjust = 0;
@@ -1022,7 +1023,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
/* adjust timestamp for the RX latency based on link speed */
- if (adapter->hw.mac.type == e1000_i210) {
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_RX_LATENCY_10;
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index a3cd7ac48d4b..d15282ee5ea8 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
#include "mbx.h"
/**
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index fd712585af27..7661edd7d0f2 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -3,25 +3,25 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/tcp.h>
-#include <linux/ipv6.h>
-#include <linux/slab.h>
-#include <net/checksum.h>
-#include <net/ip6_checksum.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/ipv6.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pagemap.h>
+#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>
-
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
#include "igbvf.h"
char igbvf_driver_name[] = "igbvf";
@@ -273,9 +273,8 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
* that case, it fills the header buffer and spills the rest
* into the page.
*/
- hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
- & E1000_RXDADV_HDRBUFLEN_MASK) >>
- E1000_RXDADV_HDRBUFLEN_SHIFT;
+ hlen = le16_get_bits(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info,
+ E1000_RXDADV_HDRBUFLEN_MASK);
if (hlen > adapter->rx_ps_hdr_size)
hlen = adapter->rx_ps_hdr_size;
@@ -2435,7 +2434,7 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igbvf_up(adapter);
@@ -2471,7 +2470,7 @@ static int igbvf_suspend(struct device *dev_d)
return 0;
}
-static int __maybe_unused igbvf_resume(struct device *dev_d)
+static int igbvf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -2656,7 +2655,7 @@ igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -2958,7 +2957,7 @@ static const struct pci_device_id igbvf_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
-static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);
/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
@@ -2966,7 +2965,7 @@ static struct pci_driver igbvf_driver = {
.id_table = igbvf_pci_tbl,
.probe = igbvf_probe,
.remove = igbvf_remove,
- .driver.pm = &igbvf_pm_ops,
+ .driver.pm = pm_sleep_ptr(&igbvf_pm_ops),
.shutdown = igbvf_shutdown,
.err_handler = &igbvf_err_handler
};
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 95d1e8c490a4..ebffd3054285 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -6,6 +6,7 @@
#
obj-$(CONFIG_IGC) += igc.o
+igc-$(CONFIG_IGC_LEDS) += igc_leds.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index f48f82d5e274..8b14c029eda1 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -72,13 +72,61 @@ struct igc_rx_packet_stats {
u64 other_packets;
};
+enum igc_tx_buffer_type {
+ IGC_TX_BUFFER_TYPE_SKB,
+ IGC_TX_BUFFER_TYPE_XDP,
+ IGC_TX_BUFFER_TYPE_XSK,
+};
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct igc_tx_buffer {
+ union igc_adv_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ enum igc_tx_buffer_type type;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+ unsigned int bytecount;
+ u16 gso_segs;
+ __be16 protocol;
+
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+ bool xsk_pending_ts;
+};
+
struct igc_tx_timestamp_request {
- struct sk_buff *skb; /* reference to the packet being timestamped */
+ union { /* reference to the packet being timestamped */
+ struct sk_buff *skb;
+ struct igc_tx_buffer *xsk_tx_buffer;
+ };
+ enum igc_tx_buffer_type buffer_type;
unsigned long start; /* when the tstamp request started (jiffies) */
u32 mask; /* _TSYNCTXCTL_TXTT_{X} bit for this request */
u32 regl; /* which TXSTMPL_{X} register should be used */
u32 regh; /* which TXSTMPH_{X} register should be used */
u32 flags; /* flags that should be added to the tx_buffer */
+	u8 xsk_queue_index;	/* Tx queue requesting the timestamp */
+ struct xsk_tx_metadata_compl xsk_meta; /* ref to xsk Tx metadata */
+};
+
+struct igc_inline_rx_tstamps {
+ /* Timestamps are saved in little endian at the beginning of the packet
+ * buffer following the layout:
+ *
+ * DWORD: | 0 | 1 | 2 | 3 |
+ * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
+ *
+ * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
+ * part of the timestamp.
+ *
+ */
+ __le32 timer1[2];
+ __le32 timer0[2];
};
struct igc_ring_container {
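
A minimal sketch of how the inline timestamp area maps onto struct igc_inline_rx_tstamps above, assuming the layout described in its comment and ignoring the link-speed latency adjustment that igc_ptp_rx_pktstamp() applies (illustrative only, not driver code):

    static ktime_t example_inline_timer0(const struct igc_inline_rx_tstamps *ts)
    {
            u32 nsec = le32_to_cpu(ts->timer0[0]);	/* SYSTIML: nanoseconds */
            u32 sec  = le32_to_cpu(ts->timer0[1]);	/* SYSTIMH: seconds */

            return ktime_set(sec, nsec);
    }
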
@@ -153,7 +201,7 @@ struct igc_ring {
struct igc_adapter {
struct net_device *netdev;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
u16 eee_advert;
unsigned long state;
@@ -261,6 +309,8 @@ struct igc_adapter {
unsigned int ptp_flags;
/* System time value lock */
spinlock_t tmreg_lock;
+ /* Free-running timer lock */
+ spinlock_t free_timer_lock;
struct cyclecounter cc;
struct timecounter tc;
struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
@@ -278,6 +328,10 @@ struct igc_adapter {
struct timespec64 start;
struct timespec64 period;
} perout[IGC_N_PEROUT];
+
+ /* LEDs */
+ struct mutex led_mutex;
+ struct igc_led_classdev *leds;
};
void igc_up(struct igc_adapter *adapter);
@@ -302,6 +356,9 @@ void igc_disable_tx_ring(struct igc_ring *ring);
void igc_enable_tx_ring(struct igc_ring *ring);
int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+/* AF_XDP TX metadata operations */
+extern const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops;
+
/* igc_dump declarations */
void igc_rings_dump(struct igc_adapter *adapter);
void igc_regs_dump(struct igc_adapter *adapter);
@@ -469,6 +526,8 @@ enum igc_tx_flags {
IGC_TX_FLAGS_TSTAMP_1 = 0x100,
IGC_TX_FLAGS_TSTAMP_2 = 0x200,
IGC_TX_FLAGS_TSTAMP_3 = 0x400,
+
+ IGC_TX_FLAGS_TSTAMP_TIMER_1 = 0x800,
};
enum igc_boards {
@@ -485,32 +544,6 @@ enum igc_boards {
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
-enum igc_tx_buffer_type {
- IGC_TX_BUFFER_TYPE_SKB,
- IGC_TX_BUFFER_TYPE_XDP,
- IGC_TX_BUFFER_TYPE_XSK,
-};
-
-/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer
- */
-struct igc_tx_buffer {
- union igc_adv_tx_desc *next_to_watch;
- unsigned long time_stamp;
- enum igc_tx_buffer_type type;
- union {
- struct sk_buff *skb;
- struct xdp_frame *xdpf;
- };
- unsigned int bytecount;
- u16 gso_segs;
- __be16 protocol;
-
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
- u32 tx_flags;
-};
-
struct igc_rx_buffer {
union {
struct {
@@ -531,7 +564,14 @@ struct igc_rx_buffer {
struct igc_xdp_buff {
struct xdp_buff xdp;
union igc_adv_rx_desc *rx_desc;
- ktime_t rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */
+ struct igc_inline_rx_tstamps *rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */
+};
+
+struct igc_metadata_request {
+ struct igc_tx_buffer *tx_buffer;
+ struct xsk_tx_metadata *meta;
+ struct igc_ring *tx_ring;
+ u32 cmd_type;
};
struct igc_q_vector {
@@ -548,7 +588,6 @@ struct igc_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
- struct net_device poll_dev;
/* for dynamic allocation of rings associated with this q_vector */
struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
@@ -566,8 +605,9 @@ enum igc_filter_match_flags {
struct igc_nfc_filter {
u8 match_flags;
u16 etype;
- __be16 vlan_etype;
+ u16 vlan_etype;
u16 vlan_tci;
+ u16 vlan_tci_mask;
u8 src_addr[ETH_ALEN];
u8 dst_addr[ETH_ALEN];
u8 user_data[8];
@@ -700,6 +740,9 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter);
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
+int igc_led_setup(struct igc_adapter *adapter);
+void igc_led_free(struct igc_adapter *adapter);
+
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index a1d815af507d..9fae8bdec2a7 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -68,8 +68,7 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
u32 eecd = rd32(IGC_EECD);
u16 size;
- size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
- IGC_EECD_SIZE_EX_SHIFT);
+ size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@@ -162,8 +161,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
phy->reset_delay_us = 100;
/* set lan id */
- hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
- IGC_STATUS_FUNC_SHIFT;
+ hw->bus.func = FIELD_GET(IGC_STATUS_FUNC_MASK, rd32(IGC_STATUS));
/* Make sure the PHY is in a good state. Several people have reported
* firmware leaving the PHY's page select register set to something
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index f7d6491d4c60..bf8cdfbba9ff 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -37,6 +37,10 @@ struct igc_adv_tx_context_desc {
#define IGC_ADVTXD_TSTAMP_REG_1 0x00010000 /* Select register 1 for timestamp */
#define IGC_ADVTXD_TSTAMP_REG_2 0x00020000 /* Select register 2 for timestamp */
#define IGC_ADVTXD_TSTAMP_REG_3 0x00030000 /* Select register 3 for timestamp */
+#define IGC_ADVTXD_TSTAMP_TIMER_1 0x00010000 /* Select timer 1 for timestamp */
+#define IGC_ADVTXD_TSTAMP_TIMER_2 0x00020000 /* Select timer 2 for timestamp */
+#define IGC_ADVTXD_TSTAMP_TIMER_3 0x00030000 /* Select timer 3 for timestamp */
+
#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index b3037016f31d..5f92b3c7c3d4 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -317,6 +317,8 @@
#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+#define IGC_TXD_PTP2_TIMER_1 0x00000020
+
/* IPSec Encrypt Enable */
#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 785eaa8e0ba8..f2c4f1966bb0 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -773,11 +773,9 @@ static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset,
break;
case ETH_SS_STATS:
for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- igc_gstrings_stats[i].stat_string);
+ ethtool_puts(&p, igc_gstrings_stats[i].stat_string);
for (i = 0; i < IGC_NETDEV_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- igc_gstrings_net_stats[i].stat_string);
+ ethtool_puts(&p, igc_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -958,6 +956,7 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev,
}
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+#define VLAN_TCI_FULL_MASK ((__force __be16)~0)
static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
@@ -980,10 +979,16 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
}
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
+ fsp->flow_type |= FLOW_EXT;
+ fsp->h_ext.vlan_etype = htons(rule->filter.vlan_etype);
+ fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK;
+ }
+
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
fsp->flow_type |= FLOW_EXT;
fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci);
- fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
+ fsp->m_ext.vlan_tci = htons(rule->filter.vlan_tci_mask);
}
if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
@@ -1218,6 +1223,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci);
+ rule->filter.vlan_tci_mask = ntohs(fsp->m_ext.vlan_tci);
rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
}
@@ -1243,7 +1249,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
/* VLAN etype matching */
if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) {
- rule->filter.vlan_etype = fsp->h_ext.vlan_etype;
+ rule->filter.vlan_etype = ntohs(fsp->h_ext.vlan_etype);
rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE;
}
@@ -1255,11 +1261,19 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data));
}
- /* When multiple filter options or user data or vlan etype is set, use a
- * flex filter.
+	/* The i225/i226 supports several different filter types. Flex filters provide a
+ * way to match up to the first 128 bytes of a packet. Use them for:
+ * a) For specific user data
+ * b) For VLAN EtherType
+ * c) For full TCI match
+ * d) Or in case multiple filter criteria are set
+ *
+ * Otherwise, use the simple MAC, VLAN PRIO or EtherType filters.
*/
if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) ||
(rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) ||
+ ((rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) &&
+ rule->filter.vlan_tci_mask == ntohs(VLAN_TCI_FULL_MASK)) ||
(rule->filter.match_flags & (rule->filter.match_flags - 1)))
rule->flex = true;
else
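
The final condition in the if above, match_flags & (match_flags - 1), is the usual test for more than one bit being set: subtracting one clears the lowest set bit, so the AND is non-zero only when a second flag remains. A minimal sketch:

    static bool example_multiple_flags(u32 match_flags)
    {
            /* Non-zero iff at least two flag bits are set. */
            return match_flags & (match_flags - 1);
    }
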
@@ -1329,6 +1343,26 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter,
return -EINVAL;
}
+ /* There are two ways to match the VLAN TCI:
+ * 1. Match on PCP field and use vlan prio filter for it
+ * 2. Match on complete TCI field and use flex filter for it
+ */
+ if ((fsp->flow_type & FLOW_EXT) &&
+ fsp->m_ext.vlan_tci &&
+ fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK) &&
+ fsp->m_ext.vlan_tci != VLAN_TCI_FULL_MASK) {
+ netdev_dbg(netdev, "VLAN mask not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* VLAN EtherType can only be matched by full mask. */
+ if ((fsp->flow_type & FLOW_EXT) &&
+ fsp->m_ext.vlan_etype &&
+ fsp->m_ext.vlan_etype != ETHER_TYPE_FULL_MASK) {
+ netdev_dbg(netdev, "VLAN EtherType mask not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (fsp->location >= IGC_MAX_RXNFC_RULES) {
netdev_dbg(netdev, "Invalid location\n");
return -EINVAL;
@@ -1428,45 +1462,46 @@ static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev)
return IGC_RETA_SIZE;
}
-static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int igc_ethtool_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
for (i = 0; i < IGC_RETA_SIZE; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ rxfh->indir[i] = adapter->rss_indir_tbl[i];
return 0;
}
-static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int igc_ethtool_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
u32 num_queues;
int i;
/* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
num_queues = adapter->rss_queues;
/* Verify user input. */
for (i = 0; i < IGC_RETA_SIZE; i++)
- if (indir[i] >= num_queues)
+ if (rxfh->indir[i] >= num_queues)
return -EINVAL;
for (i = 0; i < IGC_RETA_SIZE; i++)
- adapter->rss_indir_tbl[i] = indir[i];
+ adapter->rss_indir_tbl[i] = rxfh->indir[i];
igc_write_rss_indir_tbl(adapter);
@@ -1588,18 +1623,17 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static int igc_ethtool_get_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
u32 eeer;
if (hw->dev_spec._base.eee_enable)
- edata->advertised =
- mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised,
+ adapter->eee_advert);
*edata = adapter->eee;
- edata->supported = SUPPORTED_Autoneg;
eeer = rd32(IGC_EEER);
@@ -1612,9 +1646,6 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
edata->eee_enabled = hw->dev_spec._base.eee_enable;
- edata->advertised = SUPPORTED_Autoneg;
- edata->lp_advertised = SUPPORTED_Autoneg;
-
/* Report correct negotiated EEE status for devices that
* wrongly report EEE at half-duplex
*/
@@ -1622,21 +1653,21 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
edata->eee_enabled = false;
edata->eee_active = false;
edata->tx_lpi_enabled = false;
- edata->advertised &= ~edata->advertised;
+ linkmode_zero(edata->advertised);
}
return 0;
}
static int igc_ethtool_set_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
s32 ret_val;
- memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+ memset(&eee_curr, 0, sizeof(struct ethtool_keee));
ret_val = igc_ethtool_get_eee(netdev, &eee_curr);
if (ret_val) {
@@ -1664,7 +1695,8 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
+
if (hw->dev_spec._base.eee_enable != edata->eee_enabled) {
hw->dev_spec._base.eee_enable = edata->eee_enabled;
adapter->flags |= IGC_FLAG_EEE;
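
The ethtool_keee conversion above carries advertisements as linkmode bitmaps rather than the legacy SUPPORTED_*/ADVERTISED_* words, so the raw MMD 7.60 EEE advertisement value is translated with the mii helpers instead of ethtool_adv_to_mmd_eee_adv_t(). A minimal sketch, assuming the linkmode helpers from <linux/mdio.h> and <linux/linkmode.h>:

    static void example_eee_advert_to_linkmode(u16 eee_advert)
    {
            __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);

            linkmode_zero(adv);
            /* MDIO_EEE_100TX / MDIO_EEE_1000T bits in the register value map
             * to ETHTOOL_LINK_MODE_100baseT_Full_BIT / _1000baseT_Full_BIT.
             */
            mii_eee_cap1_mod_linkmode_t(adv, eee_advert);
    }
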
@@ -1679,21 +1711,6 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return 0;
}
-static int igc_ethtool_begin(struct net_device *netdev)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
-
- pm_runtime_get_sync(&adapter->pdev->dev);
- return 0;
-}
-
-static void igc_ethtool_complete(struct net_device *netdev)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
-
- pm_runtime_put(&adapter->pdev->dev);
-}
-
static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
@@ -1993,8 +2010,6 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_priv_flags = igc_ethtool_set_priv_flags,
.get_eee = igc_ethtool_get_eee,
.set_eee = igc_ethtool_set_eee,
- .begin = igc_ethtool_begin,
- .complete = igc_ethtool_complete,
.get_link_ksettings = igc_ethtool_get_link_ksettings,
.set_link_ksettings = igc_ethtool_set_link_ksettings,
.self_test = igc_ethtool_diag_test,
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 17546a035ab1..0dd61719f1ed 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include "igc_hw.h"
@@ -578,9 +579,8 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
/* Calculate tw_system (nsec). */
if (speed == SPEED_100) {
- tw_system = ((rd32(IGC_EEE_SU) &
- IGC_TW_SYSTEM_100_MASK) >>
- IGC_TW_SYSTEM_100_SHIFT) * 500;
+ tw_system = FIELD_GET(IGC_TW_SYSTEM_100_MASK,
+ rd32(IGC_EEE_SU)) * 500;
} else {
tw_system = (rd32(IGC_EEE_SU) &
IGC_TW_SYSTEM_1000_MASK) * 500;
diff --git a/drivers/net/ethernet/intel/igc/igc_leds.c b/drivers/net/ethernet/intel/igc/igc_leds.c
new file mode 100644
index 000000000000..3929b25b6ae6
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_leds.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Linutronix GmbH */
+
+#include <linux/bits.h>
+#include <linux/leds.h>
+#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
+#include <uapi/linux/uleds.h>
+
+#include "igc.h"
+
+#define IGC_NUM_LEDS 3
+
+#define IGC_LEDCTL_LED0_MODE_SHIFT 0
+#define IGC_LEDCTL_LED0_MODE_MASK GENMASK(3, 0)
+#define IGC_LEDCTL_LED0_BLINK BIT(7)
+#define IGC_LEDCTL_LED1_MODE_SHIFT 8
+#define IGC_LEDCTL_LED1_MODE_MASK GENMASK(11, 8)
+#define IGC_LEDCTL_LED1_BLINK BIT(15)
+#define IGC_LEDCTL_LED2_MODE_SHIFT 16
+#define IGC_LEDCTL_LED2_MODE_MASK GENMASK(19, 16)
+#define IGC_LEDCTL_LED2_BLINK BIT(23)
+
+#define IGC_LEDCTL_MODE_ON 0x00
+#define IGC_LEDCTL_MODE_OFF 0x01
+#define IGC_LEDCTL_MODE_LINK_10 0x05
+#define IGC_LEDCTL_MODE_LINK_100 0x06
+#define IGC_LEDCTL_MODE_LINK_1000 0x07
+#define IGC_LEDCTL_MODE_LINK_2500 0x08
+#define IGC_LEDCTL_MODE_ACTIVITY 0x0b
+
+#define IGC_SUPPORTED_MODES \
+ (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK_1000) | \
+ BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_10) | \
+ BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX))
+
+#define IGC_ACTIVITY_MODES \
+ (BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX))
+
+struct igc_led_classdev {
+ struct net_device *netdev;
+ struct led_classdev led;
+ int index;
+};
+
+#define lcdev_to_igc_ldev(lcdev) \
+ container_of(lcdev, struct igc_led_classdev, led)
+
+static void igc_led_select(struct igc_adapter *adapter, int led,
+ u32 *mask, u32 *shift, u32 *blink)
+{
+ switch (led) {
+ case 0:
+ *mask = IGC_LEDCTL_LED0_MODE_MASK;
+ *shift = IGC_LEDCTL_LED0_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED0_BLINK;
+ break;
+ case 1:
+ *mask = IGC_LEDCTL_LED1_MODE_MASK;
+ *shift = IGC_LEDCTL_LED1_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED1_BLINK;
+ break;
+ case 2:
+ *mask = IGC_LEDCTL_LED2_MODE_MASK;
+ *shift = IGC_LEDCTL_LED2_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED2_BLINK;
+ break;
+ default:
+ *mask = *shift = *blink = 0;
+ netdev_err(adapter->netdev, "Unknown LED %d selected!\n", led);
+ }
+}
+
+static void igc_led_set(struct igc_adapter *adapter, int led, u32 mode,
+ bool blink)
+{
+ u32 shift, mask, blink_bit, ledctl;
+ struct igc_hw *hw = &adapter->hw;
+
+ igc_led_select(adapter, led, &mask, &shift, &blink_bit);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ mutex_lock(&adapter->led_mutex);
+
+ /* Set mode */
+ ledctl = rd32(IGC_LEDCTL);
+ ledctl &= ~mask;
+ ledctl |= mode << shift;
+
+ /* Configure blinking */
+ if (blink)
+ ledctl |= blink_bit;
+ else
+ ledctl &= ~blink_bit;
+ wr32(IGC_LEDCTL, ledctl);
+
+ mutex_unlock(&adapter->led_mutex);
+ pm_runtime_put(&adapter->pdev->dev);
+}
+
+static u32 igc_led_get(struct igc_adapter *adapter, int led)
+{
+ u32 shift, mask, blink_bit, ledctl;
+ struct igc_hw *hw = &adapter->hw;
+
+ igc_led_select(adapter, led, &mask, &shift, &blink_bit);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ mutex_lock(&adapter->led_mutex);
+ ledctl = rd32(IGC_LEDCTL);
+ mutex_unlock(&adapter->led_mutex);
+ pm_runtime_put(&adapter->pdev->dev);
+
+ return (ledctl & mask) >> shift;
+}
+
+static int igc_led_brightness_set_blocking(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode;
+
+ if (brightness)
+ mode = IGC_LEDCTL_MODE_ON;
+ else
+ mode = IGC_LEDCTL_MODE_OFF;
+
+ netdev_dbg(adapter->netdev, "Set brightness for LED %d to mode %u!\n",
+ ldev->index, mode);
+
+ igc_led_set(adapter, ldev->index, mode, false);
+
+ return 0;
+}
+
+static int igc_led_hw_control_is_supported(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ if (flags & ~IGC_SUPPORTED_MODES)
+ return -EOPNOTSUPP;
+
+ /* If Tx and Rx selected, activity can be offloaded unless some other
+ * mode is selected as well.
+ */
+ if ((flags & BIT(TRIGGER_NETDEV_TX)) &&
+ (flags & BIT(TRIGGER_NETDEV_RX)) &&
+ !(flags & ~IGC_ACTIVITY_MODES))
+ return 0;
+
+ /* Single Rx or Tx activity is not supported. */
+ if (flags & IGC_ACTIVITY_MODES)
+ return -EOPNOTSUPP;
+
+ /* Only one mode can be active at a given time. */
+ if (flags & (flags - 1))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int igc_led_hw_control_set(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode = IGC_LEDCTL_MODE_OFF;
+ bool blink = false;
+
+ if (flags & BIT(TRIGGER_NETDEV_LINK_10))
+ mode = IGC_LEDCTL_MODE_LINK_10;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_100))
+ mode = IGC_LEDCTL_MODE_LINK_100;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode = IGC_LEDCTL_MODE_LINK_1000;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_2500))
+ mode = IGC_LEDCTL_MODE_LINK_2500;
+ if ((flags & BIT(TRIGGER_NETDEV_TX)) &&
+ (flags & BIT(TRIGGER_NETDEV_RX)))
+ mode = IGC_LEDCTL_MODE_ACTIVITY;
+
+ netdev_dbg(adapter->netdev, "Set HW control for LED %d to mode %u!\n",
+ ldev->index, mode);
+
+ /* blink is recommended for activity */
+ if (mode == IGC_LEDCTL_MODE_ACTIVITY)
+ blink = true;
+
+ igc_led_set(adapter, ldev->index, mode, blink);
+
+ return 0;
+}
+
+static int igc_led_hw_control_get(struct led_classdev *led_cdev,
+ unsigned long *flags)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode;
+
+ mode = igc_led_get(adapter, ldev->index);
+
+ switch (mode) {
+ case IGC_LEDCTL_MODE_ACTIVITY:
+ *flags = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case IGC_LEDCTL_MODE_LINK_10:
+ *flags = BIT(TRIGGER_NETDEV_LINK_10);
+ break;
+ case IGC_LEDCTL_MODE_LINK_100:
+ *flags = BIT(TRIGGER_NETDEV_LINK_100);
+ break;
+ case IGC_LEDCTL_MODE_LINK_1000:
+ *flags = BIT(TRIGGER_NETDEV_LINK_1000);
+ break;
+ case IGC_LEDCTL_MODE_LINK_2500:
+ *flags = BIT(TRIGGER_NETDEV_LINK_2500);
+ break;
+ }
+
+ return 0;
+}
+
+static struct device *igc_led_hw_control_get_device(struct led_classdev *led_cdev)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+
+ return &ldev->netdev->dev;
+}
+
+static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf,
+ size_t buf_len)
+{
+ snprintf(buf, buf_len, "igc-%x%x-led%d",
+ pci_domain_nr(adapter->pdev->bus),
+ pci_dev_id(adapter->pdev), index);
+}
+
+static int igc_setup_ldev(struct igc_led_classdev *ldev,
+ struct net_device *netdev, int index)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct led_classdev *led_cdev = &ldev->led;
+ char led_name[LED_MAX_NAME_SIZE];
+
+ ldev->netdev = netdev;
+ ldev->index = index;
+
+ igc_led_get_name(adapter, index, led_name, LED_MAX_NAME_SIZE);
+ led_cdev->name = led_name;
+ led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+ led_cdev->max_brightness = 1;
+ led_cdev->brightness_set_blocking = igc_led_brightness_set_blocking;
+ led_cdev->hw_control_trigger = "netdev";
+ led_cdev->hw_control_is_supported = igc_led_hw_control_is_supported;
+ led_cdev->hw_control_set = igc_led_hw_control_set;
+ led_cdev->hw_control_get = igc_led_hw_control_get;
+ led_cdev->hw_control_get_device = igc_led_hw_control_get_device;
+
+ return led_classdev_register(&netdev->dev, led_cdev);
+}
+
+int igc_led_setup(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igc_led_classdev *leds;
+ int i, err;
+
+ mutex_init(&adapter->led_mutex);
+
+ leds = kcalloc(IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ for (i = 0; i < IGC_NUM_LEDS; i++) {
+ err = igc_setup_ldev(leds + i, netdev, i);
+ if (err)
+ goto err;
+ }
+
+ adapter->leds = leds;
+
+ return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ led_classdev_unregister(&((leds + i)->led));
+
+ kfree(leds);
+ return err;
+}
+
+void igc_led_free(struct igc_adapter *adapter)
+{
+ struct igc_led_classdev *leds = adapter->leds;
+ int i;
+
+ for (i = 0; i < IGC_NUM_LEDS; i++)
+ led_classdev_unregister(&((leds + i)->led));
+
+ kfree(leds);
+}
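
The registration above creates three LED class devices per adapter whose names encode the PCI address. A minimal sketch of the resulting name, assuming a NIC at 0000:03:00.0, for which pci_dev_id() packs bus and devfn into 0x0300 (illustrative only):

    static void example_led_name(char *buf, size_t len)
    {
            /* Produces "igc-0300-led0"; the netdev trigger can then be bound
             * to /sys/class/leds/igc-0300-led0 for hardware offload.
             */
            snprintf(buf, len, "igc-%x%x-led%d", 0x0, 0x0300, 0);
    }
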
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index e9bb403bbacf..b5bcabab7a1d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1299,14 +1299,16 @@ static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
/* insert L4 checksum */
- olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
- ((IGC_TXD_POPTS_TXSM << 8) /
- IGC_TX_FLAGS_CSUM);
+ olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_CSUM,
+ (IGC_TXD_POPTS_TXSM << 8));
/* insert IPv4 checksum */
- olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
- (((IGC_TXD_POPTS_IXSM << 8)) /
- IGC_TX_FLAGS_IPV4);
+ olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_IPV4,
+ (IGC_TXD_POPTS_IXSM << 8));
+
+ /* Use the second timer (free running, in general) for the timestamp */
+ olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_TIMER_1,
+ IGC_TXD_PTP2_TIMER_1);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
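
IGC_SET_FLAG() replaces the open-coded multiply/divide idiom removed above: it maps a flag bit in tx_flags onto the corresponding descriptor bit without a branch. An illustrative macro with the same behaviour (mirroring the pattern used elsewhere in Intel drivers; the real definition lives in igc.h):

    #define EXAMPLE_SET_FLAG(_input, _flag, _result) \
            (((_flag) <= (_result)) ? \
             (((u32)(_input) & (_flag)) * ((_result) / (_flag))) : \
             (((u32)(_input) & (_flag)) / ((_flag) / (_result))))
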
@@ -1640,10 +1642,6 @@ done:
if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- /* FIXME: add support for retrieving timestamps from
- * the other timer registers before skipping the
- * timestamping request.
- */
unsigned long flags;
u32 tstamp_flags;
@@ -1651,6 +1649,8 @@ done:
if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES)
+ tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1;
} else {
adapter->tx_hwtstamp_skipped++;
}
@@ -1963,9 +1963,9 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct igc_rx_buffer *rx_buffer,
- struct xdp_buff *xdp,
- ktime_t timestamp)
+ struct igc_xdp_buff *ctx)
{
+ struct xdp_buff *xdp = &ctx->xdp;
unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
@@ -1982,8 +1982,10 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
if (unlikely(!skb))
return NULL;
- if (timestamp)
- skb_hwtstamps(skb)->hwtstamp = timestamp;
+ if (ctx->rx_ts) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
+ skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
+ }
/* Determine available headroom for copy */
headlen = size;
@@ -2583,11 +2585,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
int xdp_status = 0, rx_buffer_pgcnt;
while (likely(total_packets < budget)) {
- union igc_adv_rx_desc *rx_desc;
+ struct igc_xdp_buff ctx = { .rx_ts = NULL };
struct igc_rx_buffer *rx_buffer;
+ union igc_adv_rx_desc *rx_desc;
unsigned int size, truesize;
- struct igc_xdp_buff ctx;
- ktime_t timestamp = 0;
int pkt_offset = 0;
void *pktbuf;
@@ -2614,9 +2615,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
- timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
- pktbuf);
- ctx.rx_ts = timestamp;
+ ctx.rx_ts = pktbuf;
pkt_offset = IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
}
@@ -2653,8 +2652,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
else if (ring_uses_build_skb(rx_ring))
skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);
else
- skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp,
- timestamp);
+ skb = igc_construct_skb(rx_ring, rx_buffer, &ctx);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -2714,8 +2712,7 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
net_prefetch(xdp->data_meta);
- skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&ring->q_vector->napi, totalsize);
if (unlikely(!skb))
return NULL;
@@ -2803,9 +2800,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
ctx->rx_desc = desc;
if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
- timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
- bi->xdp->data);
- ctx->rx_ts = timestamp;
+ ctx->rx_ts = bi->xdp->data;
bi->xdp->data += IGC_TS_HDR_LEN;
@@ -2878,6 +2873,89 @@ static void igc_update_tx_stats(struct igc_q_vector *q_vector,
q_vector->tx.total_packets += packets;
}
+static void igc_xsk_request_timestamp(void *_priv)
+{
+ struct igc_metadata_request *meta_req = _priv;
+ struct igc_ring *tx_ring = meta_req->tx_ring;
+ struct igc_tx_timestamp_request *tstamp;
+ u32 tx_flags = IGC_TX_FLAGS_TSTAMP;
+ struct igc_adapter *adapter;
+ unsigned long lock_flags;
+ bool found = false;
+ int i;
+
+ if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) {
+ adapter = netdev_priv(tx_ring->netdev);
+
+ spin_lock_irqsave(&adapter->ptp_tx_lock, lock_flags);
+
+ /* Search for available tstamp regs */
+ for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
+ tstamp = &adapter->tx_tstamp[i];
+
+			/* tstamp->skb and tstamp->xsk_tx_buffer are members of a
+			 * union. When tstamp->skb is NULL, tstamp->xsk_tx_buffer is
+			 * NULL as well, which means this tstamp register is not
+			 * occupied by another packet.
+ */
+ if (!tstamp->skb) {
+ found = true;
+ break;
+ }
+ }
+
+ /* Return if no available tstamp regs */
+ if (!found) {
+ adapter->tx_hwtstamp_skipped++;
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock,
+ lock_flags);
+ return;
+ }
+
+ tstamp->start = jiffies;
+ tstamp->xsk_queue_index = tx_ring->queue_index;
+ tstamp->xsk_tx_buffer = meta_req->tx_buffer;
+ tstamp->buffer_type = IGC_TX_BUFFER_TYPE_XSK;
+
+ /* Hold the transmit completion until timestamp is ready */
+ meta_req->tx_buffer->xsk_pending_ts = true;
+
+		/* Keep the pointer to tx_timestamp, which is located in the XDP
+		 * metadata area; the Tx hardware timestamp value will be stored
+		 * there.
+ */
+ xsk_tx_metadata_to_compl(meta_req->meta, &tstamp->xsk_meta);
+
+ /* Set timestamp bit based on the _TSTAMP(_X) bit. */
+ tx_flags |= tstamp->flags;
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP,
+ (IGC_ADVTXD_MAC_TSTAMP));
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP_1,
+ (IGC_ADVTXD_TSTAMP_REG_1));
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP_2,
+ (IGC_ADVTXD_TSTAMP_REG_2));
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP_3,
+ (IGC_ADVTXD_TSTAMP_REG_3));
+
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock, lock_flags);
+ }
+}
+
+static u64 igc_xsk_fill_timestamp(void *_priv)
+{
+ return *(u64 *)_priv;
+}
+
+const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = {
+ .tmo_request_timestamp = igc_xsk_request_timestamp,
+ .tmo_fill_timestamp = igc_xsk_fill_timestamp,
+};
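
These tmo_* callbacks are only half of the flow: .tmo_request_timestamp reserves one of the timestamp registers at transmit time, and .tmo_fill_timestamp copies the captured value back into the frame's metadata area on completion. A minimal sketch of the completion side, stated as an assumption about code outside this hunk:

    static void example_xsk_tstamp_complete(struct igc_tx_timestamp_request *tstamp,
                                            u64 ns)
    {
            /* Invokes .tmo_fill_timestamp above with &ns as the priv pointer,
             * storing the nanosecond value into the xsk Tx metadata completion.
             */
            xsk_tx_metadata_complete(&tstamp->xsk_meta,
                                     &igc_xsk_tx_metadata_ops, &ns);
    }
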
+
static void igc_xdp_xmit_zc(struct igc_ring *ring)
{
struct xsk_buff_pool *pool = ring->xsk_pool;
@@ -2899,24 +2977,34 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
budget = igc_desc_unused(ring);
while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
- u32 cmd_type, olinfo_status;
+ struct igc_metadata_request meta_req;
+ struct xsk_tx_metadata *meta = NULL;
struct igc_tx_buffer *bi;
+ u32 olinfo_status;
dma_addr_t dma;
- cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
- IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
- xdp_desc.len;
+ meta_req.cmd_type = IGC_ADVTXD_DTYP_DATA |
+ IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS |
+ IGC_TXD_DCMD | xdp_desc.len;
olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
+ bi = &ring->tx_buffer_info[ntu];
+
+ meta_req.tx_ring = ring;
+ meta_req.tx_buffer = bi;
+ meta_req.meta = meta;
+ xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops,
+ &meta_req);
tx_desc = IGC_TX_DESC(ring, ntu);
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
tx_desc->read.buffer_addr = cpu_to_le64(dma);
- bi = &ring->tx_buffer_info[ntu];
bi->type = IGC_TX_BUFFER_TYPE_XSK;
bi->protocol = 0;
bi->bytecount = xdp_desc.len;
@@ -2979,6 +3067,13 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
break;
+ /* Hold the completions while there's a pending tx hardware
+ * timestamp request from XDP Tx metadata.
+ */
+ if (tx_buffer->type == IGC_TX_BUFFER_TYPE_XSK &&
+ tx_buffer->xsk_pending_ts)
+ break;
+
/* clear next_to_watch to prevent false hangs */
tx_buffer->next_to_watch = NULL;
@@ -3385,7 +3480,7 @@ static int igc_flex_filter_select(struct igc_adapter *adapter,
u32 fhftsl;
if (input->index >= MAX_FLEX_FILTER) {
- dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
+ netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n");
return -EINVAL;
}
@@ -3420,7 +3515,6 @@ static int igc_flex_filter_select(struct igc_adapter *adapter,
static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
struct igc_flex_filter *input)
{
- struct device *dev = &adapter->pdev->dev;
struct igc_hw *hw = &adapter->hw;
u8 *data = input->data;
u8 *mask = input->mask;
@@ -3434,7 +3528,7 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
* out early to avoid surprises later.
*/
if (input->length % 8 != 0) {
- dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n");
+ netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n");
return -EINVAL;
}
@@ -3452,8 +3546,8 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
/* Configure filter */
queuing = input->length & IGC_FHFT_LENGTH_MASK;
- queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK;
- queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK;
+ queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue);
+ queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio);
if (input->immediate_irq)
queuing |= IGC_FHFT_IMM_INT;
@@ -3504,8 +3598,8 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
}
wr32(IGC_WUFC, wufc);
- dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
- input->index);
+ netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n",
+ input->index);
return 0;
}
@@ -3577,9 +3671,9 @@ static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
static int igc_add_flex_filter(struct igc_adapter *adapter,
struct igc_nfc_rule *rule)
{
- struct igc_flex_filter flex = { };
struct igc_nfc_filter *filter = &rule->filter;
unsigned int eth_offset, user_offset;
+ struct igc_flex_filter flex = { };
int ret, index;
bool vlan;
@@ -3615,10 +3709,12 @@ static int igc_add_flex_filter(struct igc_adapter *adapter,
ETH_ALEN, NULL);
/* Add VLAN etype */
- if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
- igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
- sizeof(filter->vlan_etype),
- NULL);
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
+ __be16 vlan_etype = cpu_to_be16(filter->vlan_etype);
+
+ igc_flex_filter_add_field(&flex, &vlan_etype, 12,
+ sizeof(vlan_etype), NULL);
+ }
/* Add VLAN TCI */
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
@@ -3712,8 +3808,7 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter,
}
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
- int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
+ int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci);
err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
if (err)
@@ -3735,8 +3830,7 @@ static void igc_disable_nfc_rule(struct igc_adapter *adapter,
igc_del_etype_filter(adapter, rule->filter.etype);
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
- int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
+ int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci);
igc_del_vlan_prio_filter(adapter, prio);
}
@@ -5181,7 +5275,7 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
igc_down(adapter);
netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igc_up(adapter);
@@ -5278,7 +5372,7 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -5304,25 +5398,22 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
- u32 ack, tsauxc, sec, nsec, tsicr;
struct igc_hw *hw = &adapter->hw;
+ u32 tsauxc, sec, nsec, tsicr;
struct ptp_clock_event event;
struct timespec64 ts;
tsicr = rd32(IGC_TSICR);
- ack = 0;
if (tsicr & IGC_TSICR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_SYS_WRAP;
}
if (tsicr & IGC_TSICR_TXTS) {
/* retrieve hardware timestamp */
igc_ptp_tx_tstamp_event(adapter);
- ack |= IGC_TSICR_TXTS;
}
if (tsicr & IGC_TSICR_TT0) {
@@ -5336,7 +5427,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
wr32(IGC_TSAUXC, tsauxc);
adapter->perout[0].start = ts;
spin_unlock(&adapter->tmreg_lock);
- ack |= IGC_TSICR_TT0;
}
if (tsicr & IGC_TSICR_TT1) {
@@ -5350,7 +5440,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
wr32(IGC_TSAUXC, tsauxc);
adapter->perout[1].start = ts;
spin_unlock(&adapter->tmreg_lock);
- ack |= IGC_TSICR_TT1;
}
if (tsicr & IGC_TSICR_AUTT0) {
@@ -5360,7 +5449,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
event.index = 0;
event.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_AUTT0;
}
if (tsicr & IGC_TSICR_AUTT1) {
@@ -5370,11 +5458,7 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
event.index = 1;
event.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_AUTT1;
}
-
- /* acknowledge the interrupts */
- wr32(IGC_TSICR, ack);
}
/**
@@ -5945,15 +6029,6 @@ static int __igc_open(struct net_device *netdev, bool resuming)
if (err)
goto err_req_irq;
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
- if (err)
- goto err_set_queues;
-
clear_bit(__IGC_DOWN, &adapter->state);
for (i = 0; i < adapter->num_q_vectors; i++)
@@ -5974,8 +6049,6 @@ static int __igc_open(struct net_device *netdev, bool resuming)
return IGC_SUCCESS;
-err_set_queues:
- igc_free_irq(adapter);
err_req_irq:
igc_release_hw_control(adapter);
igc_power_down_phy_copper_base(&adapter->hw);
@@ -5992,6 +6065,17 @@ err_setup_tx:
int igc_open(struct net_device *netdev)
{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_queues(netdev, adapter->num_tx_queues,
+ adapter->num_rx_queues);
+ if (err) {
+ netdev_err(netdev, "error setting real queue count\n");
+ return err;
+ }
+
return __igc_open(netdev, false);
}
@@ -6489,7 +6573,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
int cpu = smp_processor_id();
struct netdev_queue *nq;
struct igc_ring *ring;
- int i, drops;
+ int i, nxmit;
if (unlikely(!netif_carrier_ok(dev)))
return -ENETDOWN;
@@ -6505,16 +6589,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
/* Avoid transmit queue timeout since we share it with the slow path */
txq_trans_cond_update(nq);
- drops = 0;
+ nxmit = 0;
for (i = 0; i < num_frames; i++) {
int err;
struct xdp_frame *xdpf = frames[i];
err = igc_xdp_init_tx_descriptor(ring, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (err)
+ break;
+ nxmit++;
}
if (flags & XDP_XMIT_FLUSH)
@@ -6522,7 +6605,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
__netif_tx_unlock(nq);
- return num_frames - drops;
+ return nxmit;
}
static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
@@ -6562,6 +6645,24 @@ int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
return 0;
}
+static ktime_t igc_get_tstamp(struct net_device *dev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ struct igc_inline_rx_tstamps *tstamp;
+ ktime_t timestamp;
+
+ tstamp = hwtstamps->netdev_data;
+
+ if (cycles)
+ timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1);
+ else
+ timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0);
+
+ return timestamp;
+}
+
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -6579,6 +6680,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_bpf = igc_bpf,
.ndo_xdp_xmit = igc_xdp_xmit,
.ndo_xsk_wakeup = igc_xsk_wakeup,
+ .ndo_get_tstamp = igc_get_tstamp,
};
/* PCIe configuration access */
@@ -6682,9 +6784,11 @@ static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
const struct igc_xdp_buff *ctx = (void *)_ctx;
+ struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev);
+ struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts;
if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
- *timestamp = ctx->rx_ts;
+ *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0);
return 0;
}
@@ -6798,6 +6902,7 @@ static int igc_probe(struct pci_dev *pdev,
netdev->netdev_ops = &igc_netdev_ops;
netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
+ netdev->xsk_tx_metadata_ops = &igc_xsk_tx_metadata_ops;
igc_ethtool_set_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
@@ -6923,8 +7028,6 @@ static int igc_probe(struct pci_dev *pdev,
device_set_wakeup_enable(&adapter->pdev->dev,
adapter->flags & IGC_FLAG_WOL_SUPPORTED);
- igc_ptp_init(adapter);
-
igc_tsn_clear_schedule(adapter);
/* reset the hardware with the new settings */
@@ -6946,6 +7049,9 @@ static int igc_probe(struct pci_dev *pdev,
/* Check if Media Autosense is enabled */
adapter->ei = *ei;
+ /* do hw tstamp init after resetting */
+ igc_ptp_init(adapter);
+
/* print pcie link status and MAC address */
pcie_print_link_status(pdev);
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
@@ -6958,6 +7064,12 @@ static int igc_probe(struct pci_dev *pdev,
pm_runtime_put_noidle(&pdev->dev);
+ if (IS_ENABLED(CONFIG_IGC_LEDS)) {
+ err = igc_led_setup(adapter);
+ if (err)
+ goto err_register;
+ }
+
return 0;
err_register:
@@ -7010,6 +7122,9 @@ static void igc_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->watchdog_task);
hrtimer_cancel(&adapter->hrtimer);
+ if (IS_ENABLED(CONFIG_IGC_LEDS))
+ igc_led_free(adapter);
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -7094,8 +7209,7 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
return 0;
}
-#ifdef CONFIG_PM
-static int __maybe_unused igc_runtime_suspend(struct device *dev)
+static int igc_runtime_suspend(struct device *dev)
{
return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}
@@ -7130,7 +7244,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int __maybe_unused igc_resume(struct device *dev)
+static int igc_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7172,23 +7286,21 @@ static int __maybe_unused igc_resume(struct device *dev)
wr32(IGC_WUS, ~0);
- rtnl_lock();
- if (!err && netif_running(netdev))
+ if (netif_running(netdev)) {
err = __igc_open(netdev, true);
-
- if (!err)
- netif_device_attach(netdev);
- rtnl_unlock();
+ if (!err)
+ netif_device_attach(netdev);
+ }
return err;
}
-static int __maybe_unused igc_runtime_resume(struct device *dev)
+static int igc_runtime_resume(struct device *dev)
{
return igc_resume(dev);
}
-static int __maybe_unused igc_suspend(struct device *dev)
+static int igc_suspend(struct device *dev)
{
return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}
@@ -7203,7 +7315,6 @@ static int __maybe_unused igc_runtime_idle(struct device *dev)
return -EBUSY;
}
-#endif /* CONFIG_PM */
static void igc_shutdown(struct pci_dev *pdev)
{
@@ -7318,22 +7429,16 @@ static const struct pci_error_handlers igc_err_handler = {
.resume = igc_io_resume,
};
-#ifdef CONFIG_PM
-static const struct dev_pm_ops igc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
- SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
- igc_runtime_idle)
-};
-#endif
+static _DEFINE_DEV_PM_OPS(igc_pm_ops, igc_suspend, igc_resume,
+ igc_runtime_suspend, igc_runtime_resume,
+ igc_runtime_idle);
static struct pci_driver igc_driver = {
.name = igc_driver_name,
.id_table = igc_pci_tbl,
.probe = igc_probe,
.remove = igc_remove,
-#ifdef CONFIG_PM
- .driver.pm = &igc_pm_ops,
-#endif
+ .driver.pm = pm_ptr(&igc_pm_ops),
.shutdown = igc_shutdown,
.err_handler = &igc_err_handler,
};
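The hunks above drop the CONFIG_PM #ifdefs in favour of the generic dev_pm_ops helpers; a minimal sketch of the same pattern for a hypothetical PCI driver (names illustrative, not igc code):

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)         { return 0; }
static int foo_resume(struct device *dev)          { return 0; }
static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev)  { return 0; }
static int foo_runtime_idle(struct device *dev)    { return -EBUSY; }

/* Always compiled; when CONFIG_PM is off, pm_ptr() evaluates to NULL and
 * the unreferenced ops (and callbacks) can be discarded, so no #ifdef or
 * __maybe_unused annotations are needed.
 */
static _DEFINE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume,
                          foo_runtime_suspend, foo_runtime_resume,
                          foo_runtime_idle);

static struct pci_driver foo_driver = {
        .name      = "foo",
        .driver.pm = pm_ptr(&foo_pm_ops),
};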
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 53b77c969c85..861f37076861 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
+#include <linux/bitfield.h>
#include "igc_phy.h"
/**
@@ -129,11 +130,7 @@ void igc_power_down_phy_copper(struct igc_hw *hw)
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
-
- /* Temporary workaround - should be removed when PHY will implement
- * IEEE registers as properly
- */
- /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
usleep_range(1000, 2000);
}
@@ -726,7 +723,7 @@ static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr,
*/
s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
{
- u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+ u8 dev_addr = FIELD_GET(GPY_MMD_MASK, offset);
s32 ret_val;
offset = offset & GPY_REG_MASK;
@@ -757,7 +754,7 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
*/
s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
{
- u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+ u8 dev_addr = FIELD_GET(GPY_MMD_MASK, offset);
s32 ret_val;
offset = offset & GPY_REG_MASK;
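The FIELD_GET() conversions in this file follow the usual <linux/bitfield.h> idiom; a small illustrative sketch with a hypothetical mask (the real GPY_MMD_MASK value is not shown in this hunk):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_MMD_MASK        GENMASK(20, 16) /* hypothetical field */

static u8 example_mmd_from_offset(u32 offset)
{
        /* Equivalent to "(offset & EXAMPLE_MMD_MASK) >> 16", but the
         * shift is derived from the mask, so mask and shift cannot
         * drift apart.
         */
        return FIELD_GET(EXAMPLE_MMD_MASK, offset);
}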
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 928f38792203..1bb026232efc 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -11,6 +11,7 @@
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
+#include <net/xdp_sock_drv.h>
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
@@ -459,12 +460,10 @@ static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
/**
* igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
* @adapter: Pointer to adapter the packet buffer belongs to
- * @buf: Pointer to packet buffer
+ * @buf: Pointer to start of timestamp in HW format (2 32-bit words)
*
- * This function retrieves the timestamp saved in the beginning of packet
- * buffer. While two timestamps are available, one in timer0 reference and the
- * other in timer1 reference, this function considers only the timestamp in
- * timer0 reference.
+ * This function retrieves and converts the timestamp stored at @buf
+ * to ktime_t, adjusting for hardware latencies.
*
* Returns timestamp value.
*/
@@ -474,17 +473,8 @@ ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf)
u32 secs, nsecs;
int adjust;
- /* Timestamps are saved in little endian at the beginning of the packet
- * buffer following the layout:
- *
- * DWORD: | 0 | 1 | 2 | 3 |
- * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
- *
- * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
- * part of the timestamp.
- */
- nsecs = le32_to_cpu(buf[2]);
- secs = le32_to_cpu(buf[3]);
+ nsecs = le32_to_cpu(buf[0]);
+ secs = le32_to_cpu(buf[1]);
timestamp = ktime_set(secs, nsecs);
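The two words read above come from the timestamp block the hardware prepends to the packet buffer. Consistent with the layout in the removed comment, the struct igc_inline_rx_tstamps used at the XDP call site earlier in this patch presumably looks like this (sketch, field comments inferred from that comment):

struct igc_inline_rx_tstamps {
        __le32 timer1[2];       /* DWORD 0/1: free-running clock, SYSTIML then SYSTIMH */
        __le32 timer0[2];       /* DWORD 2/3: adjustable clock,   SYSTIML then SYSTIMH */
};

With either timer passed in, buf[0] is then the nanoseconds word and buf[1] the seconds word.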
@@ -542,10 +532,11 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
val = rd32(IGC_SRRCTL(i));
- /* FIXME: For now, only support retrieving RX timestamps from
- * timer 0.
+ /* Enable retrieving timestamps from timer 0, the
+ * "adjustable clock", and timer 1, the "free running
+ * clock".
*/
- val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) |
+ val |= IGC_SRRCTL_TIMER1SEL(1) | IGC_SRRCTL_TIMER0SEL(0) |
IGC_SRRCTL_TIMESTAMP;
wr32(IGC_SRRCTL(i), val);
}
@@ -555,6 +546,30 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter)
wr32(IGC_TSYNCRXCTL, val);
}
+static void igc_ptp_free_tx_buffer(struct igc_adapter *adapter,
+ struct igc_tx_timestamp_request *tstamp)
+{
+ if (tstamp->buffer_type == IGC_TX_BUFFER_TYPE_XSK) {
+ /* Release the transmit completion */
+ tstamp->xsk_tx_buffer->xsk_pending_ts = false;
+
+ /* Note: tstamp->skb and tstamp->xsk_tx_buffer are in a union.
+ * By setting tstamp->xsk_tx_buffer to NULL, tstamp->skb will
+ * become NULL as well.
+ */
+ tstamp->xsk_tx_buffer = NULL;
+ tstamp->buffer_type = 0;
+
+ /* Trigger txrx interrupt for transmit completion */
+ igc_xsk_wakeup(adapter->netdev, tstamp->xsk_queue_index, 0);
+
+ return;
+ }
+
+ dev_kfree_skb_any(tstamp->skb);
+ tstamp->skb = NULL;
+}
+
static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter)
{
unsigned long flags;
@@ -565,8 +580,8 @@ static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter)
for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];
- dev_kfree_skb_any(tstamp->skb);
- tstamp->skb = NULL;
+ if (tstamp->skb)
+ igc_ptp_free_tx_buffer(adapter, tstamp);
}
spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
@@ -667,8 +682,9 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
static void igc_ptp_tx_timeout(struct igc_adapter *adapter,
struct igc_tx_timestamp_request *tstamp)
{
- dev_kfree_skb_any(tstamp->skb);
- tstamp->skb = NULL;
+ if (tstamp->skb)
+ igc_ptp_free_tx_buffer(adapter, tstamp);
+
adapter->tx_hwtstamp_timeouts++;
netdev_warn(adapter->netdev, "Tx timestamp timeout\n");
@@ -739,10 +755,21 @@ static void igc_ptp_tx_reg_to_stamp(struct igc_adapter *adapter,
shhwtstamps.hwtstamp =
ktime_add_ns(shhwtstamps.hwtstamp, adjust);
- tstamp->skb = NULL;
+ /* Copy the tx hardware timestamp into xdp metadata or skb */
+ if (tstamp->buffer_type == IGC_TX_BUFFER_TYPE_XSK) {
+ struct xsk_buff_pool *xsk_pool;
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
+ xsk_pool = adapter->tx_ring[tstamp->xsk_queue_index]->xsk_pool;
+ if (xsk_pool && xp_tx_metadata_enabled(xsk_pool)) {
+ xsk_tx_metadata_complete(&tstamp->xsk_meta,
+ &igc_xsk_tx_metadata_ops,
+ &shhwtstamps.hwtstamp);
+ }
+ } else {
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+ igc_ptp_free_tx_buffer(adapter, tstamp);
}
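The XSK branch above completes a timestamp that the AF_XDP application asked for through Tx metadata. A hedged user-space sketch of the request side, assuming the umem was registered with tx_metadata_len = sizeof(struct xsk_tx_metadata) and that the Tx descriptor carries XDP_TX_METADATA in its options field (socket and ring setup elided):

#include <linux/if_xdp.h>
#include <string.h>

static void request_tx_timestamp(void *frame_data)
{
        /* Metadata lives immediately in front of the packet data. */
        struct xsk_tx_metadata *meta =
                (struct xsk_tx_metadata *)((char *)frame_data - sizeof(*meta));

        memset(meta, 0, sizeof(*meta));
        meta->flags = XDP_TXMD_FLAGS_TIMESTAMP;
        /* After completion, the driver has filled meta->completion.tx_timestamp
         * via xsk_tx_metadata_complete(), as in the hunk above.
         */
}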
/**
@@ -1035,6 +1062,26 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
adapter, &adapter->snapshot, cts);
}
+static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter, ptp_caps);
+ struct igc_hw *hw = &igc->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&igc->free_timer_lock, flags);
+
+ ptp_read_system_prets(sts);
+ ts->tv_nsec = rd32(IGC_SYSTIML_1);
+ ts->tv_sec = rd32(IGC_SYSTIMH_1);
+ ptp_read_system_postts(sts);
+
+ spin_unlock_irqrestore(&igc->free_timer_lock, flags);
+
+ return 0;
+}
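getcyclesx64 exposes the free-running timer to the PTP core, which can derive virtual clocks from it. A user-space sketch of reading such a clock through the usual PHC character device (device path illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd)       ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
        struct timespec ts;
        int fd = open("/dev/ptp1", O_RDONLY);   /* illustrative path */

        if (fd < 0)
                return 1;

        clock_gettime(FD_TO_CLOCKID(fd), &ts);
        printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        close(fd);
        return 0;
}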
+
/**
* igc_ptp_init - Initialize PTP functionality
* @adapter: Board private structure
@@ -1088,6 +1135,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225;
adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225;
adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225;
+ adapter->ptp_caps.getcyclesx64 = igc_ptp_getcyclesx64;
adapter->ptp_caps.settime64 = igc_ptp_settime_i225;
adapter->ptp_caps.enable = igc_ptp_feature_enable_i225;
adapter->ptp_caps.pps = 1;
@@ -1108,6 +1156,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
}
spin_lock_init(&adapter->ptp_tx_lock);
+ spin_lock_init(&adapter->free_timer_lock);
spin_lock_init(&adapter->tmreg_lock);
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 20e17f5fbce3..e5b893fc5b66 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -12,6 +12,7 @@
#define IGC_MDIC 0x00020 /* MDI Control - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define IGC_VET 0x00038 /* VLAN Ether Type - RW */
+#define IGC_LEDCTL 0x00E00 /* LED Control - RW */
#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */
@@ -243,6 +244,11 @@
#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */
#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define IGC_SYSTIML_1 0x0B688 /* System time register Low - RO (timer 1) */
+#define IGC_SYSTIMH_1 0x0B68C /* System time register High - RO (timer 1) */
+#define IGC_SYSTIMR_1 0x0B684 /* System time register Residue (timer 1) */
+#define IGC_TIMINCA_1 0x0B690 /* Increment attributes register - RW (timer 1) */
+
/* TX Timestamp Low */
#define IGC_TXSTMPL_0 0x0B618
#define IGC_TXSTMPL_1 0x0B698
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index a9c08321aca9..22cefb1eeedf 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -227,7 +227,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
wr32(IGC_TQAVCC(i), tqavcc);
wr32(IGC_TQAVHC(i),
- 0x80000000 + ring->hicredit * 0x7735);
+ 0x80000000 + ring->hicredit * 0x7736);
} else {
/* Disable any CBS for the queue */
txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b6f0376e42f4..559b443c409f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -949,19 +949,19 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue);
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask);
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue);
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
@@ -1059,7 +1059,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
-s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#ifdef CONFIG_IXGBE_IPSEC
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 100388968e4d..283a23150a4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -15,10 +15,10 @@
#define IXGBE_82598_VFT_TBL_SIZE 128
#define IXGBE_82598_RX_PB_SIZE 512
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
/**
@@ -66,7 +66,7 @@ out:
IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}
-static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -93,12 +93,12 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
* not known. Perform the SFP init if necessary.
*
**/
-static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
u16 list_offset, data_offset;
+ int ret_val;
/* Identify the PHY */
phy->ops.identify(hw);
@@ -123,14 +123,14 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
if (ret_val)
return ret_val;
if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
/* Check to see if SFP+ module is supported */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
&list_offset,
&data_offset);
if (ret_val)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
break;
default:
break;
@@ -148,9 +148,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
* Then set pcie completion timeout
*
**/
-static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+static int ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
- s32 ret_val;
+ int ret_val;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -170,7 +170,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
*
* Determines the link capabilities by reading the AUTOC register.
**/
-static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -213,7 +213,7 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
break;
default:
- return IXGBE_ERR_LINK_SETUP;
+ return -EIO;
}
return 0;
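The ixgbe changes from here on repeat one pattern: the s32 return type becomes int and driver-private IXGBE_ERR_* codes become standard errnos. A minimal illustrative sketch (hypothetical names, not ixgbe code):

#include <linux/errno.h>
#include <linux/types.h>

/* before: s32 foo_set_vfta(...) returning FOO_ERR_PARAM */
static int foo_set_vfta(u32 vlan)
{
        if (vlan > 4095)
                return -EINVAL;         /* bad argument from the caller */

        return 0;                       /* success is still 0 */
}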
@@ -271,7 +271,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
*
* Enable flow control according to the current settings.
**/
-static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+static int ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
u32 fctrl_reg;
u32 rmcs_reg;
@@ -283,7 +283,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
/* Validate the water mark configuration */
if (!hw->fc.pause_time)
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
/* Low water mark of zero causes XOFF floods */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
@@ -292,7 +292,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
if (!hw->fc.low_water[i] ||
hw->fc.low_water[i] >= hw->fc.high_water[i]) {
hw_dbg(hw, "Invalid water mark configuration\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
}
}
@@ -369,7 +369,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
break;
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
/* Set 802.3x based flow control settings. */
@@ -411,13 +411,13 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
**/
-static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
+ int status = 0;
u32 autoc_reg;
u32 links_reg;
u32 i;
- s32 status = 0;
/* Restart link */
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -438,7 +438,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
msleep(100);
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
- status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ status = -EIO;
hw_dbg(hw, "Autonegotiation did not complete.\n");
}
}
@@ -457,7 +457,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
* Function indicates success when phy link is available. If phy is not ready
* within 5 seconds of MAC indicating link, the function returns error.
**/
-static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+static int ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
u32 timeout;
u16 an_reg;
@@ -478,7 +478,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
hw_dbg(hw, "Link was indicated but link is down\n");
- return IXGBE_ERR_LINK_SETUP;
+ return -EIO;
}
return 0;
@@ -493,7 +493,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
*
* Reads the links register to determine if link is up and the current speed
**/
-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *link_up,
bool link_up_wait_to_complete)
{
@@ -579,7 +579,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
*
* Set the link speed in the AUTOC register and restarts link.
**/
-static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
@@ -594,7 +594,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
speed &= link_capabilities;
if (speed == IXGBE_LINK_SPEED_UNKNOWN)
- return IXGBE_ERR_LINK_SETUP;
+ return -EINVAL;
/* Set KX4/KX support according to speed requested */
else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
@@ -624,11 +624,11 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
*
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- s32 status;
+ int status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
@@ -647,15 +647,15 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
* clears all interrupts, performing a PHY reset, and performing a link (MAC)
* reset.
**/
-static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
- s32 status;
- s32 phy_status = 0;
- u32 ctrl;
+ int phy_status = 0;
+ u8 analog_val;
u32 gheccr;
- u32 i;
+ int status;
u32 autoc;
- u8 analog_val;
+ u32 ctrl;
+ u32 i;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -701,9 +701,9 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
/* Init PHY and function pointers, perform SFP setup */
phy_status = hw->phy.ops.init(hw);
- if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (phy_status == -EOPNOTSUPP)
return phy_status;
- if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (phy_status == -ENOENT)
goto mac_reset_top;
hw->phy.ops.reset(hw);
@@ -727,7 +727,7 @@ mac_reset_top:
udelay(1);
}
if (ctrl & IXGBE_CTRL_RST) {
- status = IXGBE_ERR_RESET_FAILED;
+ status = -EIO;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
@@ -781,7 +781,7 @@ mac_reset_top:
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq set index
**/
-static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static int ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -789,12 +789,12 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
rar_high &= ~IXGBE_RAH_VIND_MASK;
- rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+ rar_high |= FIELD_PREP(IXGBE_RAH_VIND_MASK, vmdq);
IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
return 0;
}
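FIELD_PREP() is the write-side counterpart of the FIELD_GET() conversions seen earlier; a small sketch with a hypothetical mask:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_VIND_MASK       GENMASK(21, 18) /* hypothetical field */

static u32 example_set_vind(u32 rar_high, u32 vmdq)
{
        rar_high &= ~EXAMPLE_VIND_MASK;
        /* replaces "(vmdq << SHIFT) & MASK"; the shift comes from the mask */
        rar_high |= FIELD_PREP(EXAMPLE_VIND_MASK, vmdq);

        return rar_high;
}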
@@ -805,7 +805,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq clear index (not used in 82598, but elsewhere)
**/
-static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static int ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -814,7 +814,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
@@ -836,7 +836,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+static int ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
{
u32 regindex;
@@ -845,7 +845,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
u32 vftabyte;
if (vlan > 4095)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* Determine 32-bit word position in array */
regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
@@ -881,7 +881,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
*
* Clears the VLAN filter table, and the VMDq index associated with the filter
**/
-static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+static int ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
u32 offset;
u32 vlanbyte;
@@ -905,7 +905,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
*
* Performs read operation to Atlas analog register specified.
**/
-static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+static int ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
u32 atlas_ctl;
@@ -927,7 +927,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
*
* Performs write operation to Atlas analog register specified.
**/
-static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+static int ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
u32 atlas_ctl;
@@ -948,13 +948,13 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
*
* Performs 8 byte read operation to SFP module's data over I2C interface.
**/
-static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+static int ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
u8 byte_offset, u8 *eeprom_data)
{
- s32 status = 0;
u16 sfp_addr = 0;
u16 sfp_data = 0;
u16 sfp_stat = 0;
+ int status = 0;
u16 gssr;
u32 i;
@@ -964,7 +964,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
gssr = IXGBE_GSSR_PHY0_SM;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
if (hw->phy.type == ixgbe_phy_nl) {
/*
@@ -993,7 +993,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
hw_dbg(hw, "EEPROM read did not pass.\n");
- status = IXGBE_ERR_SFP_NOT_PRESENT;
+ status = -ENOENT;
goto out;
}
@@ -1003,7 +1003,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
*eeprom_data = (u8)(sfp_data >> 8);
} else {
- status = IXGBE_ERR_PHY;
+ status = -EIO;
}
out:
@@ -1019,7 +1019,7 @@ out:
*
* Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
**/
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
{
return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
@@ -1034,8 +1034,8 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
**/
-static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *sff8472_data)
+static int ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
{
return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
byte_offset, sff8472_data);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 58ea959a4482..cdaf087b4e85 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -21,24 +21,24 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void
ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed);
-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
@@ -98,10 +98,10 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
}
}
-static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+static int ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
- s32 ret_val;
u16 list_offset, data_offset, data_value;
+ int ret_val;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
@@ -117,7 +117,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
goto setup_sfp_err;
@@ -144,7 +144,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
if (ret_val) {
hw_dbg(hw, " sfp module setup not complete\n");
- return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ return -EIO;
}
}
@@ -159,7 +159,7 @@ setup_sfp_err:
usleep_range(hw->eeprom.semaphore_delay * 1000,
hw->eeprom.semaphore_delay * 2000);
hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
- return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ return -EIO;
}
/**
@@ -173,10 +173,10 @@ setup_sfp_err:
* prot_autoc_write_82599(). Note, that locked can only be true in cases
* where this function doesn't return an error.
**/
-static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
+static int prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
u32 *reg_val)
{
- s32 ret_val;
+ int ret_val;
*locked = false;
/* If LESM is on then we need to hold the SW/FW semaphore. */
@@ -184,7 +184,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
*locked = true;
}
@@ -203,9 +203,9 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
* This part (82599) may need to hold the SW/FW lock around all writes to
* AUTOC. Likewise after a write we need to do a pipeline reset.
**/
-static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+static int prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
- s32 ret_val = 0;
+ int ret_val = 0;
/* Blocked by MNG FW so bail */
if (ixgbe_check_reset_blocked(hw))
@@ -219,7 +219,7 @@ static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
locked = true;
}
@@ -237,7 +237,7 @@ out:
return ret_val;
}
-static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -263,11 +263,11 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
* not known. Perform the SFP init if necessary.
*
**/
-static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
+ int ret_val;
u32 esdp;
if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
@@ -322,7 +322,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
*
* Determines the link capabilities by reading the AUTOC register.
**/
-static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -334,7 +334,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*autoneg = true;
return 0;
@@ -400,7 +402,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
break;
default:
- return IXGBE_ERR_LINK_SETUP;
+ return -EIO;
}
if (hw->phy.multispeed_fiber) {
@@ -500,14 +502,14 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
**/
-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
{
+ bool got_lock = false;
+ int status = 0;
u32 autoc_reg;
u32 links_reg;
u32 i;
- s32 status = 0;
- bool got_lock = false;
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
status = hw->mac.ops.acquire_swfw_sync(hw,
@@ -541,7 +543,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
msleep(100);
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
- status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ status = -EIO;
hw_dbg(hw, "Autoneg did not complete.\n");
}
}
@@ -657,15 +659,15 @@ ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
*
* Implements the Intel SmartSpeed algorithm.
**/
-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- s32 status = 0;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- s32 i, j;
- bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ bool link_up = false;
+ int status = 0;
+ s32 i, j;
/* Set autoneg_advertised value based on input link speed */
hw->phy.autoneg_advertised = 0;
@@ -767,16 +769,15 @@ out:
*
* Set the link speed in the AUTOC register and restarts link.
**/
-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- bool autoneg = false;
- s32 status;
- u32 pma_pmd_1g, link_mode, links_reg, i;
- u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 pma_pmd_10g_serial, pma_pmd_1g, link_mode, links_reg, i;
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ bool autoneg = false;
+ int status;
/* holds the value of AUTOC register at this current point in time */
u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -785,6 +786,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* temporary variable used for comparison purposes */
u32 autoc = current_autoc;
+ pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
&autoneg);
@@ -794,7 +797,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
speed &= link_capabilities;
if (speed == IXGBE_LINK_SPEED_UNKNOWN)
- return IXGBE_ERR_LINK_SETUP;
+ return -EINVAL;
/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
if (hw->mac.orig_link_settings_stored)
@@ -861,8 +864,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
msleep(100);
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
- status =
- IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ status = -EIO;
hw_dbg(hw, "Autoneg did not complete.\n");
}
}
@@ -883,11 +885,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
*
* Restarts link on PHY and MAC based on settings passed in.
**/
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- s32 status;
+ int status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
@@ -906,13 +908,13 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
* and clears all interrupts, perform a PHY reset, and perform a link (MAC)
* reset.
**/
-static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
ixgbe_link_speed link_speed;
- s32 status;
u32 ctrl, i, autoc, autoc2;
- u32 curr_lms;
bool link_up = false;
+ u32 curr_lms;
+ int status;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -927,7 +929,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
/* Identify PHY and related function pointers */
status = hw->phy.ops.init(hw);
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status == -EOPNOTSUPP)
return status;
/* Setup SFP module if there is one present. */
@@ -936,7 +938,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->phy.sfp_setup_needed = false;
}
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status == -EOPNOTSUPP)
return status;
/* Reset PHY */
@@ -974,7 +976,7 @@ mac_reset_top:
}
if (ctrl & IXGBE_CTRL_RST_MASK) {
- status = IXGBE_ERR_RESET_FAILED;
+ status = -EIO;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
@@ -1082,7 +1084,7 @@ mac_reset_top:
* @hw: pointer to hardware structure
* @fdircmd: current value of FDIRCMD register
*/
-static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+static int ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
int i;
@@ -1093,19 +1095,19 @@ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
udelay(10);
}
- return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
+ return -EIO;
}
/**
* ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
- int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
u32 fdircmd;
- s32 err;
+ int err;
+ int i;
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
@@ -1155,7 +1157,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
- return IXGBE_ERR_FDIR_REINIT_FAILED;
+ return -EIO;
}
/* Clear FDIR statistics registers (read to clear) */
@@ -1213,7 +1215,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
* @fdirctrl: value to write to flow director control register, initially
* contains just the value of the Rx packet buffer allocation
**/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
/*
* Continue setup of fdirctrl register bits:
@@ -1237,7 +1239,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
* @fdirctrl: value to write to flow director control register, initially
* contains just the value of the Rx packet buffer allocation
**/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
/*
* Continue setup of fdirctrl register bits:
@@ -1360,7 +1362,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* Note that the tunnel bit in input must not be set when the hardware
* tunneling support does not exist.
**/
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue)
@@ -1387,7 +1389,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
break;
default:
hw_dbg(hw, " Error on flow type input\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
/* configure FDIRCMD register */
@@ -1516,7 +1518,7 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
#define IXGBE_STORE_AS_BE16(_value) __swab16(ntohs((_value)))
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask)
{
/* mask IPv6 since it is currently not supported */
@@ -1546,7 +1548,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
break;
default:
hw_dbg(hw, " Error on vm pool mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
@@ -1555,14 +1557,14 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
if (input_mask->formatted.dst_port ||
input_mask->formatted.src_port) {
hw_dbg(hw, " Error on src/dst port mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
break;
case IXGBE_ATR_L4TYPE_MASK:
break;
default:
hw_dbg(hw, " Error on flow type mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
@@ -1583,7 +1585,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
break;
default:
hw_dbg(hw, " Error on VLAN mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
@@ -1595,7 +1597,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
break;
default:
hw_dbg(hw, " Error on flexible byte mask\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
@@ -1628,12 +1630,12 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
return 0;
}
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue)
{
u32 fdirport, fdirvlan, fdirhash, fdircmd;
- s32 err;
+ int err;
/* currently IPv6 is not supported, must be programmed with 0 */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
@@ -1691,13 +1693,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
return 0;
}
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id)
{
u32 fdirhash;
u32 fdircmd;
- s32 err;
+ int err;
/* configure FDIRHASH register */
fdirhash = (__force u32)input->formatted.bkt_hash;
@@ -1735,7 +1737,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
*
* Performs read operation to Omer analog register specified.
**/
-static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+static int ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
u32 core_ctl;
@@ -1757,7 +1759,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
*
* Performs write operation to Omer analog register specified.
**/
-static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+static int ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
u32 core_ctl;
@@ -1777,9 +1779,9 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
* and the generation start_hw function.
* Then performs revision-specific operations, if any.
**/
-static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+static int ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
- s32 ret_val = 0;
+ int ret_val = 0;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -1803,9 +1805,9 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
* If PHY already detected, maintains current PHY type in hw struct,
* otherwise executes the PHY detection routine.
**/
-static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
/* Detect PHY if not unknown - returns success if already detected. */
status = ixgbe_identify_phy_generic(hw);
@@ -1824,7 +1826,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
/* Return error if SFP module has been detected but is not supported */
if (hw->phy.type == ixgbe_phy_sfp_unsupported)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
return status;
}
@@ -1836,7 +1838,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
*
* Enables the Rx DMA unit for 82599
**/
-static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+static int ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
/*
* Workaround for 82599 silicon errata when enabling the Rx datapath.
@@ -1863,15 +1865,15 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
* Verifies that the installed firmware version is 0.6 or higher
* for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
*
- * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
- * if the FW version is not supported.
+ * Return: -EACCES if the FW is not present or if the FW version is
+ * not supported.
**/
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
- s32 status = IXGBE_ERR_EEPROM_VERSION;
u16 fw_offset, fw_ptp_cfg_offset;
- u16 offset;
+ int status = -EACCES;
u16 fw_version = 0;
+ u16 offset;
/* firmware check is only necessary for SFI devices */
if (hw->phy.media_type != ixgbe_media_type_fiber)
@@ -1883,7 +1885,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
goto fw_version_err;
if (fw_offset == 0 || fw_offset == 0xFFFF)
- return IXGBE_ERR_EEPROM_VERSION;
+ return -EACCES;
/* get the offset to the Pass Through Patch Configuration block */
offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
@@ -1891,7 +1893,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
goto fw_version_err;
if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF)
- return IXGBE_ERR_EEPROM_VERSION;
+ return -EACCES;
/* get the firmware version */
offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
@@ -1905,7 +1907,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
fw_version_err:
hw_err(hw, "eeprom read at offset %d failed\n", offset);
- return IXGBE_ERR_EEPROM_VERSION;
+ return -EACCES;
}
/**
@@ -1918,7 +1920,7 @@ fw_version_err:
static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
- s32 status;
+ int status;
/* get the offset to the Firmware Module block */
status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
@@ -1957,7 +1959,7 @@ static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
*
* Retrieves 16 bit word(s) read from EEPROM
**/
-static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -1983,7 +1985,7 @@ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
*
* Reads a 16 bit word from the EEPROM
**/
-static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+static int ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
u16 offset, u16 *data)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -2007,11 +2009,11 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
* full pipeline reset. Note - We must hold the SW/FW semaphore before writing
* to AUTOC, so this function assumes the semaphore is held.
**/
-static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
- s32 ret_val;
- u32 anlp1_reg = 0;
u32 i, autoc_reg, autoc2_reg;
+ u32 anlp1_reg = 0;
+ int ret_val;
/* Enable link if disabled in NVM */
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
@@ -2038,7 +2040,7 @@ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
hw_dbg(hw, "auto negotiation not completed\n");
- ret_val = IXGBE_ERR_RESET_FAILED;
+ ret_val = -EIO;
goto reset_pipeline_out;
}
@@ -2062,12 +2064,12 @@ reset_pipeline_out:
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
**/
-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
- u32 esdp;
- s32 status;
s32 timeout = 200;
+ int status;
+ u32 esdp;
if (hw->phy.qsfp_shared_i2c_bus == true) {
/* Acquire I2C bus ownership. */
@@ -2087,7 +2089,7 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
if (!timeout) {
hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
- status = IXGBE_ERR_I2C;
+ status = -EIO;
goto release_i2c_access;
}
}
@@ -2116,12 +2118,12 @@ release_i2c_access:
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
**/
-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
- u32 esdp;
- s32 status;
s32 timeout = 200;
+ int status;
+ u32 esdp;
if (hw->phy.qsfp_shared_i2c_bus == true) {
/* Acquire I2C bus ownership. */
@@ -2141,7 +2143,7 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
if (!timeout) {
hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
- status = IXGBE_ERR_I2C;
+ status = -EIO;
goto release_i2c_access;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 878dd8dff528..3be1bfb16498 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -10,10 +10,10 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+static int ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
u16 count);
@@ -22,15 +22,15 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
-static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
+static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
/* Base table for registers values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
@@ -111,12 +111,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
*
* Called at init time to set up flow control.
**/
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
+int ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
- s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
- u16 reg_cu = 0;
bool locked = false;
+ int ret_val = 0;
+ u16 reg_cu = 0;
/*
* Validate the requested mode. Strict IEEE mode does not allow
@@ -124,7 +124,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
*/
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
/*
@@ -215,7 +215,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
break;
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
if (hw->mac.type != ixgbe_mac_X540) {
@@ -267,11 +267,11 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
* table, VLAN filter table, calls routine to set up link and flow control
* settings, and leaves transmit and receive units disabled and uninitialized
**/
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
- s32 ret_val;
- u32 ctrl_ext;
u16 device_caps;
+ u32 ctrl_ext;
+ int ret_val;
/* Set the media type */
hw->phy.media_type = hw->mac.ops.get_media_type(hw);
@@ -330,7 +330,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
* 82599
* X540
**/
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+int ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
u32 i;
@@ -354,9 +354,9 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
* up link and flow control settings, and leaves transmit and receive units
* disabled and uninitialized
**/
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+int ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
/* Reset the hardware */
status = hw->mac.ops.reset_hw(hw);
@@ -380,7 +380,7 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
* Clears all hardware statistics counters by reading them from the hardware
* Statistics counters are clear on read.
**/
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
u16 i = 0;
@@ -489,18 +489,18 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
*
* Reads the part number string from the EEPROM.
**/
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size)
{
- s32 ret_val;
- u16 data;
+ int ret_val;
u16 pba_ptr;
u16 offset;
u16 length;
+ u16 data;
if (pba_num == NULL) {
hw_dbg(hw, "PBA string buffer was null\n");
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
@@ -526,7 +526,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
/* we will need 11 characters to store the PBA */
if (pba_num_size < 11) {
hw_dbg(hw, "PBA string buffer too small\n");
- return IXGBE_ERR_NO_SPACE;
+ return -ENOSPC;
}
/* extract hex string from data and pba_ptr */
@@ -563,13 +563,13 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
if (length == 0xFFFF || length == 0) {
hw_dbg(hw, "NVM PBA number section invalid length\n");
- return IXGBE_ERR_PBA_SECTION;
+ return -EIO;
}
/* check if pba_num buffer is big enough */
if (pba_num_size < (((u32)length * 2) - 1)) {
hw_dbg(hw, "PBA string buffer too small\n");
- return IXGBE_ERR_NO_SPACE;
+ return -ENOSPC;
}
/* trim pba length from start of string */
@@ -599,7 +599,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
* A reset of the adapter must be performed prior to calling this function
* in order for the MAC address to have been loaded from the EEPROM into RAR0
**/
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
u32 rar_high;
u32 rar_low;
@@ -653,7 +653,7 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
*
* Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
**/
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
u16 link_status;
@@ -684,7 +684,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
u32 reg;
reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
- bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
+ bus->func = FIELD_GET(IXGBE_STATUS_LAN_ID, reg);
bus->lan_id = bus->func;
/* check for a port swap */
@@ -695,8 +695,8 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
/* Get MAC instance from EEPROM for configuring CS4227 */
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
- bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
- IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+ bus->instance_id = FIELD_GET(IXGBE_EE_CTRL_4_INST_ID,
+ ee_ctrl_4);
}
}
@@ -709,7 +709,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
u32 reg_val;
u16 i;
@@ -759,7 +759,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
* Store the index for the link active LED. This will be used to support
* blinking the LED.
**/
-s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
+int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 led_reg, led_mode;
@@ -800,12 +800,12 @@ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @index: led number to turn on
**/
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* To turn on the LED, set mode to ON. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -821,12 +821,12 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
* @hw: pointer to hardware structure
* @index: led number to turn off
**/
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* To turn off the LED, set mode to OFF. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -844,7 +844,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
u32 eec;
@@ -870,10 +870,9 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
* SPI EEPROM is assumed here. This code would need to
* change if a future EEPROM is not SPI.
*/
- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
eeprom->word_size = BIT(eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
}
if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -896,19 +895,16 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
*
* Write 16 bit word(s) to EEPROM through bit-bang method
**/
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
u16 i, count;
+ int status;
hw->eeprom.ops.init_params(hw);
- if (words == 0)
- return IXGBE_ERR_INVALID_ARGUMENT;
-
- if (offset + words > hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ if (words == 0 || (offset + words > hw->eeprom.word_size))
+ return -EINVAL;
/*
* The EEPROM page size cannot be queried from the chip. We do lazy
@@ -946,14 +942,14 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
* If ixgbe_eeprom_update_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
- u16 word;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
u16 page_size;
+ int status;
+ u16 word;
u16 i;
- u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
/* Prepare the EEPROM for writing */
status = ixgbe_acquire_eeprom(hw);
@@ -962,7 +958,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
if (ixgbe_ready_eeprom(hw) != 0) {
ixgbe_release_eeprom(hw);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
for (i = 0; i < words; i++) {
@@ -1023,12 +1019,12 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
* If ixgbe_eeprom_update_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
hw->eeprom.ops.init_params(hw);
if (offset >= hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ return -EINVAL;
return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
}
@@ -1042,19 +1038,16 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Reads 16 bit word(s) from EEPROM through bit-bang method
**/
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
u16 i, count;
+ int status;
hw->eeprom.ops.init_params(hw);
- if (words == 0)
- return IXGBE_ERR_INVALID_ARGUMENT;
-
- if (offset + words > hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ if (words == 0 || (offset + words > hw->eeprom.word_size))
+ return -EINVAL;
/*
* We cannot hold synchronization semaphores for too long
@@ -1084,12 +1077,12 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
*
* Reads 16 bit word(s) from EEPROM through bit-bang method
**/
-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
- u16 word_in;
u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ u16 word_in;
+ int status;
u16 i;
/* Prepare the EEPROM for reading */
@@ -1099,7 +1092,7 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
if (ixgbe_ready_eeprom(hw) != 0) {
ixgbe_release_eeprom(hw);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
for (i = 0; i < words; i++) {
@@ -1136,13 +1129,13 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
*
* Reads 16 bit value from EEPROM through bit-bang method
**/
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data)
{
hw->eeprom.ops.init_params(hw);
if (offset >= hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ return -EINVAL;
return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
}
@@ -1156,20 +1149,17 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
*
* Reads a 16 bit word(s) from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
+ int status;
u32 eerd;
- s32 status;
u32 i;
hw->eeprom.ops.init_params(hw);
- if (words == 0)
- return IXGBE_ERR_INVALID_ARGUMENT;
-
- if (offset >= hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ if (words == 0 || offset >= hw->eeprom.word_size)
+ return -EINVAL;
for (i = 0; i < words; i++) {
eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
@@ -1199,11 +1189,11 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
* This function is called only when we are writing a new large buffer
* at given offset so the data would be overwritten anyway.
**/
-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset)
{
u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
- s32 status;
+ int status;
u16 i;
for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
@@ -1239,7 +1229,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
@@ -1253,20 +1243,17 @@ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
*
* Write a 16 bit word(s) to the EEPROM using the EEWR register.
**/
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
+ int status;
u32 eewr;
- s32 status;
u16 i;
hw->eeprom.ops.init_params(hw);
- if (words == 0)
- return IXGBE_ERR_INVALID_ARGUMENT;
-
- if (offset >= hw->eeprom.word_size)
- return IXGBE_ERR_EEPROM;
+ if (words == 0 || offset >= hw->eeprom.word_size)
+ return -EINVAL;
for (i = 0; i < words; i++) {
eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
@@ -1299,7 +1286,7 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
*
* Write a 16 bit word to the EEPROM using the EEWR register.
**/
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
@@ -1312,7 +1299,7 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
* Polls the status bit (bit 1) of the EERD or EEWR to determine when the
* read or write is done respectively.
**/
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
@@ -1328,7 +1315,7 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
}
udelay(5);
}
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/**
@@ -1338,13 +1325,13 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
* Prepares EEPROM for access using bit-bang method. This function should
* be called before issuing a command to the EEPROM.
**/
-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
u32 eec;
u32 i;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
@@ -1366,7 +1353,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
hw_dbg(hw, "Could not acquire EEPROM grant\n");
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/* Setup EEPROM for Read/Write */
@@ -1384,7 +1371,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
*
* Sets the hardware semaphores so EEPROM access can occur for bit-bang method
**/
-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
u32 timeout = 2000;
u32 i;
@@ -1419,7 +1406,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (swsm & IXGBE_SWSM_SMBI) {
hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
}
@@ -1447,7 +1434,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
if (i >= timeout) {
hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
ixgbe_release_eeprom_semaphore(hw);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
return 0;
@@ -1475,7 +1462,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
* ixgbe_ready_eeprom - Polls for EEPROM ready
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+static int ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
u16 i;
u8 spi_stat_reg;
@@ -1503,7 +1490,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
*/
if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
hw_dbg(hw, "SPI EEPROM Status error\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
return 0;
@@ -1693,7 +1680,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
* ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
* @hw: pointer to hardware structure
**/
-s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -1715,7 +1702,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
if (hw->eeprom.ops.read(hw, i, &pointer)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/* If the pointer seems invalid */
@@ -1724,7 +1711,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
if (hw->eeprom.ops.read(hw, pointer, &length)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
if (length == 0xFFFF || length == 0)
@@ -1733,7 +1720,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
for (j = pointer + 1; j <= pointer + length; j++) {
if (hw->eeprom.ops.read(hw, j, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
checksum += word;
}
@@ -1741,7 +1728,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
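
With the s32 -> int conversion, ixgbe_calc_eeprom_checksum_generic() keeps its dual-purpose return: a negative errno on failure, otherwise the non-negative 16-bit checksum. A hedged sketch of how a caller can split the two cases (the wrapper name below is hypothetical):

static int example_read_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum)
{
        int ret = hw->eeprom.ops.calc_checksum(hw);

        if (ret < 0)
                return ret;             /* e.g. -EIO from a failed EEPROM read */

        *checksum = (u16)ret;           /* success: the value fits in 16 bits */
        return 0;
}

This is the same shape used by ixgbe_validate_eeprom_checksum_generic() and ixgbe_update_eeprom_checksum_generic() further down in the file.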
/**
@@ -1752,12 +1739,12 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/*
* Read the first word from the EEPROM. If this times out or fails, do
@@ -1786,7 +1773,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
* calculated checksum
*/
if (read_checksum != checksum)
- status = IXGBE_ERR_EEPROM_CHECKSUM;
+ status = -EIO;
/* If the user cares, return the calculated checksum */
if (checksum_val)
@@ -1799,10 +1786,10 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
* ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
* @hw: pointer to hardware structure
**/
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum;
+ int status;
/*
* Read the first word from the EEPROM. If this times out or fails, do
@@ -1836,7 +1823,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
*
* Puts an ethernet address into a receive address register.
**/
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr)
{
u32 rar_low, rar_high;
@@ -1845,7 +1832,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
/* Make sure we are using a valid rar index range */
if (index >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", index);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
/* setup VMDq pool selection before this RAR gets enabled */
@@ -1889,7 +1876,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
*
* Clears an ethernet address from a receive address register.
**/
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -1897,7 +1884,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
/* Make sure we are using a valid rar index range */
if (index >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", index);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
/*
@@ -1930,7 +1917,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
* of the receive address registers. Clears the multicast table. Assumes
* the receiver is in reset when the routine is called.
**/
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
u32 i;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -1993,7 +1980,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
**/
-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
u32 vector = 0;
@@ -2062,7 +2049,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
* registers for the first multicast addresses, and hashes the rest into the
* multicast table.
**/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev)
{
struct netdev_hw_addr *ha;
@@ -2104,7 +2091,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
*
* Enables multicast address in RAR and the use of the multicast hash table.
**/
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+int ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
@@ -2121,7 +2108,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
*
* Disables multicast address in RAR and the use of the multicast hash table.
**/
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+int ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
@@ -2137,7 +2124,7 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
*
* Enable flow control according to the current settings.
**/
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+int ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
u32 mflcn_reg, fccfg_reg;
u32 reg;
@@ -2146,7 +2133,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
/* Validate the water mark configuration. */
if (!hw->fc.pause_time)
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
/* Low water mark of zero causes XOFF floods */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
@@ -2155,7 +2142,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
if (!hw->fc.low_water[i] ||
hw->fc.low_water[i] >= hw->fc.high_water[i]) {
hw_dbg(hw, "Invalid water mark configuration\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
}
}
@@ -2212,7 +2199,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
break;
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
/* Set 802.3x based flow control settings. */
@@ -2265,11 +2252,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
* Find the intersection between advertised settings and link partner's
* advertised settings
**/
-s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
if ((!(adv_reg)) || (!(lp_reg)))
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ return -EINVAL;
if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
/*
@@ -2307,10 +2294,10 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
*
* Enable flow control according to IEEE clause 37 on 1 gig fiber.
**/
-static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
- s32 ret_val;
+ int ret_val;
/*
* On multispeed fiber at 1g, bail out if
@@ -2321,7 +2308,7 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
(!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ return -EIO;
pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
@@ -2341,10 +2328,10 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
*
* Enable flow control according to IEEE clause 37.
**/
-static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
u32 links2, anlp1_reg, autoc_reg, links;
- s32 ret_val;
+ int ret_val;
/*
* On backplane, bail out if
@@ -2353,12 +2340,12 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
*/
links = IXGBE_READ_REG(hw, IXGBE_LINKS);
if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ return -EIO;
if (hw->mac.type == ixgbe_mac_82599EB) {
links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
- return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ return -EIO;
}
/*
* Read the 10g AN autoc and LP ability registers and resolve
@@ -2380,7 +2367,7 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
*
* Enable flow control according to IEEE clause 37.
**/
-static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
u16 technology_ability_reg = 0;
u16 lp_technology_ability_reg = 0;
@@ -2407,8 +2394,8 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
**/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
- s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
ixgbe_link_speed speed;
+ int ret_val = -EIO;
bool link_up;
/*
@@ -2510,11 +2497,11 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
* Disables PCI-Express primary access and verifies there are no pending
- * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable
+ * requests. -EALREADY is returned if primary disable
* bit hasn't caused the primary requests to be disabled, else 0
* is returned signifying primary requests disabled.
**/
-static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
+static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
u32 i, poll;
u16 value;
@@ -2575,7 +2562,7 @@ gio_disable_fail:
}
hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
- return IXGBE_ERR_PRIMARY_REQUESTS_PENDING;
+ return -EALREADY;
}
/**
@@ -2586,7 +2573,7 @@ gio_disable_fail:
* Acquires the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr = 0;
u32 swmask = mask;
@@ -2600,7 +2587,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
* SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_eeprom_semaphore(hw))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
if (!(gssr & (fwmask | swmask))) {
@@ -2620,7 +2607,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
usleep_range(5000, 10000);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
/**
@@ -2654,7 +2641,7 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
*
* The default case requires no protection so just do the register read.
**/
-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
*locked = false;
*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -2668,7 +2655,7 @@ s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
* @locked: bool to indicate whether the SW/FW lock was already taken by
* previous read.
**/
-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
{
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
return 0;
@@ -2681,7 +2668,7 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
* Stops the receive data path and waits for the HW to internally
* empty the Rx security block.
**/
-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
+int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECRX_POLL 40
int i;
@@ -2713,7 +2700,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
*
* Enables the receive data path
**/
-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
+int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
{
u32 secrxreg;
@@ -2732,7 +2719,7 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
*
* Enables the Rx DMA unit
**/
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
+int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
if (regval & IXGBE_RXCTRL_RXEN)
hw->mac.ops.enable_rx(hw);
@@ -2747,17 +2734,17 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
* @hw: pointer to hardware structure
* @index: led number to blink
**/
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
- ixgbe_link_speed speed = 0;
- bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ixgbe_link_speed speed = 0;
+ bool link_up = false;
bool locked = false;
- s32 ret_val;
+ int ret_val;
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/*
* Link must be up to auto-blink the LEDs;
@@ -2795,15 +2782,15 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
* @hw: pointer to hardware structure
* @index: led number to stop blinking
**/
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
- u32 autoc_reg = 0;
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
bool locked = false;
- s32 ret_val;
+ u32 autoc_reg = 0;
+ int ret_val;
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
if (ret_val)
@@ -2834,10 +2821,10 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
* pointer, and returns the value at that location. This is used in both
* get and set mac_addr routines.
**/
-static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
+static int ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
{
- s32 ret_val;
+ int ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
@@ -2862,11 +2849,11 @@ static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
* set_lan_id() is called by identify_sfp(), but this cannot be relied
* upon for non-SFP connections, so we must call it here.
**/
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
u16 san_mac_data, san_mac_offset;
+ int ret_val;
u8 i;
- s32 ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
@@ -2955,7 +2942,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
* @rar: receive address register index to disassociate
* @vmdq: VMDq pool index to remove from the rar
**/
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 mpsar_lo, mpsar_hi;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -2963,7 +2950,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
@@ -3006,7 +2993,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq pool index
**/
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 mpsar;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -3014,7 +3001,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
if (vmdq < 32) {
@@ -3039,7 +3026,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* VFs advertised and not 0.
* MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
**/
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
u32 rar = hw->mac.san_mac_rar_index;
@@ -3058,7 +3045,7 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
* ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
* @hw: pointer to hardware structure
**/
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
int i;
@@ -3078,9 +3065,9 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* return the VLVF index where this VLAN id should be placed
*
**/
-static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
+static int ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
- s32 regindex, first_empty_slot;
+ int regindex, first_empty_slot;
u32 bits;
/* short cut the special case */
@@ -3091,7 +3078,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
* will simply bypass the VLVF if there are no entries present in the
* VLVF that contain our VLAN
*/
- first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
+ first_empty_slot = vlvf_bypass ? -ENOSPC : 0;
/* add VLAN enable bit for comparison */
vlan |= IXGBE_VLVF_VIEN;
@@ -3115,7 +3102,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
if (!first_empty_slot)
hw_dbg(hw, "No space in VLVF.\n");
- return first_empty_slot ? : IXGBE_ERR_NO_SPACE;
+ return first_empty_slot ? : -ENOSPC;
}
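
ixgbe_find_vlvf_slot() now returns either a VLVF slot index or a negative errno; the final return uses the GNU `x ? : y` extension, which yields the left operand when it is non-zero and the right one otherwise, so a zero first_empty_slot (no free entry found) falls back to -ENOSPC. A hedged sketch of a hypothetical caller; the real handling lives in ixgbe_set_vfta_generic():

static int example_program_vlvf(struct ixgbe_hw *hw, u32 vlan)
{
        int vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, false);

        if (vlvf_index < 0)
                return vlvf_index;      /* -ENOSPC: VLVF table is full */

        /* ... program IXGBE_VLVF(vlvf_index) / IXGBE_VLVFB(...) here ... */
        return 0;
}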
/**
@@ -3128,14 +3115,14 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
{
u32 regidx, vfta_delta, vfta, bits;
- s32 vlvf_index;
+ int vlvf_index;
if ((vlan > 4095) || (vind > 63))
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/*
* this is a 2 part operation - first the VFTA, then the
@@ -3239,7 +3226,7 @@ vfta_update:
*
* Clears the VLAN filter table, and the VMDq index associated with the filter
**/
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
u32 offset;
@@ -3289,7 +3276,7 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
*
* Reads the links register to determine if link is up and the current speed
**/
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
{
bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw);
@@ -3409,8 +3396,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* This function will read the EEPROM from the alternative SAN MAC address
* block to check support for the alternative WWNN/WWPN prefixes.
**/
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
+int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
{
u16 offset, caps;
u16 alt_san_mac_blk_offset;
@@ -3507,7 +3494,7 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
* This function will read the EEPROM location for the device capabilities,
* and return the word through device_caps.
**/
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
+int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
@@ -3611,12 +3598,13 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
*
* Communicates with the manageability block. On success return 0
* else returns semaphore error when encountering an error acquiring
- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ * semaphore, -EINVAL when incorrect parameters are passed or -EIO when
+ * command fails.
*
* This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
* by the caller.
**/
-s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
u32 timeout)
{
u32 hicr, i, fwsts;
@@ -3624,7 +3612,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EINVAL;
}
/* Set bit 9 of FWSTS clearing FW reset indication */
@@ -3635,13 +3623,13 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if (!(hicr & IXGBE_HICR_EN)) {
hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EIO;
}
/* Calculate length in DWORDs. We must be DWORD aligned */
if (length % sizeof(u32)) {
hw_dbg(hw, "Buffer length failure, not aligned to dword");
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
}
dword_len = length >> 2;
@@ -3666,7 +3654,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
/* Check command successful completion. */
if ((timeout && i == timeout) ||
!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EIO;
return 0;
}
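
A hedged sketch of driving ixgbe_hic_unlocked() with the new errno returns; the caller must already hold IXGBE_GSSR_SW_MNG_SM, and the wrapper name and debug strings here are illustrative only:

static int example_send_hic(struct ixgbe_hw *hw, u32 *buffer, u32 length)
{
        int ret;

        ret = ixgbe_hic_unlocked(hw, buffer, length,
                                 IXGBE_HI_COMMAND_TIMEOUT);
        if (ret == -EINVAL)
                hw_dbg(hw, "bad length %u for host interface command\n",
                       length);
        else if (ret == -EIO)
                hw_dbg(hw, "host interface command did not complete\n");

        return ret;
}

In practice ixgbe_host_interface_command() is the wrapper that takes and releases the semaphore around this call, as the hunk below shows.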
@@ -3686,22 +3674,22 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
* in these cases.
*
* Communicates with the manageability block. On success return 0
- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ * else return -EIO or -EINVAL.
**/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
u32 length, u32 timeout,
bool return_data)
{
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
struct ixgbe_hic_hdr *hdr = buffer;
- u32 *u32arr = buffer;
u16 buf_len, dword_len;
- s32 status;
+ u32 *u32arr = buffer;
+ int status;
u32 bi;
if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EINVAL;
}
/* Take management host interface semaphore */
status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
@@ -3731,7 +3719,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
if (length < round_up(buf_len, 4) + hdr_size) {
hw_dbg(hw, "Buffer not large enough for reply message.\n");
- status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = -EIO;
goto rel_out;
}
@@ -3762,16 +3750,16 @@ rel_out:
*
* Sends driver version number to firmware through the manageability
* block. On success return 0
- * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ * else returns -EBUSY when encountering an error acquiring
+ * semaphore or -EIO when command fails.
**/
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, __always_unused u16 len,
__always_unused const char *driver_ver)
{
struct ixgbe_hic_drv_info fw_cmd;
+ int ret_val;
int i;
- s32 ret_val;
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
@@ -3799,7 +3787,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
FW_CEM_RESP_STATUS_SUCCESS)
ret_val = 0;
else
- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ ret_val = -EIO;
break;
}
@@ -3887,24 +3875,24 @@ static const u8 ixgbe_emc_therm_limit[4] = {
*
* Returns error code.
**/
-static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
+static int ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
u16 *ets_offset)
{
- s32 status;
+ int status;
status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
if (status)
return status;
if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
if (status)
return status;
if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
return 0;
}
@@ -3915,19 +3903,19 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
*
* Returns the thermal sensor data structure
**/
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
- s32 status;
u16 ets_offset;
- u16 ets_cfg;
u16 ets_sensor;
u8 num_sensors;
+ u16 ets_cfg;
+ int status;
u8 i;
struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
/* Only support thermal sensors attached to physical port 0 */
if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
if (status)
@@ -3946,10 +3934,10 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
if (status)
return status;
- sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
- IXGBE_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
- IXGBE_ETS_DATA_LOC_SHIFT);
+ sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK,
+ ets_sensor);
+ sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK,
+ ets_sensor);
if (sensor_location != 0) {
status = hw->phy.ops.read_i2c_byte(hw,
@@ -3971,30 +3959,29 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
* Inits the thermal sensor thresholds according to the NVM map
* and save off the threshold and location values into mac.thermal_sensor_data
**/
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
- s32 status;
- u16 ets_offset;
- u16 ets_cfg;
- u16 ets_sensor;
+ struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
u8 low_thresh_delta;
u8 num_sensors;
u8 therm_limit;
+ u16 ets_sensor;
+ u16 ets_offset;
+ u16 ets_cfg;
+ int status;
u8 i;
- struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
/* Only support thermal sensors attached to physical port 0 */
if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
if (status)
return status;
- low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
- IXGBE_ETS_LTHRES_DELTA_SHIFT);
+ low_thresh_delta = FIELD_GET(IXGBE_ETS_LTHRES_DELTA_MASK, ets_cfg);
num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
if (num_sensors > IXGBE_MAX_SENSORS)
num_sensors = IXGBE_MAX_SENSORS;
@@ -4008,10 +3995,10 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
ets_offset + 1 + i);
continue;
}
- sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
- IXGBE_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
- IXGBE_ETS_DATA_LOC_SHIFT);
+ sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK,
+ ets_sensor);
+ sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK,
+ ets_sensor);
therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
hw->phy.ops.write_i2c_byte(hw,
@@ -4205,16 +4192,16 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
*
* Set the link speed in the MAC and/or PHY register and restarts link.
*/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- s32 status = 0;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ bool autoneg, link_up = false;
u32 speedcnt = 0;
+ int status = 0;
u32 i = 0;
- bool autoneg, link_up = false;
/* Mask off requested but non-supported speeds */
status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
@@ -4353,8 +4340,8 @@ out:
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed)
{
- s32 status;
u8 rs, eeprom_data;
+ int status;
switch (speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 34761e691d52..6493abf189de 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -8,89 +8,89 @@
#include "ixgbe.h"
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+int ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+int ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+int ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
+int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
+int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev);
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *);
+int ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+int ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
+int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
+int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+int ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+int ixgbe_setup_fc_generic(struct ixgbe_hw *);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
+int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on, bool vlvf_bypass);
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
+int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver, u16 len, const char *str);
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
+int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
u32 timeout, bool return_data);
-s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
-s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
+int ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
u32 (*data)[FW_PHY_ACT_DATA_COUNT]);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
@@ -111,8 +111,8 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
#define IXGBE_EMC_DIODE3_DATA 0x2A
#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
+int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
void ixgbe_get_etk_id(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
@@ -121,7 +121,7 @@ void ixgbe_get_orom_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index d26cea5b43bd..502666f28124 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -18,7 +18,7 @@
* @max: max credits by traffic class
* @max_frame: maximum frame size
*/
-static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
+static int ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
__u16 *max, int max_frame)
{
int min_percent = 100;
@@ -59,7 +59,7 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
* It should be called only after the rules are checked by
* ixgbe_dcb_check_config().
*/
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
+int ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config,
int max_frame, u8 direction)
{
@@ -247,7 +247,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
+int ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
u8 pfc_en;
@@ -283,7 +283,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
}
/* Helper routines to abstract HW specifics from DCB netlink ops */
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+int ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -300,7 +300,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
return -EINVAL;
}
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
+int ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
{
__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
__u8 prio_type[IEEE_8021QAZ_MAX_TCS];
@@ -333,7 +333,7 @@ s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
bwg_id, prio_type, ets->prio_tc);
}
-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+int ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
u16 *refill, u16 *max, u8 *bwg_id,
u8 *prio_type, u8 *prio_tc)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 60cd5863bf5e..91788e4c4e19 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -124,15 +124,15 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
+int ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+int ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
+int ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+int ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
+int ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 379ae747cdce..185c3e5f9837 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -15,10 +15,8 @@
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *prio_type)
+int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *prio_type)
{
u32 reg = 0;
u32 credit_refill = 0;
@@ -75,11 +73,8 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type)
+int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
u32 reg, max_credits;
u8 i;
@@ -124,11 +119,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type)
+int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
u32 reg;
u8 i;
@@ -171,7 +163,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
+int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 fcrtl, reg;
u8 i;
@@ -224,7 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
* Configure queue statistics registers; all queues belonging to the same
* traffic class use a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+static int ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -260,7 +252,7 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index fdca41abb44c..5bf3f13c6953 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -46,27 +46,19 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
+int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
/* DCB hw initialization */
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 7948849840a5..c61bd9059541 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -17,7 +17,7 @@
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -76,7 +76,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -128,7 +128,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -187,7 +187,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control (PFC) for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
u32 i, j, fcrtl, reg;
u8 max_tc = 0;
@@ -272,7 +272,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
* Configure queue statistics registers; all queues belonging to the same
* traffic class use a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
+static int ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -330,7 +330,7 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index c6f084883cab..f6e5a87c03e3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -70,30 +70,21 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
+int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
/* DCB hw initialization */
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type,
- u8 *prio_tc);
-
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type,
- u8 *prio_tc);
-
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type,
+ u8 *prio_tc);
+
+int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type,
+ u8 *prio_tc);
+
+int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type,
u8 *prio_tc);
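The DCB prototype updates above complete the switch from the driver-private
s32/IXGBE_ERR_* convention to plain int returns carrying 0 on success or a
negative errno on failure. A minimal sketch of that convention follows; the
helper is hypothetical and not part of the patch:

#include <linux/errno.h>
#include "ixgbe_type.h"	/* struct ixgbe_hw */

/* Hypothetical helper showing the 0-or-negative-errno style used above. */
static int example_validate_tc(struct ixgbe_hw *hw, u8 tc)
{
	if (!hw)
		return -EINVAL;		/* bad argument: standard errno */
	if (tc >= 8)			/* 82598/82599 expose 8 traffic classes */
		return -EINVAL;
	return 0;			/* success */
}

Callers can then propagate the value directly (if (err) return err;) instead of
translating driver-specific codes.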
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 4dd897806fa5..6e6e6f1847b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -349,6 +349,8 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
case ixgbe_sfp_type_1g_lx_core1:
+ case ixgbe_sfp_type_1g_bx_core0:
+ case ixgbe_sfp_type_1g_bx_core1:
ethtool_link_ksettings_add_link_mode(cmd, supported,
FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
@@ -459,7 +461,7 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
- s32 err = 0;
+ int err = 0;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)) {
@@ -1413,12 +1415,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
for (i = 0; i < IXGBE_TEST_LEN; i++)
- ethtool_sprintf(&p, "%s", ixgbe_gstrings_test[i]);
+ ethtool_puts(&p, ixgbe_gstrings_test[i]);
break;
case ETH_SS_STATS:
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- ixgbe_gstrings_stats[i].stat_string);
+ ethtool_puts(&p, ixgbe_gstrings_stats[i].stat_string);
for (i = 0; i < netdev->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -3108,35 +3109,37 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
-static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ixgbe_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- ixgbe_get_reta(adapter, indir);
+ if (rxfh->indir)
+ ixgbe_get_reta(adapter, rxfh->indir);
- if (key)
- memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
+ if (rxfh->key)
+ memcpy(rxfh->key, adapter->rss_key,
+ ixgbe_get_rxfh_key_size(netdev));
return 0;
}
-static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int ixgbe_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
/* Fill out the redirection table */
- if (indir) {
+ if (rxfh->indir) {
int max_queues = min_t(int, adapter->num_rx_queues,
ixgbe_rss_indir_tbl_max(adapter));
@@ -3147,18 +3150,19 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
/* Verify user input. */
for (i = 0; i < reta_entries; i++)
- if (indir[i] >= max_queues)
+ if (rxfh->indir[i] >= max_queues)
return -EINVAL;
for (i = 0; i < reta_entries; i++)
- adapter->rss_indir_tbl[i] = indir[i];
+ adapter->rss_indir_tbl[i] = rxfh->indir[i];
ixgbe_store_reta(adapter);
}
/* Fill out the rss hash key */
- if (key) {
- memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
+ if (rxfh->key) {
+ memcpy(adapter->rss_key, rxfh->key,
+ ixgbe_get_rxfh_key_size(netdev));
ixgbe_store_key(adapter);
}
@@ -3324,9 +3328,9 @@ static int ixgbe_get_module_info(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- s32 status;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
+ int status;
if (hw->phy.type == ixgbe_phy_fw)
return -ENXIO;
@@ -3370,7 +3374,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ int status = -EFAULT;
u8 databyte = 0xFF;
int i = 0;
@@ -3401,66 +3405,68 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
static const struct {
ixgbe_link_speed mac_speed;
- u32 supported;
+ u32 link_mode;
} ixgbe_ls_map[] = {
- { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
- { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
- { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
- { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
- { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
+ { IXGBE_LINK_SPEED_10_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_100_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_1GB_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_2_5GB_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT },
+ { IXGBE_LINK_SPEED_10GB_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
};
static const struct {
u32 lp_advertised;
- u32 mac_speed;
+ u32 link_mode;
} ixgbe_lp_map[] = {
- { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
- { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
- { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
- { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
- { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
- { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
+ { FW_PHY_ACT_UD_2_100M_TX_EEE, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_1G_T_EEE, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_T_EEE, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_1G_KX_EEE, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_KX4_EEE, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_KR_EEE, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
};
static int
-ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
+ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
struct ixgbe_hw *hw = &adapter->hw;
- s32 rc;
+ int rc;
u16 i;
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
if (rc)
return rc;
- edata->lp_advertised = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
if (info[0] & ixgbe_lp_map[i].lp_advertised)
- edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
+ linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ edata->lp_advertised);
}
- edata->supported = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
- edata->supported |= ixgbe_ls_map[i].supported;
+ linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ edata->supported);
}
- edata->advertised = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
- edata->advertised |= ixgbe_ls_map[i].supported;
+ linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ edata->advertised);
}
- edata->eee_enabled = !!edata->advertised;
+ edata->eee_enabled = !linkmode_empty(edata->advertised);
edata->tx_lpi_enabled = edata->eee_enabled;
- if (edata->advertised & edata->lp_advertised)
- edata->eee_active = true;
+
+ linkmode_and(common, edata->advertised, edata->lp_advertised);
+ edata->eee_active = !linkmode_empty(common);
return 0;
}
-static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3474,17 +3480,17 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
-static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- struct ethtool_eee eee_data;
- s32 ret_val;
+ struct ethtool_keee eee_data;
+ int ret_val;
if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
return -EOPNOTSUPP;
- memset(&eee_data, 0, sizeof(struct ethtool_eee));
+ memset(&eee_data, 0, sizeof(struct ethtool_keee));
ret_val = ixgbe_get_eee(netdev, &eee_data);
if (ret_val)
@@ -3502,7 +3508,7 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EINVAL;
}
- if (eee_data.advertised != edata->advertised) {
+ if (!linkmode_equal(eee_data.advertised, edata->advertised)) {
e_err(drv,
"Setting EEE advertised speeds is not supported\n");
return -EINVAL;
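The EEE rework above replaces the u32 SUPPORTED_* masks of struct ethtool_eee
with the link-mode bitmaps carried by struct ethtool_keee. A minimal sketch of
the bitmap helpers it relies on (the function name is illustrative):

#include <linux/ethtool.h>
#include <linux/linkmode.h>

/* EEE is active only when we and the link partner advertise at least one
 * common mode, which is what ixgbe_get_eee_fw() now computes.
 */
static bool example_eee_active(const struct ethtool_keee *edata)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);

	linkmode_and(common, edata->advertised, edata->lp_advertised);
	return !linkmode_empty(common);
}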
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 7311bd545acf..18d63c8c2ff4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -670,8 +670,8 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
fcoe->indices);
fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
- fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
- IXGBE_FCRETA_ENTRY_HIGH_MASK;
+ fcoe_q_h = FIELD_PREP(IXGBE_FCRETA_ENTRY_HIGH_MASK,
+ fcoe_q_h);
}
fcoe_i = fcoe->offset + (i % fcoe->indices);
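The FCoE hunk above swaps an open-coded shift-and-mask for FIELD_PREP(). A
small self-contained sketch of the <linux/bitfield.h> helpers; the mask below
is illustrative, not a real FCRETA field:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_FIELD_MASK	GENMASK(31, 16)	/* illustrative field */

static u32 example_pack(u32 reg, u16 val)
{
	reg &= ~EXAMPLE_FIELD_MASK;
	return reg | FIELD_PREP(EXAMPLE_FIELD_MASK, val);	/* shift val into place */
}

static u16 example_unpack(u32 reg)
{
	return FIELD_GET(EXAMPLE_FIELD_MASK, reg);	/* mask and right-shift */
}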
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 13a6fca31004..866024f2b9ee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -914,7 +914,13 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
goto err_out;
}
- xs = kzalloc(sizeof(*xs), GFP_KERNEL);
+ algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
+ if (unlikely(!algo)) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ xs = kzalloc(sizeof(*xs), GFP_ATOMIC);
if (unlikely(!xs)) {
err = -ENOMEM;
goto err_out;
@@ -930,14 +936,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
xs->xso.dev = adapter->netdev;
- algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
- if (unlikely(!algo)) {
- err = -ENOENT;
- goto err_xs;
- }
-
aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
- xs->aead = kzalloc(aead_len, GFP_KERNEL);
+ xs->aead = kzalloc(aead_len, GFP_ATOMIC);
if (unlikely(!xs->aead)) {
err = -ENOMEM;
goto err_xs;
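The ipsec change above resolves the AEAD algorithm before allocating anything
and switches both allocations to GFP_ATOMIC, on the assumption that this VF
request path must not sleep. A generic sketch of the fail-fast ordering (names
and sizes are illustrative):

#include <linux/errno.h>
#include <linux/slab.h>

static int example_add_entry(const void *lookup_result, void **out)
{
	/* Do the failable lookup first so an error needs no unwinding. */
	if (!lookup_result)
		return -ENOENT;

	*out = kzalloc(64, GFP_ATOMIC);	/* non-sleeping allocation */
	if (!*out)
		return -ENOMEM;

	return 0;
}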
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 94bde2cad0f4..094653e81b97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -153,7 +153,7 @@ MODULE_PARM_DESC(max_vfs,
#endif /* CONFIG_PCI_IOV */
static bool allow_unsupported_sfp;
-module_param(allow_unsupported_sfp, bool, 0);
+module_param(allow_unsupported_sfp, bool, 0444);
MODULE_PARM_DESC(allow_unsupported_sfp,
"Allow unsupported and untested SFP+ modules on 82599-based adapters");
@@ -205,7 +205,7 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
return 0;
}
-static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
+static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 link_status = 0;
@@ -1106,6 +1106,44 @@ static int ixgbe_tx_maxrate(struct net_device *netdev,
}
/**
+ * ixgbe_update_tx_ring_stats - Update Tx ring specific counters
+ * @tx_ring: ring to update
+ * @q_vector: queue vector ring belongs to
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes)
+{
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += bytes;
+ tx_ring->stats.packets += pkts;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += bytes;
+ q_vector->tx.total_packets += pkts;
+}
+
+/**
+ * ixgbe_update_rx_ring_stats - Update Rx ring specific counters
+ * @rx_ring: ring to update
+ * @q_vector: queue vector ring belongs to
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes)
+{
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.bytes += bytes;
+ rx_ring->stats.packets += pkts;
+ u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_bytes += bytes;
+ q_vector->rx.total_packets += pkts;
+}
+
+/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
@@ -1207,12 +1245,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
i += tx_ring->count;
tx_ring->next_to_clean = i;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
+ ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+ total_bytes);
adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
@@ -2429,12 +2463,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
ixgbe_xdp_ring_update_tail_locked(ring);
}
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- q_vector->rx.total_packets += total_rx_packets;
- q_vector->rx.total_bytes += total_rx_bytes;
+ ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+ total_rx_bytes);
return total_rx_packets;
}
@@ -2756,7 +2786,6 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr = adapter->interrupt_event;
- s32 rc;
if (test_bit(__IXGBE_DOWN, &adapter->state))
return;
@@ -2790,14 +2819,13 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
}
/* Check if this is not due to overtemp */
- if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
+ if (!hw->phy.ops.check_overtemp(hw))
return;
break;
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
- rc = hw->phy.ops.check_overtemp(hw);
- if (rc != IXGBE_ERR_OVERTEMP)
+ if (!hw->phy.ops.check_overtemp(hw))
return;
break;
default:
@@ -2941,8 +2969,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
- u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -5512,7 +5540,7 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
u32 speed;
bool autoneg, link_up = false;
- int ret = IXGBE_ERR_LINK_SETUP;
+ int ret = -EIO;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -5983,13 +6011,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
err = hw->mac.ops.init_hw(hw);
switch (err) {
case 0:
- case IXGBE_ERR_SFP_NOT_PRESENT:
- case IXGBE_ERR_SFP_NOT_SUPPORTED:
+ case -ENOENT:
+ case -EOPNOTSUPP:
break;
- case IXGBE_ERR_PRIMARY_REQUESTS_PENDING:
+ case -EALREADY:
e_dev_err("primary disable timed out\n");
break;
- case IXGBE_ERR_EEPROM_VERSION:
+ case -EACCES:
/* We are running on a pre-production device, log a warning */
e_dev_warn("This device is a pre-production adapter/LOM. "
"Please be aware there may be issues associated with "
@@ -6819,7 +6847,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -6946,7 +6974,7 @@ int ixgbe_close(struct net_device *netdev)
return 0;
}
-static int __maybe_unused ixgbe_resume(struct device *dev_d)
+static int ixgbe_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
@@ -7054,7 +7082,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-static int __maybe_unused ixgbe_suspend(struct device *dev_d)
+static int ixgbe_suspend(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
int retval;
@@ -7811,7 +7839,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- s32 err;
+ int err;
/* not searching for SFP so there is nothing to do here */
if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
@@ -7829,10 +7857,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
err = hw->phy.ops.identify_sfp(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (err == -EOPNOTSUPP)
goto sfp_out;
- if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
+ if (err == -ENOENT) {
/* If no cable is present, then we need to reset
* the next time we find a good cable. */
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
@@ -7858,7 +7886,7 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
else
err = hw->mac.ops.setup_sfp(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (err == -EOPNOTSUPP)
goto sfp_out;
adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
@@ -7867,8 +7895,8 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
sfp_out:
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
- if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
- (adapter->netdev->reg_state == NETREG_REGISTERED)) {
+ if (err == -EOPNOTSUPP &&
+ adapter->netdev->reg_state == NETREG_REGISTERED) {
e_dev_err("failed to initialize because an unsupported "
"SFP+ module type was detected.\n");
e_dev_err("Reload the driver after installing a "
@@ -7938,7 +7966,7 @@ static void ixgbe_service_timer(struct timer_list *t)
static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 status;
+ bool overtemp;
if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
return;
@@ -7948,11 +7976,9 @@ static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
if (!hw->phy.ops.handle_lasi)
return;
- status = hw->phy.ops.handle_lasi(&adapter->hw);
- if (status != IXGBE_ERR_OVERTEMP)
- return;
-
- e_crit(drv, "%s\n", ixgbe_overheat_msg);
+ hw->phy.ops.handle_lasi(&adapter->hw, &overtemp);
+ if (overtemp)
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
}
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
@@ -10035,15 +10061,10 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- int status;
- __u16 mode;
-
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
+ __u16 mode = nla_get_u16(attr);
+ int status = ixgbe_configure_bridge_mode(adapter, mode);
- mode = nla_get_u16(attr);
- status = ixgbe_configure_bridge_mode(adapter, mode);
if (status)
return status;
@@ -10209,7 +10230,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -10529,6 +10550,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
}
/**
+ * ixgbe_irq_disable_single - Disable single IRQ vector
+ * @adapter: adapter structure
+ * @ring: ring index
+ **/
+static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 qmask = BIT_ULL(ring);
+ u32 mask;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = qmask & IXGBE_EIMC_RTX_QUEUE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ mask = (qmask & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
+ mask = (qmask >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ synchronize_irq(adapter->msix_entries[ring].vector);
+ else
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
* ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
* @adapter: adapter structure
* @ring: ring index
@@ -10544,6 +10603,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
+ ixgbe_irq_disable_single(adapter, ring);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
ixgbe_disable_txr(adapter, tx_ring);
if (xdp_ring)
ixgbe_disable_txr(adapter, xdp_ring);
@@ -10552,9 +10616,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
if (xdp_ring)
synchronize_rcu();
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_disable(&rx_ring->q_vector->napi);
-
ixgbe_clean_tx_ring(tx_ring);
if (xdp_ring)
ixgbe_clean_tx_ring(xdp_ring);
@@ -10582,9 +10643,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_enable(&rx_ring->q_vector->napi);
-
ixgbe_configure_tx_ring(adapter, tx_ring);
if (xdp_ring)
ixgbe_configure_tx_ring(adapter, xdp_ring);
@@ -10593,6 +10651,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
if (xdp_ring)
clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+ ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
+ IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**
@@ -10922,9 +10985,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = hw->mac.ops.reset_hw(hw);
hw->phy.reset_if_overtemp = false;
ixgbe_set_eee_capable(adapter);
- if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
+ if (err == -ENOENT) {
err = 0;
- } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ } else if (err == -EOPNOTSUPP) {
e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
e_dev_err("Reload the driver after installing a supported module.\n");
goto err_sw_init;
@@ -11143,7 +11206,7 @@ skip_sriov:
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
- if (err == IXGBE_ERR_EEPROM_VERSION) {
+ if (err == -EACCES) {
/* We are running on a pre-production device, log a warning */
e_dev_warn("This device is a pre-production adapter/LOM. "
"Please be aware there may be issues associated "
@@ -11371,7 +11434,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
if ((pf_func & 1) == (pdev->devfn & 1)) {
unsigned int device_id;
- vf = (req_id & 0x7F) >> 1;
+ vf = FIELD_GET(0x7F, req_id);
e_dev_err("VF %d has caused a PCIe error\n", vf);
e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
"%8.8x\tdw3: %8.8x\n",
@@ -11520,14 +11583,14 @@ static const struct pci_error_handlers ixgbe_err_handler = {
.resume = ixgbe_io_resume,
};
-static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
static struct pci_driver ixgbe_driver = {
.name = ixgbe_driver_name,
.id_table = ixgbe_pci_tbl,
.probe = ixgbe_probe,
.remove = ixgbe_remove,
- .driver.pm = &ixgbe_pm_ops,
+ .driver.pm = pm_sleep_ptr(&ixgbe_pm_ops),
.shutdown = ixgbe_shutdown,
.sriov_configure = ixgbe_pci_sriov_configure,
.err_handler = &ixgbe_err_handler
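The PM conversion at the end of ixgbe_main.c drops the __maybe_unused
annotations by pairing DEFINE_SIMPLE_DEV_PM_OPS() with pm_sleep_ptr(). A
minimal sketch of the same pattern for a hypothetical PCI driver:

#include <linux/pci.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* quiesce the device */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* bring the device back up */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_driver = {
	.name		= "example",
	/* pm_sleep_ptr() evaluates to NULL when CONFIG_PM_SLEEP=n, so no
	 * __maybe_unused markings are needed on the callbacks.
	 */
	.driver.pm	= pm_sleep_ptr(&example_pm_ops),
};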
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 5679293e53f7..d67d77e5dacc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -15,7 +15,7 @@
*
* returns SUCCESS if it successfully read message from buffer
**/
-s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+int ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -24,7 +24,7 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
size = mbx->size;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->read(hw, msg, size, mbx_id);
}
@@ -38,15 +38,15 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
*
* returns SUCCESS if it successfully copied message into the buffer
**/
-s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+int ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (size > mbx->size)
- return IXGBE_ERR_MBX;
+ return -EINVAL;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->write(hw, msg, size, mbx_id);
}
@@ -58,12 +58,12 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->check_for_msg(hw, mbx_id);
}
@@ -75,12 +75,12 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->check_for_ack(hw, mbx_id);
}
@@ -92,12 +92,12 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
return mbx->ops->check_for_rst(hw, mbx_id);
}
@@ -109,18 +109,18 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if it successfully received a message notification
**/
-static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+static int ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
if (!countdown || !mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
while (mbx->ops->check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
- return IXGBE_ERR_MBX;
+ return -EIO;
udelay(mbx->usec_delay);
}
@@ -134,18 +134,18 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if it successfully received a message acknowledgement
**/
-static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+static int ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
if (!countdown || !mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
while (mbx->ops->check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
- return IXGBE_ERR_MBX;
+ return -EIO;
udelay(mbx->usec_delay);
}
@@ -162,14 +162,14 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
-static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val;
+ int ret_val;
if (!mbx->ops)
- return IXGBE_ERR_MBX;
+ return -EIO;
ret_val = ixgbe_poll_for_msg(hw, mbx_id);
if (ret_val)
@@ -189,15 +189,15 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
-static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+static int ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val;
+ int ret_val;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops || !mbx->timeout)
- return IXGBE_ERR_MBX;
+ return -EIO;
/* send msg */
ret_val = mbx->ops->write(hw, msg, size, mbx_id);
@@ -208,7 +208,7 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
return ixgbe_poll_for_ack(hw, mbx_id);
}
-static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+static int ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
@@ -217,7 +217,7 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
return 0;
}
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -227,9 +227,9 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
{
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ int index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
@@ -238,7 +238,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
return 0;
}
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -248,9 +248,9 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
{
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ int index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
@@ -259,7 +259,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
return 0;
}
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
u32 reg_offset = (vf_number < 32) ? 0 : 1;
u32 vf_shift = vf_number % 32;
@@ -295,7 +295,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
return 0;
}
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -305,7 +305,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* return SUCCESS if we obtained the mailbox lock
**/
-static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
{
u32 p2v_mailbox;
@@ -317,7 +317,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
return 0;
- return IXGBE_ERR_MBX;
+ return -EIO;
}
/**
@@ -329,10 +329,10 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if it successfully copied message into the buffer
**/
-static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
- s32 ret_val;
+ int ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
@@ -368,10 +368,10 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
* memory buffer. The presumption is that the caller knows that there was
* a message due to a VF request so no polling for message is needed.
**/
-static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
- s32 ret_val;
+ int ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 8f4316b19278..bd205306934b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -7,7 +7,6 @@
#include "ixgbe_type.h"
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX -100
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
@@ -97,11 +96,11 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
-s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+int ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+int ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+int ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+int ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+int ixgbe_check_for_rst(struct ixgbe_hw *, u16);
#ifdef CONFIG_PCI_IOV
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 689470c1e8ad..07eaa3c3f4d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -11,19 +11,19 @@
static void ixgbe_i2c_start(struct ixgbe_hw *hw);
static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
+static int ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
+static int ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
+static int ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
+static int ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
+static int ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
+static int ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
-static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
+static int ixgbe_get_phy_id(struct ixgbe_hw *hw);
+static int ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
/**
* ixgbe_out_i2c_byte_ack - Send I2C byte with ack
@@ -32,9 +32,9 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
*
* Returns an error code on error.
**/
-static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
+static int ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
{
- s32 status;
+ int status;
status = ixgbe_clock_out_i2c_byte(hw, byte);
if (status)
@@ -49,9 +49,9 @@ static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
*
* Returns an error code on error.
**/
-static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
+static int ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
{
- s32 status;
+ int status;
status = ixgbe_clock_in_i2c_byte(hw, byte);
if (status)
@@ -85,7 +85,7 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
*
* Returns an error code on error.
*/
-s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+int ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -102,7 +102,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
csum = ~csum;
do {
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
if (ixgbe_out_i2c_byte_ack(hw, addr))
@@ -150,7 +150,7 @@ fail:
hw_dbg(hw, "I2C byte read combined error.\n");
} while (retry < max_retry);
- return IXGBE_ERR_I2C;
+ return -EIO;
}
/**
@@ -163,7 +163,7 @@ fail:
*
* Returns an error code on error.
*/
-s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -179,7 +179,7 @@ s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
csum = ~csum;
do {
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
if (ixgbe_out_i2c_byte_ack(hw, addr))
@@ -215,7 +215,7 @@ fail:
hw_dbg(hw, "I2C byte write combined error.\n");
} while (retry < max_retry);
- return IXGBE_ERR_I2C;
+ return -EIO;
}
/**
@@ -260,10 +260,10 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
*
* Determines the physical layer module found on the current adapter.
**/
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
+ u32 status = -EFAULT;
u32 phy_addr;
- u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
if (!hw->phy.phy_semaphore_mask) {
if (hw->bus.lan_id)
@@ -276,13 +276,12 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
return 0;
if (hw->phy.nw_mng_if_sel) {
- phy_addr = (hw->phy.nw_mng_if_sel &
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ phy_addr = FIELD_GET(IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD,
+ hw->phy.nw_mng_if_sel);
if (ixgbe_probe_phy(hw, phy_addr))
return 0;
else
- return IXGBE_ERR_PHY_ADDR_INVALID;
+ return -EFAULT;
}
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
@@ -333,11 +332,11 @@ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
**/
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_id(struct ixgbe_hw *hw)
{
- s32 status;
u16 phy_id_high = 0;
u16 phy_id_low = 0;
+ int status;
status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
&phy_id_high);
@@ -395,11 +394,11 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
* ixgbe_reset_phy_generic - Performs a PHY reset
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+int ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
u32 i;
u16 ctrl = 0;
- s32 status = 0;
+ int status = 0;
if (hw->phy.type == ixgbe_phy_unknown)
status = ixgbe_identify_phy_generic(hw);
@@ -408,8 +407,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
return status;
/* Don't reset PHY if it's shut down due to overtemp. */
- if (!hw->phy.reset_if_overtemp &&
- (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+ if (!hw->phy.reset_if_overtemp && hw->phy.ops.check_overtemp(hw))
return 0;
/* Blocked by MNG FW so bail */
@@ -457,7 +455,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
if (ctrl & MDIO_CTRL1_RESET) {
hw_dbg(hw, "PHY reset polling failed to complete.\n");
- return IXGBE_ERR_RESET_FAILED;
+ return -EIO;
}
return 0;
@@ -472,8 +470,8 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
*
* Reads a value from a specified PHY register without the SWFW lock
**/
-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 *phy_data)
+int ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
{
u32 i, data, command;
@@ -500,7 +498,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY address command did not complete.\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/* Address cycle complete, setup and write the read
@@ -527,7 +525,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY read command didn't complete\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/* Read operation is complete. Get the data
@@ -548,18 +546,18 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
- s32 status;
u32 gssr = hw->phy.phy_semaphore_mask;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
phy_data);
hw->mac.ops.release_swfw_sync(hw, gssr);
} else {
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
return status;
@@ -573,8 +571,8 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 5 bit device type
* @phy_data: Data to write to the PHY register
**/
-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
+int ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data)
{
u32 i, command;
@@ -604,7 +602,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY address cmd didn't complete\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/*
@@ -632,7 +630,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY write cmd didn't complete\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
return 0;
@@ -646,18 +644,18 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 5 bit device type
* @phy_data: Data to write to the PHY register
**/
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
- s32 status;
u32 gssr = hw->phy.phy_semaphore_mask;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
phy_data);
hw->mac.ops.release_swfw_sync(hw, gssr);
} else {
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
return status;
@@ -670,7 +668,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* @hw: pointer to hardware structure
* @cmd: command register value to write
**/
-static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
+static int ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
{
IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);
@@ -686,11 +684,11 @@ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
int regnum, u32 gssr)
{
u32 hwaddr, cmd;
- s32 data;
+ int data;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -720,11 +718,11 @@ mii_bus_read_done:
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
int devad, int regnum, u32 gssr)
{
u32 hwaddr, cmd;
- s32 data;
+ int data;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -758,11 +756,11 @@ mii_bus_read_done:
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
int regnum, u16 val, u32 gssr)
{
u32 hwaddr, cmd;
- s32 err;
+ int err;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -789,12 +787,12 @@ static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
int devad, int regnum, u16 val,
u32 gssr)
{
u32 hwaddr, cmd;
- s32 err;
+ int err;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -823,7 +821,7 @@ mii_bus_write_done:
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
+static int ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
@@ -839,7 +837,7 @@ static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
+static int ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -856,7 +854,7 @@ static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
+static int ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -874,7 +872,7 @@ static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
+static int ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -891,7 +889,7 @@ static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -909,7 +907,7 @@ static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
* @devad: device address to read
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
int devad, int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -927,7 +925,7 @@ static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -946,7 +944,7 @@ static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
int devad, int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -1025,13 +1023,13 @@ out:
*
* ixgbe_mii_bus_init initializes a mii_bus structure in adapter
**/
-s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
+int ixgbe_mii_bus_init(struct ixgbe_hw *hw)
{
- s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
- s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum);
- s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
+ int (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ int (*read_c22)(struct mii_bus *bus, int addr, int regnum);
+ int (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
u16 val);
- s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
+ int (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
@@ -1097,12 +1095,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
*
* Restart autonegotiation and PHY and waits for completion.
**/
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
- s32 status = 0;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
- bool autoneg = false;
ixgbe_link_speed speed;
+ bool autoneg = false;
+ int status = 0;
ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
@@ -1175,7 +1173,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: unused
**/
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+int ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
@@ -1216,10 +1214,10 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
* Determines the supported link capabilities by reading the PHY auto
* negotiation register.
*/
-static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
+static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
{
u16 speed_ability;
- s32 status;
+ int status;
status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
&speed_ability);
@@ -1255,11 +1253,11 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
* @speed: pointer to link speed
* @autoneg: boolean auto-negotiation value
*/
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+int ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
- s32 status = 0;
+ int status = 0;
*autoneg = true;
if (!hw->phy.speeds_supported)
@@ -1278,15 +1276,15 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
**/
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up)
{
- s32 status;
- u32 time_out;
u32 max_time_out = 10;
- u16 phy_link = 0;
u16 phy_speed = 0;
+ u16 phy_link = 0;
u16 phy_data = 0;
+ u32 time_out;
+ int status;
/* Initialize speed and link to default case */
*link_up = false;
@@ -1328,7 +1326,7 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* it is called via a function pointer that could call other
* functions that could return an error.
**/
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+int ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
bool autoneg = false;
@@ -1401,13 +1399,13 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
* ixgbe_reset_phy_nl - Performs a PHY reset
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+int ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
{
u16 phy_offset, control, eword, edata, block_crc;
- bool end_data = false;
u16 list_offset, data_offset;
+ bool end_data = false;
u16 phy_data = 0;
- s32 ret_val;
+ int ret_val;
u32 i;
/* Blocked by MNG FW so bail */
@@ -1430,7 +1428,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
if ((phy_data & MDIO_CTRL1_RESET) != 0) {
hw_dbg(hw, "PHY reset did not complete.\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/* Get init offsets */
@@ -1448,8 +1446,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
if (ret_val)
goto err_eeprom;
- control = (eword & IXGBE_CONTROL_MASK_NL) >>
- IXGBE_CONTROL_SHIFT_NL;
+ control = FIELD_GET(IXGBE_CONTROL_MASK_NL, eword);
edata = eword & IXGBE_DATA_MASK_NL;
switch (control) {
case IXGBE_DELAY_NL:
@@ -1487,12 +1484,12 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
hw_dbg(hw, "SOL\n");
} else {
hw_dbg(hw, "Bad control value\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
break;
default:
hw_dbg(hw, "Bad control type\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
}
@@ -1500,7 +1497,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
err_eeprom:
hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/**
@@ -1509,7 +1506,7 @@ err_eeprom:
*
* Determines HW type and calls appropriate function.
**/
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_module_generic(struct ixgbe_hw *hw)
{
switch (hw->mac.ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
@@ -1518,10 +1515,10 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
return ixgbe_identify_qsfp_module_generic(hw);
default:
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/**
@@ -1530,23 +1527,24 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
*
* Searches for and identifies the SFP module and assigns appropriate PHY type.
**/
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
struct ixgbe_adapter *adapter = hw->back;
- s32 status;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u8 bitrate_nominal = 0;
+ u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
+ u16 enforce_sfp = 0;
u32 vendor_oui = 0;
- enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
u8 identifier = 0;
- u8 comp_codes_1g = 0;
- u8 comp_codes_10g = 0;
- u8 oui_bytes[3] = {0, 0, 0};
u8 cable_tech = 0;
u8 cable_spec = 0;
- u16 enforce_sfp = 0;
+ int status;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/* LAN ID is needed for sfp_type determination */
@@ -1561,7 +1559,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_1GBE_COMP_CODES,
@@ -1579,7 +1577,12 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
+ if (status)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_BITRATE_NOMINAL,
+ &bitrate_nominal);
if (status)
goto err_read_i2c_eeprom;
@@ -1662,6 +1665,18 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_lx_core1;
+ /* Support only Ethernet 1000BASE-BX10, checking the Bit Rate
+ * Nominal Value as per SFF-8472; by convention 1.25 Gb/s should
+ * be rounded up to 0Dh (13 in units of 100 MBd) for 1000BASE-BX.
+ */
+ } else if ((comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE) &&
+ (bitrate_nominal == 0xD)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
@@ -1750,9 +1765,11 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
/* Anything else 82598-based is supported */
@@ -1766,7 +1783,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel)
return 0;
@@ -1776,7 +1795,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
}
hw_dbg(hw, "SFP+ module not supported\n");
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
return 0;
@@ -1786,7 +1805,7 @@ err_read_i2c_eeprom:
hw->phy.id = 0;
hw->phy.type = ixgbe_phy_unknown;
}
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/**
@@ -1795,10 +1814,10 @@ err_read_i2c_eeprom:
*
* Searches for and identifies the QSFP module and assigns appropriate PHY type
**/
-static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+static int ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
{
struct ixgbe_adapter *adapter = hw->back;
- s32 status;
+ int status;
u32 vendor_oui = 0;
enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
u8 identifier = 0;
@@ -1813,7 +1832,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/* LAN ID is needed for sfp_type determination */
@@ -1827,7 +1846,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
hw->phy.id = identifier;
@@ -1895,7 +1914,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
} else {
/* unsupported module type */
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
}
@@ -1955,7 +1974,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
}
hw_dbg(hw, "QSFP module not supported\n");
hw->phy.type = ixgbe_phy_sfp_unsupported;
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
return 0;
}
@@ -1966,7 +1985,7 @@ err_read_i2c_eeprom:
hw->phy.id = 0;
hw->phy.type = ixgbe_phy_unknown;
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
}
/**
@@ -1978,7 +1997,7 @@ err_read_i2c_eeprom:
* Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
* so it returns the offsets to the phy init sequence block.
**/
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset)
{
@@ -1986,14 +2005,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 sfp_type = hw->phy.sfp_type;
if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
(hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
/*
* Limiting active cables and 1G Phys must be initialized as
@@ -2002,23 +2021,25 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core0)
+ sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core1)
+ sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
hw_err(hw, "eeprom read at %d failed\n",
IXGBE_PHY_INIT_OFFSET_NL);
- return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+ return -EIO;
}
if ((!*list_offset) || (*list_offset == 0xFFFF))
- return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+ return -EIO;
/* Shift offset to first ID word */
(*list_offset)++;
@@ -2037,7 +2058,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
goto err_phy;
if ((!*data_offset) || (*data_offset == 0xFFFF)) {
hw_dbg(hw, "SFP+ module not supported\n");
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
} else {
break;
}
@@ -2050,14 +2071,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (sfp_id == IXGBE_PHY_INIT_END_NL) {
hw_dbg(hw, "No matching SFP+ module found\n");
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
return 0;
err_phy:
hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
- return IXGBE_ERR_PHY;
+ return -EIO;
}
/**
@@ -2068,7 +2089,7 @@ err_phy:
*
* Performs byte read operation to SFP module's EEPROM over I2C interface.
**/
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
@@ -2084,7 +2105,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs byte read operation to SFP module's SFF-8472 data over I2C
**/
-s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
@@ -2100,7 +2121,7 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs byte write operation to SFP module's EEPROM over I2C interface.
**/
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data)
{
return hw->phy.ops.write_i2c_byte(hw, byte_offset,
@@ -2134,14 +2155,14 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data, bool lock)
{
- s32 status;
- u32 max_retry = 10;
- u32 retry = 0;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 max_retry = 10;
bool nack = true;
+ u32 retry = 0;
+ int status;
if (hw->mac.type >= ixgbe_mac_X550)
max_retry = 3;
@@ -2152,7 +2173,7 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
do {
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
ixgbe_i2c_start(hw);
@@ -2224,7 +2245,7 @@ fail:
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2241,7 +2262,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2259,16 +2280,16 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data, bool lock)
{
- s32 status;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
u32 max_retry = 1;
u32 retry = 0;
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int status;
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
do {
ixgbe_i2c_start(hw);
@@ -2327,7 +2348,7 @@ fail:
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2344,7 +2365,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2425,10 +2446,10 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
*
* Clocks in one byte data via I2C data/clock
**/
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
+static int ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
{
- s32 i;
bool bit = false;
+ int i;
*data = 0;
for (i = 7; i >= 0; i--) {
@@ -2446,12 +2467,12 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
*
* Clocks out one byte data via I2C data/clock
**/
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
+static int ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
{
- s32 status;
- s32 i;
- u32 i2cctl;
bool bit = false;
+ int status;
+ u32 i2cctl;
+ int i;
for (i = 7; i >= 0; i--) {
bit = (data >> i) & 0x1;
@@ -2477,14 +2498,14 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
*
* Clocks in/out one bit via I2C data/clock
**/
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+static int ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
- u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
- s32 status = 0;
- u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
u32 timeout = 10;
bool ack = true;
+ int status = 0;
+ u32 i = 0;
if (data_oe_bit) {
i2cctl |= IXGBE_I2C_DATA_OUT(hw);
@@ -2510,7 +2531,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
if (ack == 1) {
hw_dbg(hw, "I2C ack was not received.\n");
- status = IXGBE_ERR_I2C;
+ status = -EIO;
}
ixgbe_lower_i2c_clk(hw, &i2cctl);
@@ -2528,7 +2549,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
*
* Clocks in one bit via I2C data/clock
**/
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
+static int ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
@@ -2562,10 +2583,10 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
*
* Clocks out one bit via I2C data/clock
**/
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+static int ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
- s32 status;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ int status;
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == 0) {
@@ -2582,7 +2603,7 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
udelay(IXGBE_I2C_T_LOW);
} else {
hw_dbg(hw, "I2C data was not set to %X\n", data);
- return IXGBE_ERR_I2C;
+ return -EIO;
}
return 0;
@@ -2650,7 +2671,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* Sets the I2C data bit
* Asserts the I2C data output enable on X550 hardware.
**/
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+static int ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
@@ -2678,7 +2699,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
- return IXGBE_ERR_I2C;
+ return -EIO;
}
return 0;
@@ -2748,29 +2769,31 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
* Checks if the LASI temp alarm status was triggered due to overtemp
+ *
+ * Return true when an overtemp event is detected, otherwise false.
**/
-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
{
u16 phy_data = 0;
+ u32 status;
if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
- return 0;
+ return false;
/* Check that the LASI temp alarm status was triggered */
- hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
- MDIO_MMD_PMAPMD, &phy_data);
-
- if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
- return 0;
+ status = hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+ MDIO_MMD_PMAPMD, &phy_data);
+ if (status)
+ return false;
- return IXGBE_ERR_OVERTEMP;
+ return !!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM);
}
/** ixgbe_set_copper_phy_power - Control power for copper phy
* @hw: pointer to hardware structure
* @on: true for on, false for off
**/
-s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+int ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
{
u32 status;
u16 reg;
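Note on the ixgbe_tn_check_overtemp() change above: the handler now returns a plain boolean and bails out when the MDIO read itself fails, instead of overloading one s32 with both error and event meanings. A minimal userspace sketch of that contract (register name, alarm bit and helper are hypothetical, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define TEMP_ALARM 0x0008                      /* hypothetical alarm bit */

/* Pretend MDIO read: returns 0 on success, negative errno on failure. */
static int read_lasi_status(unsigned short *val)
{
        *val = TEMP_ALARM;                     /* simulate the alarm being set */
        return 0;
}

/* New-style contract: the event is a bool; an I/O error reads as "no event". */
static bool check_overtemp(void)
{
        unsigned short status;

        if (read_lasi_status(&status))
                return false;
        return status & TEMP_ALARM;
}

int main(void)
{
        printf("overtemp: %s\n", check_overtemp() ? "yes" : "no");
        return 0;
}

Callers can then treat the return purely as "overtemp event or not", while I/O failures stay confined to the read path.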
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 6544c4539c0d..14aa2ca51f70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -17,6 +17,7 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_BITRATE_NOMINAL 0xC
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
#define IXGBE_SFF_SFF_8472_SWAP 0x5C
#define IXGBE_SFF_SFF_8472_COMP 0x5E
@@ -39,6 +40,7 @@
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_BASEBX10_CAPABLE 0x64
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
@@ -121,57 +123,57 @@
/* SFP+ SFF-8472 Compliance code */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
-s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw);
+int ixgbe_mii_bus_init(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+int ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+int ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+int ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
+int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
-s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+int ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+int ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
+int ixgbe_identify_module_generic(struct ixgbe_hw *hw);
+int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
+int ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
-s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
-s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+int ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
u16 *val, bool lock);
-s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
u16 val, bool lock);
#endif /* _IXGBE_PHY_H_ */
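The two defines added above, IXGBE_SFF_BITRATE_NOMINAL (byte 0xC) and IXGBE_SFF_BASEBX10_CAPABLE, feed the 1000BASE-BX detection added to ixgbe_identify_sfp_module_generic(). A self-contained sketch of that check, assuming a simplified EEPROM layout and a hypothetical capability bit value rather than the driver's exact constant:

#include <stdint.h>
#include <stdio.h>

#define SFF_1GBE_COMP_CODES   0x6   /* EEPROM byte holding 1GbE compliance codes */
#define SFF_BITRATE_NOMINAL   0xC   /* nominal bit rate, in units of 100 MBd */
#define SFF_BITRATE_1250_MBD  0x0D  /* 1.25 GBd rounded up, per SFF-8472 */
#define SFF_BASEBX10_BIT      0x40  /* hypothetical 1000BASE-BX10 capability bit */

static int is_1g_bx(const uint8_t *eeprom)
{
        return (eeprom[SFF_1GBE_COMP_CODES] & SFF_BASEBX10_BIT) &&
               eeprom[SFF_BITRATE_NOMINAL] == SFF_BITRATE_1250_MBD;
}

int main(void)
{
        uint8_t eeprom[256] = { 0 };

        eeprom[SFF_1GBE_COMP_CODES] = SFF_BASEBX10_BIT;
        eeprom[SFF_BITRATE_NOMINAL] = SFF_BITRATE_1250_MBD;
        printf("1000BASE-BX module: %s\n", is_1g_bx(eeprom) ? "yes" : "no");
        return 0;
}

The driver applies the same two conditions and then selects ixgbe_sfp_type_1g_bx_core0 or _core1 based on the LAN ID.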
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 9cfdfa8a4355..fcfd0a075eee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -363,8 +363,7 @@ int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
- int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
- >> IXGBE_VT_MSGINFO_SHIFT;
+ int entries = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
u16 *hash_list = (u16 *)&msgbuf[1];
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
struct ixgbe_hw *hw = &adapter->hw;
@@ -493,7 +492,7 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf
struct net_device *dev = adapter->netdev;
int pf_max_frame = dev->mtu + ETH_HLEN;
u32 reg_offset, vf_shift, vfre;
- s32 err = 0;
+ int err = 0;
#ifdef CONFIG_FCOE
if (dev->features & NETIF_F_FCOE_MTU)
@@ -776,7 +775,7 @@ static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
- s32 retval;
+ int retval;
ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
@@ -969,7 +968,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
- u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
+ u32 add = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
u8 tcs = adapter->hw_tcs;
@@ -992,8 +991,7 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
u8 *new_mac = ((u8 *)(&msgbuf[1]));
- int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
- IXGBE_VT_MSGINFO_SHIFT;
+ int index = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
int err;
if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
@@ -1256,7 +1254,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
struct ixgbe_hw *hw = &adapter->hw;
- s32 retval;
+ int retval;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -1327,7 +1325,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
- retval = IXGBE_ERR_MBX;
+ retval = -EIO;
break;
}
@@ -1420,7 +1418,7 @@ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- s32 retval;
+ int retval;
if (vf >= adapter->num_vfs)
return -EINVAL;
@@ -1849,5 +1847,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
ivi->trusted = adapter->vfinfo[vf].trusted;
+ ivi->linkstate = adapter->vfinfo[vf].link_state;
return 0;
}
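Several hunks in this file and in ixgbe_phy.c replace open-coded mask-and-shift extraction with FIELD_GET(). A small userspace approximation of what that macro computes, using a hypothetical mask value (the kernel's <linux/bitfield.h> resolves the shift at compile time from a constant mask):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_CONTROL_MASK  0xF000u  /* hypothetical field mask (bits 15:12) */

/* Portable stand-in for FIELD_GET(MASK, reg): mask, then shift down by the
 * mask's lowest set bit.
 */
static uint32_t field_get(uint32_t mask, uint32_t reg)
{
        return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
        uint16_t eword = 0x5ABC;       /* example EEPROM word */

        /* The old open-coded form and the FIELD_GET-style form agree: */
        printf("%u == %u\n",
               (eword & EXAMPLE_CONTROL_MASK) >> 12,
               field_get(EXAMPLE_CONTROL_MASK, eword));
        return 0;
}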
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index f1f69ce67420..78deea5ec536 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -46,4 +46,11 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
int ixgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
+void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes);
+void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes);
+
#endif /* #define _IXGBE_TXRX_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 2b00db92b08f..897fe357b65b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2179,7 +2179,6 @@ enum {
#define IXGBE_PCI_LINK_SPEED_5000 0x2
#define IXGBE_PCI_LINK_SPEED_8000 0x3
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
-#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf
@@ -3210,6 +3209,9 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_1g_lx_core0 = 13,
ixgbe_sfp_type_1g_lx_core1 = 14,
+ ixgbe_sfp_type_1g_bx_core0 = 15,
+ ixgbe_sfp_type_1g_bx_core1 = 16,
+
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -3393,50 +3395,50 @@ struct ixgbe_hw;
/* Function pointer table */
struct ixgbe_eeprom_operations {
- s32 (*init_params)(struct ixgbe_hw *);
- s32 (*read)(struct ixgbe_hw *, u16, u16 *);
- s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
- s32 (*write)(struct ixgbe_hw *, u16, u16);
- s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
- s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
- s32 (*update_checksum)(struct ixgbe_hw *);
- s32 (*calc_checksum)(struct ixgbe_hw *);
+ int (*init_params)(struct ixgbe_hw *);
+ int (*read)(struct ixgbe_hw *, u16, u16 *);
+ int (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ int (*write)(struct ixgbe_hw *, u16, u16);
+ int (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ int (*validate_checksum)(struct ixgbe_hw *, u16 *);
+ int (*update_checksum)(struct ixgbe_hw *);
+ int (*calc_checksum)(struct ixgbe_hw *);
};
struct ixgbe_mac_operations {
- s32 (*init_hw)(struct ixgbe_hw *);
- s32 (*reset_hw)(struct ixgbe_hw *);
- s32 (*start_hw)(struct ixgbe_hw *);
- s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ int (*init_hw)(struct ixgbe_hw *);
+ int (*reset_hw)(struct ixgbe_hw *);
+ int (*start_hw)(struct ixgbe_hw *);
+ int (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
- s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
- s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
- s32 (*stop_adapter)(struct ixgbe_hw *);
- s32 (*get_bus_info)(struct ixgbe_hw *);
+ int (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ int (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ int (*get_device_caps)(struct ixgbe_hw *, u16 *);
+ int (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+ int (*stop_adapter)(struct ixgbe_hw *);
+ int (*get_bus_info)(struct ixgbe_hw *);
void (*set_lan_id)(struct ixgbe_hw *);
- s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
- s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
- s32 (*setup_sfp)(struct ixgbe_hw *);
- s32 (*disable_rx_buff)(struct ixgbe_hw *);
- s32 (*enable_rx_buff)(struct ixgbe_hw *);
- s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
- s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
+ int (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
+ int (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
+ int (*setup_sfp)(struct ixgbe_hw *);
+ int (*disable_rx_buff)(struct ixgbe_hw *);
+ int (*enable_rx_buff)(struct ixgbe_hw *);
+ int (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ int (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
void (*release_swfw_sync)(struct ixgbe_hw *, u32);
void (*init_swfw_sync)(struct ixgbe_hw *);
- s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
- s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
+ int (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+ int (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
void (*stop_link_on_d3)(struct ixgbe_hw *);
- s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
- s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ int (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ int (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed);
@@ -3444,38 +3446,38 @@ struct ixgbe_mac_operations {
void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
/* LED */
- s32 (*led_on)(struct ixgbe_hw *, u32);
- s32 (*led_off)(struct ixgbe_hw *, u32);
- s32 (*blink_led_start)(struct ixgbe_hw *, u32);
- s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
- s32 (*init_led_link_act)(struct ixgbe_hw *);
+ int (*led_on)(struct ixgbe_hw *, u32);
+ int (*led_off)(struct ixgbe_hw *, u32);
+ int (*blink_led_start)(struct ixgbe_hw *, u32);
+ int (*blink_led_stop)(struct ixgbe_hw *, u32);
+ int (*init_led_link_act)(struct ixgbe_hw *);
/* RAR, Multicast, VLAN */
- s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
- s32 (*clear_rar)(struct ixgbe_hw *, u32);
- s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
- s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
- s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
- s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*enable_mc)(struct ixgbe_hw *);
- s32 (*disable_mc)(struct ixgbe_hw *);
- s32 (*clear_vfta)(struct ixgbe_hw *);
- s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
- s32 (*init_uta_tables)(struct ixgbe_hw *);
+ int (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+ int (*clear_rar)(struct ixgbe_hw *, u32);
+ int (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ int (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
+ int (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+ int (*init_rx_addrs)(struct ixgbe_hw *);
+ int (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+ int (*enable_mc)(struct ixgbe_hw *);
+ int (*disable_mc)(struct ixgbe_hw *);
+ int (*clear_vfta)(struct ixgbe_hw *);
+ int (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
+ int (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* Flow Control */
- s32 (*fc_enable)(struct ixgbe_hw *);
- s32 (*setup_fc)(struct ixgbe_hw *);
+ int (*fc_enable)(struct ixgbe_hw *);
+ int (*setup_fc)(struct ixgbe_hw *);
void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
+ int (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
const char *);
- s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
- s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+ int (*get_thermal_sensor_data)(struct ixgbe_hw *);
+ int (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
@@ -3484,47 +3486,47 @@ struct ixgbe_mac_operations {
void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* DMA Coalescing */
- s32 (*dmac_config)(struct ixgbe_hw *hw);
- s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
- s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
- s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
- s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+ int (*dmac_config)(struct ixgbe_hw *hw);
+ int (*dmac_update_tcs)(struct ixgbe_hw *hw);
+ int (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ int (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+ int (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
};
struct ixgbe_phy_operations {
- s32 (*identify)(struct ixgbe_hw *);
- s32 (*identify_sfp)(struct ixgbe_hw *);
- s32 (*init)(struct ixgbe_hw *);
- s32 (*reset)(struct ixgbe_hw *);
- s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
- s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
- s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
- s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
- s32 (*setup_link)(struct ixgbe_hw *);
- s32 (*setup_internal_link)(struct ixgbe_hw *);
- s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
- s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
- s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
- s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
- s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
- s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
- s32 (*check_overtemp)(struct ixgbe_hw *);
- s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
- s32 (*enter_lplu)(struct ixgbe_hw *);
- s32 (*handle_lasi)(struct ixgbe_hw *hw);
- s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ int (*identify)(struct ixgbe_hw *);
+ int (*identify_sfp)(struct ixgbe_hw *);
+ int (*init)(struct ixgbe_hw *);
+ int (*reset)(struct ixgbe_hw *);
+ int (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+ int (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ int (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+ int (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
+ int (*setup_link)(struct ixgbe_hw *);
+ int (*setup_internal_link)(struct ixgbe_hw *);
+ int (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
+ int (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+ int (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ int (*read_i2c_sff8472)(struct ixgbe_hw *, u8, u8 *);
+ int (*read_i2c_eeprom)(struct ixgbe_hw *, u8, u8 *);
+ int (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ bool (*check_overtemp)(struct ixgbe_hw *);
+ int (*set_phy_power)(struct ixgbe_hw *, bool on);
+ int (*enter_lplu)(struct ixgbe_hw *);
+ int (*handle_lasi)(struct ixgbe_hw *hw, bool *);
+ int (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
u8 *value);
- s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ int (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
u8 value);
};
struct ixgbe_link_operations {
- s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
- s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ int (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+ int (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
u16 *val);
- s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
- s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ int (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
+ int (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
u16 val);
};
@@ -3602,14 +3604,14 @@ struct ixgbe_phy_info {
#include "ixgbe_mbx.h"
struct ixgbe_mbx_operations {
- s32 (*init_params)(struct ixgbe_hw *hw);
- s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*check_for_msg)(struct ixgbe_hw *, u16);
- s32 (*check_for_ack)(struct ixgbe_hw *, u16);
- s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+ int (*init_params)(struct ixgbe_hw *hw);
+ int (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*check_for_msg)(struct ixgbe_hw *, u16);
+ int (*check_for_ack)(struct ixgbe_hw *, u16);
+ int (*check_for_rst)(struct ixgbe_hw *, u16);
};
struct ixgbe_mbx_stats {
@@ -3656,7 +3658,7 @@ struct ixgbe_hw {
struct ixgbe_info {
enum ixgbe_mac_type mac;
- s32 (*get_invariants)(struct ixgbe_hw *);
+ int (*get_invariants)(struct ixgbe_hw *);
const struct ixgbe_mac_operations *mac_ops;
const struct ixgbe_eeprom_operations *eeprom_ops;
const struct ixgbe_phy_operations *phy_ops;
@@ -3665,45 +3667,6 @@ struct ixgbe_info {
const u32 *mvals;
};
-
-/* Error Codes */
-#define IXGBE_ERR_EEPROM -1
-#define IXGBE_ERR_EEPROM_CHECKSUM -2
-#define IXGBE_ERR_PHY -3
-#define IXGBE_ERR_CONFIG -4
-#define IXGBE_ERR_PARAM -5
-#define IXGBE_ERR_MAC_TYPE -6
-#define IXGBE_ERR_UNKNOWN_PHY -7
-#define IXGBE_ERR_LINK_SETUP -8
-#define IXGBE_ERR_ADAPTER_STOPPED -9
-#define IXGBE_ERR_INVALID_MAC_ADDR -10
-#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
-#define IXGBE_ERR_PRIMARY_REQUESTS_PENDING -12
-#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
-#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
-#define IXGBE_ERR_RESET_FAILED -15
-#define IXGBE_ERR_SWFW_SYNC -16
-#define IXGBE_ERR_PHY_ADDR_INVALID -17
-#define IXGBE_ERR_I2C -18
-#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
-#define IXGBE_ERR_SFP_NOT_PRESENT -20
-#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
-#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
-#define IXGBE_ERR_FDIR_REINIT_FAILED -23
-#define IXGBE_ERR_EEPROM_VERSION -24
-#define IXGBE_ERR_NO_SPACE -25
-#define IXGBE_ERR_OVERTEMP -26
-#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
-#define IXGBE_ERR_FC_NOT_SUPPORTED -28
-#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
-#define IXGBE_ERR_PBA_SECTION -31
-#define IXGBE_ERR_INVALID_ARGUMENT -32
-#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
-#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
-#define IXGBE_ERR_FW_RESP_INVALID -39
-#define IXGBE_ERR_TOKEN_RETRY -40
-#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
-
#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
#define IXGBE_FUSES0_300MHZ BIT(5)
#define IXGBE_FUSES0_REV_MASK (3u << 6)
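The block removed above is the driver-private error-code table; the rest of the series maps those values onto standard negative errnos such as -EIO, -ENOENT, -EOPNOTSUPP and -EBUSY, with 0 meaning success. A minimal userspace sketch of that convention, using a hypothetical identify routine rather than a driver function:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for an identify routine; not a driver function. */
static int identify_module(int media_is_fiber, int module_supported)
{
        if (!media_is_fiber)
                return -ENOENT;        /* nothing present to identify */
        if (!module_supported)
                return -EOPNOTSUPP;    /* present, but not a supported type */
        return 0;                      /* success */
}

int main(void)
{
        int err = identify_module(1, 0);

        if (err)
                printf("identify failed: %s\n", strerror(-err));
        return 0;
}

Standard errnos let callers propagate and print failures with the usual helpers instead of translating private codes.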
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index d5cfb51ff648..f1ffa398f6df 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -16,9 +16,9 @@
#define IXGBE_X540_VFT_TBL_SIZE 128
#define IXGBE_X540_RX_PB_SIZE 384
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static int ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static int ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static int ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
@@ -26,7 +26,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
return ixgbe_media_type_copper;
}
-s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+int ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -51,7 +51,7 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed,
@@ -66,11 +66,11 @@ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* and clears all interrupts, perform a PHY reset, and perform a link (MAC)
* reset.
**/
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+int ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
- s32 status;
- u32 ctrl, i;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ int status;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -84,7 +84,7 @@ mac_reset_top:
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status) {
hw_dbg(hw, "semaphore failed with %d", status);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
ctrl = IXGBE_CTRL_RST;
@@ -103,7 +103,7 @@ mac_reset_top:
}
if (ctrl & IXGBE_CTRL_RST_MASK) {
- status = IXGBE_ERR_RESET_FAILED;
+ status = -EIO;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
msleep(100);
@@ -166,9 +166,9 @@ mac_reset_top:
* and the generation start_hw function.
* Then performs revision-specific operations, if any.
**/
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+int ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
- s32 ret_val;
+ int ret_val;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -184,19 +184,19 @@ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- u32 eec;
- u16 eeprom_size;
if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ u16 eeprom_size;
+ u32 eec;
+
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
eeprom->word_size = BIT(eeprom_size +
IXGBE_EEPROM_WORD_SIZE_SHIFT);
@@ -215,12 +215,12 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static int ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_read_eerd_generic(hw, offset, data);
@@ -237,13 +237,13 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
*
* Reads a 16 bit word(s) from the EEPROM using the EERD register.
**/
-static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+static int ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
@@ -259,12 +259,12 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
*
* Write a 16 bit word to the EEPROM using the EEWR register.
**/
-static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+static int ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_write_eewr_generic(hw, offset, data);
@@ -281,13 +281,13 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Write a 16 bit word(s) to the EEPROM using the EEWR register.
**/
-static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+static int ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data);
@@ -303,7 +303,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
*
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static int ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -324,7 +324,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
for (i = 0; i < checksum_last_word; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
checksum += word;
}
@@ -349,7 +349,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/* Skip pointer section if length is invalid. */
@@ -360,7 +360,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
for (j = pointer + 1; j <= pointer + length; j++) {
if (ixgbe_read_eerd_generic(hw, j, &word)) {
hw_dbg(hw, "EEPROM read failed\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
checksum += word;
}
@@ -368,7 +368,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
/**
@@ -379,12 +379,12 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+static int ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -397,7 +397,7 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
}
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
@@ -418,7 +418,7 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
*/
if (read_checksum != checksum) {
hw_dbg(hw, "Invalid EEPROM checksum");
- status = IXGBE_ERR_EEPROM_CHECKSUM;
+ status = -EIO;
}
/* If the user cares, return the calculated checksum */
@@ -439,10 +439,10 @@ out:
* checksum and updates the EEPROM and instructs the hardware to update
* the flash.
**/
-static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static int ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -455,7 +455,7 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
}
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
@@ -484,13 +484,13 @@ out:
* Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
* EEPROM from shadow RAM to the flash device.
**/
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+static int ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
+ int status;
u32 flup;
- s32 status;
status = ixgbe_poll_flash_update_done_X540(hw);
- if (status == IXGBE_ERR_EEPROM) {
+ if (status == -EIO) {
hw_dbg(hw, "Flash update time out\n");
return status;
}
@@ -529,7 +529,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
* Polls the FLUDONE (bit 26) of the EEC Register to determine when the
* flash update is done.
**/
-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+static int ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
u32 i;
u32 reg;
@@ -540,7 +540,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
return 0;
udelay(5);
}
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/**
@@ -551,7 +551,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the SW_FW_SYNC register for
* the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
@@ -575,7 +575,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
* SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_swfw_sync_semaphore(hw))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
@@ -599,7 +599,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
* bits in the SW_FW_SYNC register.
*/
if (ixgbe_get_swfw_sync_semaphore(hw))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (swfw_sync & (fwmask | hwmask)) {
swfw_sync |= swmask;
@@ -622,11 +622,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
rmask |= IXGBE_GSSR_I2C_MASK;
ixgbe_release_swfw_sync_X540(hw, rmask);
ixgbe_release_swfw_sync_semaphore(hw);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
ixgbe_release_swfw_sync_semaphore(hw);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
/**
@@ -660,7 +660,7 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
*
* Sets the hardware semaphores so SW/FW can gain control of shared resources
*/
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+static int ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
u32 timeout = 2000;
u32 i;
@@ -680,7 +680,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
if (i == timeout) {
hw_dbg(hw,
"Software semaphore SMBI between device drivers not granted.\n");
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/* Now get the semaphore between SW/FW through the REGSMP bit */
@@ -697,7 +697,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
*/
hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n");
ixgbe_release_swfw_sync_semaphore(hw);
- return IXGBE_ERR_EEPROM;
+ return -EIO;
}
/**
@@ -760,7 +760,7 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
* Devices that implement the version 2 interface:
* X540
**/
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
@@ -768,7 +768,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
bool link_up;
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* Link should be up in order for the blink bit in the LED control
* register to work. Force link and speed in the MAC if link is down.
@@ -798,13 +798,13 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
* Devices that implement the version 2 interface:
* X540
**/
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
if (index > 3)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* Restore the LED to its default value. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
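ixgbe_calc_eeprom_checksum_X540() above now returns either a negative errno or the non-negative 16-bit checksum itself, and its callers check the sign before casting back to u16. A simplified, self-contained sketch of that pattern; the word list, failure flag and target value here are examples, not the driver's data:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define EEPROM_SUM 0xBABA              /* example target sum; the driver's is IXGBE_EEPROM_SUM */

/* Returns the 16-bit checksum as a non-negative int, or a negative errno. */
static int calc_checksum(const uint16_t *words, int nwords, int simulate_io_error)
{
        uint16_t checksum = 0;
        int i;

        if (simulate_io_error)
                return -EIO;
        for (i = 0; i < nwords; i++)
                checksum += words[i];
        return (int)(uint16_t)(EEPROM_SUM - checksum);
}

int main(void)
{
        uint16_t words[] = { 0x1234, 0x5678 };
        int status = calc_checksum(words, 2, 0);

        if (status < 0)
                printf("checksum failed: %d\n", status);
        else
                printf("checksum: 0x%04x\n", (uint16_t)status);
        return 0;
}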
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index e246c0d2a427..b69a680d3ab5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -3,17 +3,17 @@
#include "ixgbe_type.h"
-s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+int ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+int ixgbe_start_hw_X540(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+int ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index aa4bf6c9a2f7..2decb0710b6e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -6,13 +6,13 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
-static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
+static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
+static int ixgbe_setup_fc_x550em(struct ixgbe_hw *);
static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *);
static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *);
-static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);
+static int ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);
-static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -29,7 +29,7 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -41,7 +41,7 @@ static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -55,7 +55,7 @@ static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -91,7 +91,7 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
*
* Returns status code
*/
-static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
+static int ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
{
return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
}
@@ -104,7 +104,7 @@ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
*
* Returns status code
*/
-static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
+static int ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
{
return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
}
@@ -117,9 +117,9 @@ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
*
* Returns status code
*/
-static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
+static int ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
{
- s32 status;
+ int status;
status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value);
if (status)
@@ -135,9 +135,9 @@ static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
*
* Returns status code
*/
-static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
+static int ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
{
- s32 status;
+ int status;
status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE,
value);
@@ -153,9 +153,9 @@ static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
* This function assumes that the caller has acquired the proper semaphore.
* Returns error code
*/
-static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+static int ixgbe_reset_cs4227(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
u32 retry;
u16 value;
u8 reg;
@@ -206,13 +206,13 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
}
if (retry == IXGBE_CS4227_RETRIES) {
hw_err(hw, "CS4227 reset did not complete\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
hw_err(hw, "CS4227 EEPROM did not load successfully\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
return 0;
@@ -225,7 +225,7 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- s32 status;
+ int status;
u16 value;
u8 retry;
@@ -292,7 +292,7 @@ out:
*
* Returns error code
*/
-static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
@@ -347,16 +347,16 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
}
-static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
- return IXGBE_NOT_IMPLEMENTED;
+ return -EOPNOTSUPP;
}
/**
@@ -368,7 +368,7 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+static int ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -383,7 +383,7 @@ static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32
+static int
ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
@@ -399,7 +399,7 @@ ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+static int ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -414,7 +414,7 @@ static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
*
* Returns an error code on error.
**/
-static s32
+static int
ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
@@ -427,7 +427,7 @@ ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
* @activity: activity to perform
* @data: Pointer to 4 32-bit words of data
*/
-s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+int ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
u32 (*data)[FW_PHY_ACT_DATA_COUNT])
{
union {
@@ -435,7 +435,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
struct ixgbe_hic_phy_activity_resp rsp;
} hic;
u16 retries = FW_PHY_ACT_RETRIES;
- s32 rc;
+ int rc;
u32 i;
do {
@@ -463,7 +463,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
--retries;
} while (retries > 0);
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EIO;
}
static const struct {
@@ -484,12 +484,12 @@ static const struct {
*
* Returns error code
*/
-static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
{
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
u16 phy_speeds;
u16 phy_id_lo;
- s32 rc;
+ int rc;
u16 i;
if (hw->phy.id)
@@ -511,7 +511,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
- return IXGBE_ERR_PHY_ADDR_INVALID;
+ return -EFAULT;
hw->phy.autoneg_advertised = hw->phy.speeds_supported;
hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
@@ -526,7 +526,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
*
* Returns error code
*/
-static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
{
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
@@ -545,7 +545,7 @@ static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
*
* Returns error code
*/
-static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+static int ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
{
u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
@@ -557,10 +557,10 @@ static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
* ixgbe_setup_fw_link - Setup firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+static int ixgbe_setup_fw_link(struct ixgbe_hw *hw)
{
u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
u16 i;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
@@ -568,7 +568,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_err(hw, "rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
switch (hw->fc.requested_mode) {
@@ -600,8 +600,10 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
if (rc)
return rc;
+
if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
- return IXGBE_ERR_OVERTEMP;
+ return -EIO;
+
return 0;
}
@@ -611,7 +613,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
*
* Called at init time to set up flow control.
*/
-static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
{
if (hw->fc.requested_mode == ixgbe_fc_default)
hw->fc.requested_mode = ixgbe_fc_full;
@@ -625,19 +627,19 @@ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+static int ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- u32 eec;
- u16 eeprom_size;
if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ u16 eeprom_size;
+ u32 eec;
+
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
eeprom->word_size = BIT(eeprom_size +
IXGBE_EEPROM_WORD_SIZE_SHIFT);
@@ -657,7 +659,7 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
*
* Note: ctrl can be NULL if the IOSF control register value is not needed
*/
-static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+static int ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
{
u32 i, command;
@@ -675,7 +677,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
*ctrl = command;
if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
hw_dbg(hw, "IOSF wait timed out\n");
- return IXGBE_ERR_PHY;
+ return -EIO;
}
return 0;
@@ -688,12 +690,12 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
* @device_type: 3 bit device type
* @phy_data: Pointer to read data from the register
**/
-static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data)
{
u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
u32 command, error;
- s32 ret;
+ int ret;
ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
if (ret)
@@ -712,10 +714,10 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
ret = ixgbe_iosf_wait(hw, &command);
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
- error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
- IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
hw_dbg(hw, "Failed to read, error %x\n", error);
- return IXGBE_ERR_PHY;
+ ret = -EIO;
+ goto out;
}
if (!ret)
@@ -730,10 +732,10 @@ out:
* ixgbe_get_phy_token - Get the token for shared PHY access
* @hw: Pointer to hardware structure
*/
-static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_token(struct ixgbe_hw *hw)
{
struct ixgbe_hic_phy_token_req token_cmd;
- s32 status;
+ int status;
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
@@ -750,19 +752,19 @@ static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
return 0;
if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
- return IXGBE_ERR_FW_RESP_INVALID;
+ return -EIO;
- return IXGBE_ERR_TOKEN_RETRY;
+ return -EAGAIN;
}
/**
* ixgbe_put_phy_token - Put the token for shared PHY access
* @hw: Pointer to hardware structure
*/
-static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+static int ixgbe_put_phy_token(struct ixgbe_hw *hw)
{
struct ixgbe_hic_phy_token_req token_cmd;
- s32 status;
+ int status;
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
@@ -778,7 +780,7 @@ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
return status;
if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
return 0;
- return IXGBE_ERR_FW_RESP_INVALID;
+ return -EIO;
}
/**
@@ -788,7 +790,7 @@ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
* @device_type: 3 bit device type
* @data: Data to write to the register
**/
-static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
__always_unused u32 device_type,
u32 data)
{
@@ -814,7 +816,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 3 bit device type
* @data: Pointer to read data from the register
**/
-static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
__always_unused u32 device_type,
u32 *data)
{
@@ -822,7 +824,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
struct ixgbe_hic_internal_phy_req cmd;
struct ixgbe_hic_internal_phy_resp rsp;
} hic;
- s32 status;
+ int status;
memset(&hic, 0, sizeof(hic));
hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
@@ -849,14 +851,14 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
*
* Reads a 16 bit word(s) from the EEPROM using the hostif.
**/
-static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+static int ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
u32 current_word = 0;
u16 words_to_read;
- s32 status;
+ int status;
u32 i;
/* Take semaphore for the entire operation. */
@@ -921,14 +923,14 @@ out:
*
* Returns error status for any failure
**/
-static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+static int ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
u16 size, u16 *csum, u16 *buffer,
u32 buffer_size)
{
- u16 buf[256];
- s32 status;
u16 length, bufsz, i, start;
u16 *local_buffer;
+ u16 buf[256];
+ int status;
bufsz = ARRAY_SIZE(buf);
@@ -942,7 +944,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
local_buffer = buf;
} else {
if (buffer_size < ptr)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
local_buffer = &buffer[ptr];
}
@@ -960,7 +962,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
}
if (buffer && ((u32)start + (u32)length > buffer_size))
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
for (i = start; length; i++, length--) {
if (i == bufsz && !buffer) {
@@ -989,14 +991,14 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
*
* Returns a negative error code on error, or the 16-bit checksum
**/
-static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
+static int ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
u32 buffer_size)
{
u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+ u16 pointer, i, size;
u16 *local_buffer;
- s32 status;
u16 checksum = 0;
- u16 pointer, i, size;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -1012,7 +1014,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
local_buffer = eeprom_ptrs;
} else {
if (buffer_size < IXGBE_EEPROM_LAST_WORD)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
local_buffer = buffer;
}
@@ -1058,7 +1060,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
@@ -1066,7 +1068,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
*
* Returns a negative error code on error, or the 16-bit checksum
**/
-static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static int ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
return ixgbe_calc_checksum_X550(hw, NULL, 0);
}
@@ -1078,11 +1080,11 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
*
* Reads a 16 bit word from the EEPROM using the hostif.
**/
-static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static int ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
- s32 status;
+ int status;
buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
buffer.hdr.req.buf_lenh = 0;
@@ -1116,12 +1118,12 @@ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
+static int ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -1148,7 +1150,7 @@ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
* calculated checksum
*/
if (read_checksum != checksum) {
- status = IXGBE_ERR_EEPROM_CHECKSUM;
+ status = -EIO;
hw_dbg(hw, "Invalid EEPROM checksum");
}
@@ -1166,11 +1168,11 @@ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
*
* Write a 16 bit word to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
u16 data)
{
- s32 status;
struct ixgbe_hic_write_shadow_ram buffer;
+ int status;
buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
buffer.hdr.req.buf_lenh = 0;
@@ -1194,16 +1196,16 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
*
* Write a 16 bit word to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+static int ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
{
- s32 status = 0;
+ int status = 0;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
} else {
hw_dbg(hw, "write ee hostif failed to get semaphore");
- status = IXGBE_ERR_SWFW_SYNC;
+ status = -EBUSY;
}
return status;
@@ -1214,10 +1216,10 @@ static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
**/
-static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+static int ixgbe_update_flash_X550(struct ixgbe_hw *hw)
{
- s32 status = 0;
union ixgbe_hic_hdr2 buffer;
+ int status = 0;
buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
buffer.req.buf_lenh = 0;
@@ -1236,7 +1238,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
* Sets bus link width and speed to unknown because X550em is
* not a PCI device.
**/
-static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
+static int ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
{
hw->bus.type = ixgbe_bus_type_internal;
hw->bus.width = ixgbe_bus_width_unknown;
@@ -1267,9 +1269,9 @@ static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
**/
static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
{
- u32 rxctrl, pfdtxgswc;
- s32 status;
struct ixgbe_hic_disable_rxen fw_cmd;
+ u32 rxctrl, pfdtxgswc;
+ int status;
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
if (rxctrl & IXGBE_RXCTRL_RXEN) {
@@ -1309,10 +1311,10 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
* checksum and updates the EEPROM and instructs the hardware to update
* the flash.
**/
-static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static int ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum = 0;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -1349,11 +1351,11 @@ static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
*
* Write a 16 bit word(s) to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+static int ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words,
u16 *data)
{
- s32 status = 0;
+ int status = 0;
u32 i = 0;
/* Take semaphore for the entire operation. */
@@ -1385,12 +1387,12 @@ static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
* @device_type: 3 bit device type
* @data: Data to write to the register
**/
-static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 data)
{
u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
u32 command, error;
- s32 ret;
+ int ret;
ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
if (ret)
@@ -1412,10 +1414,9 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
ret = ixgbe_iosf_wait(hw, &command);
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
- error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
- IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
hw_dbg(hw, "Failed to write, error %x\n", error);
- return IXGBE_ERR_PHY;
+ return -EIO;
}
out:
@@ -1429,10 +1430,10 @@ out:
*
 * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
**/
-static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
+static int ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
{
- s32 status;
u32 reg_val;
+ int status;
/* Disable training protocol FSM. */
status = ixgbe_read_iosf_sb_reg_x550(hw,
@@ -1501,10 +1502,10 @@ static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
* internal PHY
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
+static int ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
{
- s32 status;
u32 link_ctrl;
+ int status;
/* Restart auto-negotiation. */
status = hw->mac.ops.read_iosf_sb_reg(hw,
@@ -1550,15 +1551,15 @@ static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
* Configures the integrated KR PHY to use iXFI mode. Used to connect an
* internal and external PHY at a specific speed, without autonegotiation.
**/
-static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static int ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
struct ixgbe_mac_info *mac = &hw->mac;
- s32 status;
u32 reg_val;
+ int status;
/* iXFI is only supported with X552 */
if (mac->type != ixgbe_mac_X550EM_x)
- return IXGBE_ERR_LINK_SETUP;
+ return -EIO;
/* Disable AN and force speed to 10G Serial. */
status = ixgbe_read_iosf_sb_reg_x550(hw,
@@ -1580,7 +1581,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
break;
default:
/* Other link speeds are not supported by internal KR PHY. */
- return IXGBE_ERR_LINK_SETUP;
+ return -EINVAL;
}
status = ixgbe_write_iosf_sb_reg_x550(hw,
@@ -1607,11 +1608,11 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
* @hw: pointer to hardware structure
* @linear: true if SFP module is linear
*/
-static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+static int ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
{
switch (hw->phy.sfp_type) {
case ixgbe_sfp_type_not_present:
- return IXGBE_ERR_SFP_NOT_PRESENT;
+ return -ENOENT;
case ixgbe_sfp_type_da_cu_core0:
case ixgbe_sfp_type_da_cu_core1:
*linear = true;
@@ -1630,7 +1631,7 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
case ixgbe_sfp_type_1g_cu_core0:
case ixgbe_sfp_type_1g_cu_core1:
default:
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
return 0;
@@ -1644,14 +1645,14 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
*
 * Configures the external PHY and the integrated KR PHY for SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
- s32 status;
- u16 reg_slice, reg_val;
bool setup_linear = false;
+ u16 reg_slice, reg_val;
+ int status;
/* Check if SFP module is supported and linear */
status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1660,7 +1661,7 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
* there is no reason to configure CS4227 and SFP not present error is
* not accepted in the setup MAC link flow.
*/
- if (status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status == -ENOENT)
return 0;
if (status)
@@ -1690,11 +1691,11 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
* Configures the integrated PHY for native SFI mode. Used to connect the
* internal PHY directly to an SFP cage, without autonegotiation.
**/
-static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static int ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
struct ixgbe_mac_info *mac = &hw->mac;
- s32 status;
u32 reg_val;
+ int status;
/* Disable all AN and force speed to 10G Serial. */
status = mac->ops.read_iosf_sb_reg(hw,
@@ -1718,7 +1719,7 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
break;
default:
/* Other link speeds are not supported by internal PHY. */
- return IXGBE_ERR_LINK_SETUP;
+ return -EINVAL;
}
(void)mac->ops.write_iosf_sb_reg(hw,
@@ -1789,13 +1790,13 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
*
* Configure the integrated PHY for native SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
bool setup_linear = false;
u32 reg_phy_int;
- s32 ret_val;
+ int ret_val;
/* Check if SFP module is supported and linear */
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1803,7 +1804,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/* If no SFP module present, then return success. Return success since
 * SFP not present error is not accepted in the setup MAC link flow.
*/
- if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (ret_val == -ENOENT)
return 0;
if (ret_val)
@@ -1838,14 +1839,14 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*
* Configure the integrated PHY for SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
u32 reg_slice, slice_offset;
bool setup_linear = false;
u16 reg_phy_ext;
- s32 ret_val;
+ int ret_val;
/* Check if SFP module is supported and linear */
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1853,7 +1854,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/* If no SFP module present, then return success. Return success since
 * SFP not present error is not accepted in the setup MAC link flow.
*/
- if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (ret_val == -ENOENT)
return 0;
if (ret_val)
@@ -1863,7 +1864,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
ixgbe_setup_kr_speed_x550em(hw, speed);
if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
- return IXGBE_ERR_PHY_ADDR_INVALID;
+ return -EFAULT;
/* Get external PHY SKU id */
ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
@@ -1917,12 +1918,12 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*
* Returns error status for any failure
**/
-static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait)
{
- s32 status;
ixgbe_link_speed force_speed;
+ int status;
/* Setup internal/external PHY link speed to iXFI (10G), unless
* only 1G is auto advertised then setup KX link.
@@ -1953,7 +1954,7 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
*
* Check that both the MAC and X557 external PHY have link.
**/
-static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
+static int ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up,
bool link_up_wait_to_complete)
@@ -1962,7 +1963,7 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
u16 i, autoneg_status;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
- return IXGBE_ERR_CONFIG;
+ return -EIO;
status = ixgbe_check_mac_link_generic(hw, speed, link_up,
link_up_wait_to_complete);
@@ -1997,13 +1998,13 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
* @speed: unused
* @autoneg_wait_to_complete: unused
*/
-static s32
+static int
ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
- s32 rc;
+ int rc;
rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2070,12 +2071,12 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
* @speed: the link speed to force
* @autoneg_wait: true when waiting for completion is needed
*/
-static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+static int ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
- s32 rc;
+ int rc;
rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2145,9 +2146,9 @@ static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*/
static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
{
- s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
ixgbe_link_speed speed;
+ int status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -2165,7 +2166,7 @@ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
/* Check if auto-negotiation has completed */
status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
- status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ status = -EIO;
goto out;
}
@@ -2275,10 +2276,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
{
- s32 status;
bool linear;
+ int status;
/* Check if SFP module is supported */
status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
@@ -2296,7 +2297,7 @@ static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
* @speed: pointer to link speed
* @autoneg: true when autoneg or autotry is enabled
**/
-static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -2369,18 +2370,18 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
* @hw: pointer to hardware structure
* @lsc: pointer to boolean flag which indicates whether external Base T
* PHY interrupt is lsc
+ * @is_overtemp: indicates whether an overtemp event was encountered
*
 * Determine if external Base T PHY interrupt cause is high temperature
* failure alarm or link status change.
- *
- * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
- * failure alarm, else return PHY access status.
**/
-static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+static int ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
+ bool *is_overtemp)
{
u32 status;
u16 reg;
+ *is_overtemp = false;
*lsc = false;
/* Vendor alarm triggered */
@@ -2412,7 +2413,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
/* power down the PHY in case the PHY FW didn't already */
ixgbe_set_copper_phy_power(hw, false);
- return IXGBE_ERR_OVERTEMP;
+ *is_overtemp = true;
+ return -EIO;
}
if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
/* device fault alarm triggered */
@@ -2426,7 +2428,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
/* power down the PHY in case the PHY FW didn't */
ixgbe_set_copper_phy_power(hw, false);
- return IXGBE_ERR_OVERTEMP;
+ *is_overtemp = true;
+ return -EIO;
}
}
@@ -2460,14 +2463,14 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
*
* Returns PHY access status
**/
-static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
{
+ bool lsc, overtemp;
u32 status;
u16 reg;
- bool lsc;
/* Clear interrupt flags */
- status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc, &overtemp);
/* Enable link status change alarm */
@@ -2546,21 +2549,20 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
/**
* ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
* @hw: pointer to hardware structure
+ * @is_overtemp: indicates whether an overtemp event was encountered
*
* Handle external Base T PHY interrupt. If high temperature
* failure alarm then return error, else if link status change
* then setup internal/external PHY link
- *
- * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
- * failure alarm, else return PHY access status.
**/
-static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
+ bool *is_overtemp)
{
struct ixgbe_phy_info *phy = &hw->phy;
bool lsc;
u32 status;
- status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc, is_overtemp);
if (status)
return status;
@@ -2577,11 +2579,11 @@ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
*
* Configures the integrated KR PHY.
**/
-static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed)
{
- s32 status;
u32 reg_val;
+ int status;
status = hw->mac.ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2632,7 +2634,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
* ixgbe_setup_kr_x550em - Configure the KR PHY
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
/* leave link alone for 2.5G */
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
@@ -2650,7 +2652,7 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
*
* Returns error code if unable to get link status.
**/
-static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
+static int ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
{
u32 ret;
u16 autoneg_status;
@@ -2684,7 +2686,7 @@ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
* A return of a non-zero value indicates an error, and the base driver should
* not report link up.
**/
-static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
{
ixgbe_link_speed force_speed;
bool link_up;
@@ -2692,7 +2694,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
u16 speed;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
- return IXGBE_ERR_CONFIG;
+ return -EIO;
if (!(hw->mac.type == ixgbe_mac_X550EM_x &&
!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) {
@@ -2735,7 +2737,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
break;
default:
/* Internal PHY does not support anything else */
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
return ixgbe_setup_ixfi_x550em(hw, &force_speed);
@@ -2744,9 +2746,9 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
+static int ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
status = ixgbe_reset_phy_generic(hw);
@@ -2762,12 +2764,12 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @led_idx: led number to turn on
**/
-static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static int ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* To turn on the LED, set mode to ON. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
@@ -2784,12 +2786,12 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* @hw: pointer to hardware structure
* @led_idx: led number to turn off
**/
-static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
- return IXGBE_ERR_PARAM;
+ return -EINVAL;
/* To turn on the LED, set mode to ON. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
@@ -2813,19 +2815,20 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
*
* Sends driver version number to firmware through the manageability
* block. On success return 0
- * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
- * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ * else returns -EBUSY when encountering an error acquiring
+ * the semaphore, -EIO when the command fails, or -EINVAL when incorrect
+ * parameters are passed.
**/
-static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, u16 len,
const char *driver_ver)
{
struct ixgbe_hic_drv_info2 fw_cmd;
- s32 ret_val;
+ int ret_val;
int i;
if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
- return IXGBE_ERR_INVALID_ARGUMENT;
+ return -EINVAL;
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
@@ -2850,7 +2853,7 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
if (fw_cmd.hdr.cmd_or_resp.ret_status !=
FW_CEM_RESP_STATUS_SUCCESS)
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ return -EIO;
return 0;
}
@@ -2863,12 +2866,12 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
*
* Determine lowest common link speed with link partner.
**/
-static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
+static int ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed *lcd_speed)
{
- u16 an_lp_status;
- s32 status;
u16 word = hw->eeprom.ctrl_word_3;
+ u16 an_lp_status;
+ int status;
*lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -2881,33 +2884,33 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
/* If link partner advertised 1G, return 1G */
if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
*lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
- return status;
+ return 0;
}
/* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
(word & NVM_INIT_CTRL_3_D10GMP_PORT0))
- return status;
+ return 0;
/* Link partner not capable of lower speeds, return 10G */
*lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
- return status;
+ return 0;
}
/**
* ixgbe_setup_fc_x550em - Set up flow control
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
{
bool pause, asm_dir;
u32 reg_val;
- s32 rc = 0;
+ int rc = 0;
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
/* 10gig parts do not have a word in the EEPROM to determine the
@@ -2942,7 +2945,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
break;
default:
hw_err(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
switch (hw->device_id) {
@@ -2986,8 +2989,8 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
{
u32 link_s1, lp_an_page_low, an_cntl_1;
- s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
ixgbe_link_speed speed;
+ int status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -3013,7 +3016,7 @@ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
hw_dbg(hw, "Auto-Negotiation did not complete\n");
- status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ status = -EIO;
goto out;
}
@@ -3070,13 +3073,13 @@ static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
* (from D0 to non-D0). Link is required to enter LPLU so avoid resetting
* the X557 PHY immediately prior to entering LPLU.
**/
-static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
{
u16 an_10g_cntl_reg, autoneg_reg, speed;
- s32 status;
ixgbe_link_speed lcd_speed;
u32 save_autoneg;
bool link_up;
+ int status;
/* If blocked by MNG FW, then don't restart AN */
if (ixgbe_check_reset_blocked(hw))
@@ -3127,7 +3130,7 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
(lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
(lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
- return status;
+ return 0;
/* Clear AN completed indication */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
@@ -3164,10 +3167,10 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
* ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+static int ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
{
u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
return 0;
@@ -3187,21 +3190,23 @@ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
/**
* ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
* @hw: pointer to hardware structure
+ *
+ * Return true when an overtemp event is detected, otherwise false.
*/
-static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
+static bool ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
{
u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
if (rc)
- return rc;
+ return false;
if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
ixgbe_shutdown_fw_phy(hw);
- return IXGBE_ERR_OVERTEMP;
+ return true;
}
- return 0;
+ return false;
}
/**
@@ -3222,9 +3227,8 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
*/
if (hw->mac.type == ixgbe_mac_x550em_a &&
hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
- hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ hw->phy.mdio.prtad = FIELD_GET(IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD,
+ hw->phy.nw_mng_if_sel);
}
}
@@ -3235,10 +3239,10 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
* set during init_shared_code because the PHY/SFP type was
* not known. Perform the SFP init if necessary.
**/
-static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
+ int ret_val;
hw->mac.ops.set_lan_id(hw);
@@ -3251,8 +3255,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
- if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
- ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
+ if (ret_val == -EOPNOTSUPP || ret_val == -EFAULT)
return ret_val;
/* Setup function pointers based on detected hardware */
@@ -3364,9 +3367,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
** @hw: pointer to hardware structure
**/
-static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
u16 reg;
status = hw->phy.ops.read_reg(hw,
@@ -3438,14 +3441,14 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
** and clears all interrupts, perform a PHY reset, and perform a link (MAC)
** reset.
**/
-static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
ixgbe_link_speed link_speed;
- s32 status;
+ bool link_up = false;
u32 ctrl = 0;
+ int status;
u32 i;
- bool link_up = false;
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -3460,8 +3463,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
/* PHY ops must be identified and initialized prior to reset */
status = hw->phy.ops.init(hw);
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
- status == IXGBE_ERR_PHY_ADDR_INVALID)
+ if (status == -EOPNOTSUPP || status == -EFAULT)
return status;
/* start the external PHY */
@@ -3477,7 +3479,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
hw->phy.sfp_setup_needed = false;
}
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status == -EOPNOTSUPP)
return status;
/* Reset PHY */
@@ -3501,7 +3503,7 @@ mac_reset_top:
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status) {
hw_dbg(hw, "semaphore failed with %d", status);
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
}
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
@@ -3519,7 +3521,7 @@ mac_reset_top:
}
if (ctrl & IXGBE_CTRL_RST_MASK) {
- status = IXGBE_ERR_RESET_FAILED;
+ status = -EIO;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
@@ -3607,15 +3609,15 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
*
* Called at init time to set up flow control.
**/
-static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
+static int ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
{
- s32 status = 0;
u32 an_cntl = 0;
+ int status = 0;
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ return -EINVAL;
}
if (hw->fc.requested_mode == ixgbe_fc_default)
@@ -3672,7 +3674,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
break;
default:
hw_err(hw, "Flow control param set incorrectly\n");
- return IXGBE_ERR_CONFIG;
+ return -EIO;
}
status = hw->mac.ops.write_iosf_sb_reg(hw,
@@ -3712,9 +3714,9 @@ static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
*
* Acquires the SWFW semaphore and sets the I2C MUX
*/
-static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+static int ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
{
- s32 status;
+ int status;
status = ixgbe_acquire_swfw_sync_X540(hw, mask);
if (status)
@@ -3748,11 +3750,11 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
*
* Acquires the SWFW semaphore and get the shared PHY token as needed
*/
-static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+static int ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
{
u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
int retries = FW_PHY_TOKEN_RETRIES;
- s32 status;
+ int status;
while (--retries) {
status = 0;
@@ -3768,7 +3770,7 @@ static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
return 0;
if (hmask)
ixgbe_release_swfw_sync_X540(hw, hmask);
- if (status != IXGBE_ERR_TOKEN_RETRY)
+ if (status != -EAGAIN)
return status;
msleep(FW_PHY_TOKEN_DELAY);
}
@@ -3805,14 +3807,14 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
 * Token. The PHY Token is needed since the MDIO is shared between two MAC
* instances.
*/
-static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
@@ -3831,14 +3833,14 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* Writes a value to specified PHY register using the SWFW lock and PHY Token.
 * The PHY Token is needed since the MDIO is shared between two MAC instances.
*/
-static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
- return IXGBE_ERR_SWFW_SYNC;
+ return -EBUSY;
status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
hw->mac.ops.release_swfw_sync(hw, mask);
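Taken together, the ixgbe_x550.c hunks above apply two mechanical conversions: driver-private s32 returns carrying IXGBE_ERR_* codes become plain int returns carrying standard negative errno values, and open-coded mask-and-shift field extraction becomes FIELD_GET() from <linux/bitfield.h>. A minimal sketch of the FIELD_GET() pattern, using a made-up register field rather than any of the real IXGBE definitions:

#include <linux/bits.h>
#include <linux/bitfield.h>

/* Hypothetical 5-bit field; name and layout are illustrative only. */
#define EXAMPLE_ERR_MASK	GENMASK(12, 8)

static u32 example_get_err(u32 command)
{
	/* Equivalent to (command & EXAMPLE_ERR_MASK) >> 8, without the
	 * separate *_SHIFT define the old code needed.
	 */
	return FIELD_GET(EXAMPLE_ERR_MASK, command);
}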
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 59798bc33298..397cb773fabb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -220,8 +220,7 @@ static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
if (unlikely(!skb))
return NULL;
@@ -359,12 +358,8 @@ construct_skb:
ixgbe_xdp_ring_update_tail_locked(ring);
}
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- q_vector->rx.total_packets += total_rx_packets;
- q_vector->rx.total_bytes += total_rx_bytes;
+ ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+ total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
@@ -499,13 +494,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
}
tx_ring->next_to_clean = ntc;
-
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
+ ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+ total_bytes);
if (xsk_frames)
xsk_tx_completed(pool, xsk_frames);
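The two ixgbe_xsk.c hunks above stop open-coding the per-ring statistics update and call ixgbe_update_rx_ring_stats()/ixgbe_update_tx_ring_stats() instead; the helpers themselves are introduced elsewhere in the series and are not shown here. Judging purely from the code they replace, the Rx variant presumably looks roughly like this sketch (not the actual definition):

static inline void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
					      struct ixgbe_q_vector *q_vector,
					      u64 pkts, u64 bytes)
{
	/* Ring counters are protected by the u64_stats seqcount. */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += pkts;
	rx_ring->stats.bytes += bytes;
	u64_stats_update_end(&rx_ring->syncp);

	/* Per-vector totals feed the interrupt moderation logic. */
	q_vector->rx.total_packets += pkts;
	q_vector->rx.total_bytes += bytes;
}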
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 296915414a7c..7ac53171b041 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -897,40 +897,41 @@ static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
return IXGBEVF_RSS_HASH_KEY_SIZE;
}
-static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ixgbevf_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
int err = 0;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
- if (key)
- memcpy(key, adapter->rss_key,
+ if (rxfh->key)
+ memcpy(rxfh->key, adapter->rss_key,
ixgbevf_get_rxfh_key_size(netdev));
- if (indir) {
+ if (rxfh->indir) {
int i;
for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ rxfh->indir[i] = adapter->rss_indir_tbl[i];
}
} else {
/* If neither indirection table nor hash key was requested
* - just return a success avoiding taking any locks.
*/
- if (!indir && !key)
+ if (!rxfh->indir && !rxfh->key)
return 0;
spin_lock_bh(&adapter->mbx_lock);
- if (indir)
- err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+ if (rxfh->indir)
+ err = ixgbevf_get_reta_locked(&adapter->hw,
+ rxfh->indir,
adapter->num_rx_queues);
- if (!err && key)
- err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+ if (!err && rxfh->key)
+ err = ixgbevf_get_rss_key_locked(&adapter->hw,
+ rxfh->key);
spin_unlock_bh(&adapter->mbx_lock);
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a44e4bd56142..b938dc06045d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4292,7 +4292,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
@@ -4300,7 +4300,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
+static int ixgbevf_suspend(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -4317,7 +4317,7 @@ static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
return 0;
}
-static int __maybe_unused ixgbevf_resume(struct device *dev_d)
+static int ixgbevf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4413,7 +4413,7 @@ ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -4854,7 +4854,7 @@ static const struct pci_error_handlers ixgbevf_err_handler = {
.resume = ixgbevf_io_resume,
};
-static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
@@ -4863,7 +4863,7 @@ static struct pci_driver ixgbevf_driver = {
.remove = ixgbevf_remove,
/* Power Management Hooks */
- .driver.pm = &ixgbevf_pm_ops,
+ .driver.pm = pm_sleep_ptr(&ixgbevf_pm_ops),
.shutdown = ixgbevf_shutdown,
.err_handler = &ixgbevf_err_handler
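Two small but related details in the ixgbevf_main.c hunks: the suspend/resume callbacks drop __maybe_unused in favour of DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr(), which discard the ops cleanly when CONFIG_PM_SLEEP is off, and the MTU store becomes WRITE_ONCE() so that lockless readers can observe it safely; the new libeth_rx_hw_len() further below reads the MTU with a matching READ_ONCE(). A small sketch of the reader side, with a hypothetical helper name:

#include <linux/netdevice.h>

/* Hypothetical lockless reader; mirrors what libeth_rx_hw_len() does. */
static u32 example_current_mtu(const struct net_device *netdev)
{
	/* Pairs with WRITE_ONCE(netdev->mtu, new_mtu) in the MTU-change path. */
	return READ_ONCE(netdev->mtu);
}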
diff --git a/drivers/net/ethernet/intel/libeth/Kconfig b/drivers/net/ethernet/intel/libeth/Kconfig
new file mode 100644
index 000000000000..480293b71dbc
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config LIBETH
+ tristate
+ select PAGE_POOL
+ help
+ libeth is a common library containing routines shared between several
+ drivers, but not yet promoted to the generic kernel API.
diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile
new file mode 100644
index 000000000000..cb99203d1dd2
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+obj-$(CONFIG_LIBETH) += libeth.o
+
+libeth-objs += rx.o
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
new file mode 100644
index 000000000000..6221b88c34ac
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include <net/libeth/rx.h>
+
+/* Rx buffer management */
+
+/**
+ * libeth_rx_hw_len - get the actual buffer size to be passed to HW
+ * @pp: &page_pool_params of the netdev to calculate the size for
+ * @max_len: maximum buffer size for a single descriptor
+ *
+ * Return: HW-writeable length per one buffer to pass it to the HW accounting:
+ * MTU the @dev has, HW required alignment, minimum and maximum allowed values,
+ * and system's page size.
+ */
+static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len)
+{
+ u32 len;
+
+ len = READ_ONCE(pp->netdev->mtu) + LIBETH_RX_LL_LEN;
+ len = ALIGN(len, LIBETH_RX_BUF_STRIDE);
+ len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
+ pp->max_len);
+
+ return len;
+}
+
+/**
+ * libeth_rx_fq_create - create a PP with the default libeth settings
+ * @fq: buffer queue struct to fill
+ * @napi: &napi_struct covering this PP (no usage outside its poll loops)
+ *
+ * Return: %0 on success, -%errno on failure.
+ */
+int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
+{
+ struct page_pool_params pp = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = LIBETH_RX_PAGE_ORDER,
+ .pool_size = fq->count,
+ .nid = fq->nid,
+ .dev = napi->dev->dev.parent,
+ .netdev = napi->dev,
+ .napi = napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = LIBETH_SKB_HEADROOM,
+ };
+ struct libeth_fqe *fqes;
+ struct page_pool *pool;
+
+ /* HW-writeable / syncable length per one page */
+ pp.max_len = LIBETH_RX_PAGE_LEN(pp.offset);
+
+ /* HW-writeable length per buffer */
+ fq->buf_len = libeth_rx_hw_len(&pp, fq->buf_len);
+ /* Buffer size to allocate */
+ fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp.offset +
+ fq->buf_len));
+
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);
+ if (!fqes)
+ goto err_buf;
+
+ fq->fqes = fqes;
+ fq->pp = pool;
+
+ return 0;
+
+err_buf:
+ page_pool_destroy(pool);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, LIBETH);
+
+/**
+ * libeth_rx_fq_destroy - destroy a &page_pool created by libeth
+ * @fq: buffer queue to process
+ */
+void libeth_rx_fq_destroy(struct libeth_fq *fq)
+{
+ kvfree(fq->fqes);
+ page_pool_destroy(fq->pp);
+}
+EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, LIBETH);
+
+/**
+ * libeth_rx_recycle_slow - recycle a libeth page from the NAPI context
+ * @page: page to recycle
+ *
+ * To be used on exceptions or rare cases not requiring fast inline recycling.
+ */
+void libeth_rx_recycle_slow(struct page *page)
+{
+ page_pool_recycle_direct(page->pp, page);
+}
+EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, LIBETH);
+
+/* Converting abstract packet type numbers into a software structure with
+ * the packet parameters to do O(1) lookup on Rx.
+ */
+
+static const u16 libeth_rx_pt_xdp_oip[] = {
+ [LIBETH_RX_PT_OUTER_L2] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_OUTER_IPV4] = XDP_RSS_L3_IPV4,
+ [LIBETH_RX_PT_OUTER_IPV6] = XDP_RSS_L3_IPV6,
+};
+
+static const u16 libeth_rx_pt_xdp_iprot[] = {
+ [LIBETH_RX_PT_INNER_NONE] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_INNER_UDP] = XDP_RSS_L4_UDP,
+ [LIBETH_RX_PT_INNER_TCP] = XDP_RSS_L4_TCP,
+ [LIBETH_RX_PT_INNER_SCTP] = XDP_RSS_L4_SCTP,
+ [LIBETH_RX_PT_INNER_ICMP] = XDP_RSS_L4_ICMP,
+ [LIBETH_RX_PT_INNER_TIMESYNC] = XDP_RSS_TYPE_NONE,
+};
+
+static const u16 libeth_rx_pt_xdp_pl[] = {
+ [LIBETH_RX_PT_PAYLOAD_NONE] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_PAYLOAD_L2] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_PAYLOAD_L3] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_PAYLOAD_L4] = XDP_RSS_L4,
+};
+
+/**
+ * libeth_rx_pt_gen_hash_type - generate an XDP RSS hash type for a PT
+ * @pt: PT structure to evaluate
+ *
+ * Fills the ->hash_type field of @pt with XDP RSS type values derived from
+ * the parsed packet parameters when those are obtained dynamically at runtime.
+ */
+void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt)
+{
+ pt->hash_type = 0;
+ pt->hash_type |= libeth_rx_pt_xdp_oip[pt->outer_ip];
+ pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot];
+ pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer];
+}
+EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH);
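+
+/* Intended use, sketched: a driver whose packet type table is obtained at
+ * runtime (e.g. from the device or firmware) fills in the parsed fields of
+ * each &libeth_rx_pt and derives the hash type once at init, so the Rx hot
+ * path only reads ->hash_type.  The table and length names are illustrative:
+ *
+ *   for (u32 i = 0; i < pt_table_len; i++)
+ *           libeth_rx_pt_gen_hash_type(&pt_table[i]);
+ */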
+
+/* Module */
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Common Ethernet library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/libie/Kconfig b/drivers/net/ethernet/intel/libie/Kconfig
new file mode 100644
index 000000000000..33aff6bc8f81
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config LIBIE
+ tristate
+ select LIBETH
+ help
+ libie (Intel Ethernet library) is a common library built on top of
+ libeth; it contains vendor-specific routines shared between several
+ Intel Ethernet drivers.
diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile
new file mode 100644
index 000000000000..bf42c5aeeedd
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+obj-$(CONFIG_LIBIE) += libie.o
+
+libie-objs += rx.o
diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c
new file mode 100644
index 000000000000..38201ee1e891
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/rx.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include <linux/net/intel/libie/rx.h>
+
+/* O(1) conversion of i40e/ice/iavf's 8/10-bit hardware packet types to a
+ * parsed bitfield struct.
+ */
+
+/* A few supplementary definitions for when the XDP hash types do not coincide
+ * with what can be generated from the ptype definitions by means of
+ * preprocessor concatenation: e.g. LIBIE_RX_PT() below concatenates
+ * XDP_RSS_L4_ with the inner protocol name, which for NONE or TIMESYNC has no
+ * direct equivalent in the XDP UAPI, hence the aliases to XDP_RSS_TYPE_NONE.
+ */
+#define XDP_RSS_L3_L2 XDP_RSS_TYPE_NONE
+#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
+#define XDP_RSS_L4_TIMESYNC XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_L3 XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_L4 XDP_RSS_L4
+
+#define LIBIE_RX_PT(oip, ofrag, tun, tp, tefr, iprot, pl) { \
+ .outer_ip = LIBETH_RX_PT_OUTER_##oip, \
+ .outer_frag = LIBETH_RX_PT_##ofrag, \
+ .tunnel_type = LIBETH_RX_PT_TUNNEL_IP_##tun, \
+ .tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_##tp, \
+ .tunnel_end_frag = LIBETH_RX_PT_##tefr, \
+ .inner_prot = LIBETH_RX_PT_INNER_##iprot, \
+ .payload_layer = LIBETH_RX_PT_PAYLOAD_##pl, \
+ .hash_type = XDP_RSS_L3_##oip | \
+ XDP_RSS_L4_##iprot | \
+ XDP_RSS_TYPE_##pl, \
+ }
+
+#define LIBIE_RX_PT_UNUSED { }
+
+#define __LIBIE_RX_PT_L2(iprot, pl) \
+ LIBIE_RX_PT(L2, NOT_FRAG, NONE, NONE, NOT_FRAG, iprot, pl)
+#define LIBIE_RX_PT_L2 __LIBIE_RX_PT_L2(NONE, L2)
+#define LIBIE_RX_PT_TS __LIBIE_RX_PT_L2(TIMESYNC, L2)
+#define LIBIE_RX_PT_L3 __LIBIE_RX_PT_L2(NONE, L3)
+
+#define LIBIE_RX_PT_IP_FRAG(oip) \
+ LIBIE_RX_PT(IPV##oip, FRAG, NONE, NONE, NOT_FRAG, NONE, L3)
+#define LIBIE_RX_PT_IP_L3(oip, tun, teprot, tefr) \
+ LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, tefr, NONE, L3)
+#define LIBIE_RX_PT_IP_L4(oip, tun, teprot, iprot) \
+ LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, NOT_FRAG, iprot, L4)
+
+#define LIBIE_RX_PT_IP_NOF(oip, tun, ver) \
+ LIBIE_RX_PT_IP_L3(oip, tun, ver, NOT_FRAG), \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, UDP), \
+ LIBIE_RX_PT_UNUSED, \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, TCP), \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, SCTP), \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, ICMP)
+
+/* IPv oip --> tun --> IPv ver */
+#define LIBIE_RX_PT_IP_TUN_VER(oip, tun, ver) \
+ LIBIE_RX_PT_IP_L3(oip, tun, ver, FRAG), \
+ LIBIE_RX_PT_IP_NOF(oip, tun, ver)
+
+/* Non-tunneled IPv oip */
+#define LIBIE_RX_PT_IP_RAW(oip) \
+ LIBIE_RX_PT_IP_FRAG(oip), \
+ LIBIE_RX_PT_IP_NOF(oip, NONE, NONE)
+
+/* IPv oip --> tun --> { IPv4, IPv6 } */
+#define LIBIE_RX_PT_IP_TUN(oip, tun) \
+ LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV4), \
+ LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV6)
+
+/* IPv oip --> GRE/NAT tun --> { x, IPv4, IPv6 } */
+#define LIBIE_RX_PT_IP_GRE(oip, tun) \
+ LIBIE_RX_PT_IP_L3(oip, tun, NONE, NOT_FRAG), \
+ LIBIE_RX_PT_IP_TUN(oip, tun)
+
+/* Non-tunneled IPv oip
+ * IPv oip --> { IPv4, IPv6 }
+ * IPv oip --> GRE/NAT --> { x, IPv4, IPv6 }
+ * IPv oip --> GRE/NAT --> MAC --> { x, IPv4, IPv6 }
+ * IPv oip --> GRE/NAT --> MAC/VLAN --> { x, IPv4, IPv6 }
+ */
+#define LIBIE_RX_PT_IP(oip) \
+ LIBIE_RX_PT_IP_RAW(oip), \
+ LIBIE_RX_PT_IP_TUN(oip, IP), \
+ LIBIE_RX_PT_IP_GRE(oip, GRENAT), \
+ LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC), \
+ LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC_VLAN)
+
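+/* As an example of how the macros above compose, the non-tunneled IPv4/TCP
+ * entry in the table below, LIBIE_RX_PT_IP_L4(4, NONE, NONE, TCP), expands
+ * (roughly) to:
+ *
+ *   {
+ *           .outer_ip        = LIBETH_RX_PT_OUTER_IPV4,
+ *           .outer_frag      = LIBETH_RX_PT_NOT_FRAG,
+ *           .tunnel_type     = LIBETH_RX_PT_TUNNEL_IP_NONE,
+ *           .tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_NONE,
+ *           .tunnel_end_frag = LIBETH_RX_PT_NOT_FRAG,
+ *           .inner_prot      = LIBETH_RX_PT_INNER_TCP,
+ *           .payload_layer   = LIBETH_RX_PT_PAYLOAD_L4,
+ *           .hash_type       = XDP_RSS_L3_IPV4 | XDP_RSS_L4_TCP | XDP_RSS_L4,
+ *   }
+ *
+ * with the XDP RSS type built purely by preprocessor concatenation and the
+ * aliases at the top of this file covering the combinations that have no
+ * direct XDP equivalent.
+ */
+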
+/* Lookup table indexed by the HW packet type number, for O(1) parsing on Rx */
+const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = {
+ /* L2 packet types */
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_TS,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_UNUSED,
+
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+
+ LIBIE_RX_PT_IP(4),
+ LIBIE_RX_PT_IP(6),
+};
+EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, LIBIE);
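+
+/* A minimal consumer-side sketch: the 8/10-bit packet type extracted from an
+ * Rx descriptor (driver-specific, elided below) indexes the LUT, with any
+ * value outside the table clamped to entry 0, an all-zero "unused" slot:
+ *
+ *   u32 ptype = ...;   driver-specific descriptor parsing
+ *   struct libeth_rx_pt pt;
+ *
+ *   pt = libie_rx_pt_lut[ptype < LIBIE_RX_PT_NUM ? ptype : 0];
+ *
+ * after which pt.hash_type, pt.inner_prot etc. describe the packet in O(1).
+ * Note that consumers also need MODULE_IMPORT_NS(LIBIE) to access this table,
+ * on top of MODULE_IMPORT_NS(LIBETH) for the generic helpers.
+ */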
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Ethernet common library");
+MODULE_IMPORT_NS(LIBETH);
+MODULE_LICENSE("GPL");