Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c    | 117
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c         | 111
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c     |   4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h              |  32
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h   |  65
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c       | 457
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.h       |   8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c      | 338
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c         | 163
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c          |  12
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c         | 131
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h         |   3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c  |  10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/Makefile          |   2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h |  65
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c       |  31
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h       |   3
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h   |  33
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h          |  29
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_client.c   | 563
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_client.h   | 166
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c  |  31
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c     |  89
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c |  13
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h                |  58
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c        | 203
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c           | 514
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c            |   3
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c          |  38
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c      |  39
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c    | 168
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c       |   6
32 files changed, 2376 insertions(+), 1129 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 975eeb885ca2..ec8aa4562cc9 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -103,104 +103,104 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
-static int e1000_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int e1000_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 supported, advertising;
if (hw->media_type == e1000_media_type_copper) {
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
- ecmd->advertising = ADVERTISED_TP;
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ advertising = ADVERTISED_TP;
if (hw->autoneg == 1) {
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
/* the e1000 autoneg seems to match ethtool nicely */
- ecmd->advertising |= hw->autoneg_advertised;
+ advertising |= hw->autoneg_advertised;
}
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy_addr;
-
- if (hw->mac_type == e1000_82543)
- ecmd->transceiver = XCVR_EXTERNAL;
- else
- ecmd->transceiver = XCVR_INTERNAL;
-
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy_addr;
} else {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg);
+ supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
+ advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
- ecmd->port = PORT_FIBRE;
-
- if (hw->mac_type >= e1000_82545)
- ecmd->transceiver = XCVR_INTERNAL;
- else
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.port = PORT_FIBRE;
}
if (er32(STATUS) & E1000_STATUS_LU) {
e1000_get_speed_and_duplex(hw, &adapter->link_speed,
&adapter->link_duplex);
- ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ cmd->base.speed = adapter->link_speed;
/* unfortunately FULL_DUPLEX != DUPLEX_FULL
* and HALF_DUPLEX != DUPLEX_HALF
*/
if (adapter->link_duplex == FULL_DUPLEX)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+ cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) ||
hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
/* MDI-X => 1; MDI => 0 */
if ((hw->media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
- ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
+ cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
ETH_TP_MDI_X : ETH_TP_MDI);
else
- ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
if (hw->mdix == AUTO_ALL_MODES)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
else
- ecmd->eth_tp_mdix_ctrl = hw->mdix;
+ cmd->base.eth_tp_mdix_ctrl = hw->mdix;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int e1000_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int e1000_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
if (hw->media_type != e1000_media_type_copper)
return -EOPNOTSUPP;
- if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
- (ecmd->autoneg != AUTONEG_ENABLE)) {
+ if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (cmd->base.autoneg != AUTONEG_ENABLE)) {
e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
return -EINVAL;
}
@@ -209,32 +209,31 @@ static int e1000_set_settings(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->autoneg = 1;
if (hw->media_type == e1000_media_type_fiber)
hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg;
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
else
- hw->autoneg_advertised = ecmd->advertising |
+ hw->autoneg_advertised = advertising |
ADVERTISED_TP |
ADVERTISED_Autoneg;
- ecmd->advertising = hw->autoneg_advertised;
} else {
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
/* calling this overrides forced MDI setting */
- if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
clear_bit(__E1000_RESETTING, &adapter->flags);
return -EINVAL;
}
}
/* MDI-X => 2; MDI => 1; Auto => 3 */
- if (ecmd->eth_tp_mdix_ctrl) {
- if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ if (cmd->base.eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
hw->mdix = AUTO_ALL_MODES;
else
- hw->mdix = ecmd->eth_tp_mdix_ctrl;
+ hw->mdix = cmd->base.eth_tp_mdix_ctrl;
}
/* reset the link */
@@ -1875,8 +1874,6 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
@@ -1901,6 +1898,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = e1000_get_link_ksettings,
+ .set_link_ksettings = e1000_set_link_ksettings,
};
void e1000_set_ethtool_ops(struct net_device *netdev)
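
For reference, a minimal sketch (not taken from the patch) of the callback shape this e1000 conversion produces: the legacy SUPPORTED_*/ADVERTISED_* masks are still built as local u32 values and are then translated into the new link-mode bitmaps with ethtool_convert_legacy_u32_to_link_mode(), while the scalar fields move into cmd->base. The foo_ prefix is a placeholder driver name, not an Intel driver.

static int foo_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	/* Build the legacy bitmasks exactly as the old .get_settings did */
	u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			SUPPORTED_TP;
	u32 advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
			  ADVERTISED_TP;

	/* Scalar fields move from struct ethtool_cmd to cmd->base */
	cmd->base.port = PORT_TP;
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;

	/* Convert the legacy u32 masks into the new link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}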
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 7aff68a4a4df..e70b1ebff60d 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -117,55 +117,52 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
-static int e1000_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int e1000_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 speed;
+ u32 speed, supported, advertising;
if (hw->phy.media_type == e1000_media_type_copper) {
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
if (hw->phy.type == e1000_phy_ife)
- ecmd->supported &= ~SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_TP;
+ supported &= ~SUPPORTED_1000baseT_Full;
+ advertising = ADVERTISED_TP;
if (hw->mac.autoneg == 1) {
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
/* the e1000 autoneg seems to match ethtool nicely */
- ecmd->advertising |= hw->phy.autoneg_advertised;
+ advertising |= hw->phy.autoneg_advertised;
}
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy.addr;
- ecmd->transceiver = XCVR_INTERNAL;
-
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy.addr;
} else {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg);
+ supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
+ advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.port = PORT_FIBRE;
}
speed = SPEED_UNKNOWN;
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
if (netif_running(netdev)) {
if (netif_carrier_ok(netdev)) {
speed = adapter->link_speed;
- ecmd->duplex = adapter->link_duplex - 1;
+ cmd->base.duplex = adapter->link_duplex - 1;
}
} else if (!pm_runtime_suspended(netdev->dev.parent)) {
u32 status = er32(STATUS);
@@ -179,30 +176,36 @@ static int e1000_get_settings(struct net_device *netdev,
speed = SPEED_10;
if (status & E1000_STATUS_FD)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
}
}
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
+ cmd->base.speed = speed;
+ cmd->base.autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if ((hw->phy.media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI;
+ cmd->base.eth_tp_mdix = hw->phy.is_mdix ?
+ ETH_TP_MDI_X : ETH_TP_MDI;
else
- ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
if (hw->phy.mdix == AUTO_ALL_MODES)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
else
- ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+ cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;
if (hw->phy.media_type != e1000_media_type_copper)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
@@ -262,12 +265,16 @@ err_inval:
return -EINVAL;
}
-static int e1000_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int e1000_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int ret_val = 0;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
pm_runtime_get_sync(netdev->dev.parent);
@@ -285,14 +292,14 @@ static int e1000_set_settings(struct net_device *netdev,
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
if (hw->phy.media_type != e1000_media_type_copper) {
ret_val = -EOPNOTSUPP;
goto out;
}
- if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
- (ecmd->autoneg != AUTONEG_ENABLE)) {
+ if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (cmd->base.autoneg != AUTONEG_ENABLE)) {
e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
ret_val = -EINVAL;
goto out;
@@ -302,35 +309,35 @@ static int e1000_set_settings(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
if (hw->phy.media_type == e1000_media_type_fiber)
hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
ADVERTISED_FIBRE | ADVERTISED_Autoneg;
else
- hw->phy.autoneg_advertised = ecmd->advertising |
+ hw->phy.autoneg_advertised = advertising |
ADVERTISED_TP | ADVERTISED_Autoneg;
- ecmd->advertising = hw->phy.autoneg_advertised;
+ advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
} else {
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
/* calling this overrides forced MDI setting */
- if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
ret_val = -EINVAL;
goto out;
}
}
/* MDI-X => 2; MDI => 1; Auto => 3 */
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
/* fix up the value for auto (3 => 0) as zero is mapped
* internally to auto
*/
- if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
hw->phy.mdix = AUTO_ALL_MODES;
else
- hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
}
/* reset the link */
@@ -2313,8 +2320,6 @@ static int e1000e_get_ts_info(struct net_device *netdev,
}
static const struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
@@ -2342,6 +2347,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_ts_info = e1000e_get_ts_info,
.get_eee = e1000e_get_eee,
.set_eee = e1000e_set_eee,
+ .get_link_ksettings = e1000_get_link_ksettings,
+ .set_link_ksettings = e1000_set_link_ksettings,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
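
The set path in e1000 and e1000e above is the mirror image. A sketch of its general shape, again with a placeholder foo_ driver and the device-specific programming elided: the requested link modes are folded back into a legacy u32 with ethtool_convert_link_mode_to_legacy_u32() before being handed to the existing advertisement code.

static int foo_set_link_ksettings(struct net_device *netdev,
				  const struct ethtool_link_ksettings *cmd)
{
	u32 advertising;

	/* Recover a legacy u32 mask from the requested link modes;
	 * modes with no legacy equivalent cannot be represented here.
	 */
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		/* program the PHY autoneg advertisement from 'advertising' */
	} else {
		/* force cmd->base.speed / cmd->base.duplex instead */
	}
	return 0;
}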
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 01db688cf539..72481670478c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1226,7 +1226,9 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return fm10k_setup_tc(dev, tc->tc);
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return fm10k_setup_tc(dev, tc->mqprio->num_tc);
}
static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
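
The fm10k hunk reflects the mqprio offload rework: the traffic-class count now arrives through tc->mqprio, and the driver reports back that it offloads only the TC count. A hedged sketch of the resulting handler shape, with foo_ as a placeholder name and the actual queue reconfiguration left to the unchanged foo_setup_tc():

static int __foo_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			  struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	/* Advertise that only the number of TCs is offloaded in hardware */
	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return foo_setup_tc(dev, tc->mqprio->num_tc);
}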
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 82d8040fa418..c0f2286c2b72 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -208,8 +208,8 @@ struct i40e_fdir_filter {
u8 flow_type;
u8 ip4_proto;
/* TX packet view of src and dst */
- __be32 dst_ip[4];
- __be32 src_ip[4];
+ __be32 dst_ip;
+ __be32 src_ip;
__be16 src_port;
__be16 dst_port;
__be32 sctp_v_tag;
@@ -244,7 +244,8 @@ struct i40e_tc_configuration {
};
struct i40e_udp_port_config {
- __be16 index;
+ /* AdminQ command interface expects port number in Host byte order */
+ u16 index;
u8 type;
};
@@ -285,7 +286,14 @@ struct i40e_pf {
u32 fd_flush_cnt;
u32 fd_add_err;
u32 fd_atr_cnt;
- u32 fd_tcp_rule;
+
+ /* Book-keeping of side-band filter count per flow-type.
+ * This is used to detect and handle input set changes for
+ * respective flow-type.
+ */
+ u16 fd_tcp4_filter_cnt;
+ u16 fd_udp4_filter_cnt;
+ u16 fd_ip4_filter_cnt;
struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
u16 pending_udp_bitmap;
@@ -348,16 +356,23 @@ struct i40e_pf {
#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
#define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52)
#define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53)
-#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(54)
+#define I40E_FLAG_CLIENT_RESET BIT_ULL(54)
#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55)
+#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56)
+#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(57)
- /* tracks features that get auto disabled by errors */
- u64 auto_disable_flags;
+ /* Tracks features that are disabled due to hw limitations.
+ * If a bit is set here, it means that the corresponding
+ * bit in the 'flags' field is cleared i.e that feature
+ * is disabled
+ */
+ u64 hw_disabled_flags;
#ifdef I40E_FCOE
struct i40e_fcoe fcoe;
#endif /* I40E_FCOE */
+ struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
struct i40e_hw_port_stats stats_offsets;
@@ -813,8 +828,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
-int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
- enum i40e_client_type type);
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id);
/**
* i40e_irq_dynamic_enable - Enable default interrupt generation settings
* @vsi: pointer to a vsi
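
The auto_disable_flags -> hw_disabled_flags rename keeps the book-keeping the new comment describes: when hardware limits force a feature off, the driver clears the bit in 'flags' and records the same bit in hw_disabled_flags. A minimal sketch of that documented convention, using the existing I40E_FLAG_FD_ATR_ENABLED bit purely as an illustration; the surrounding error path is assumed and not part of this hunk.

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
		/* Feature can no longer be honoured: turn it off and
		 * remember that hardware, not the user, disabled it.
		 */
		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
	}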
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 451f48b7540a..251074c677c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -132,6 +132,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -139,6 +143,10 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -177,6 +185,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
@@ -563,6 +572,56 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+ __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
+ __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -645,6 +704,8 @@ struct i40e_aqc_set_port_parameters {
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
@@ -696,6 +757,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
+/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
@@ -1844,11 +1906,12 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 external_power_ability;
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
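
The new 0x0120 descriptor is used like any other direct AQ command: the caller fills struct i40e_aqc_set_wol_filter in the descriptor parameter area and, when a pattern is supplied, points address_high/address_low at an i40e_aqc_set_wol_filter_data buffer. A hedged sketch of populating the fields defined above; the helper that actually builds and sends the descriptor is not part of this hunk and is omitted.

	struct i40e_aqc_set_wol_filter cmd = {0};

	/* Use filter slot 0 and mark it as a magic-packet filter */
	cmd.filter_index = cpu_to_le16(0 |
			I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK);
	/* Request that the filter be programmed ... */
	cmd.cmd_flags = cpu_to_le16(I40E_AQC_SET_WOL_FILTER);
	/* ... and declare the action field valid */
	cmd.valid_flags = cpu_to_le16(I40E_AQC_SET_WOL_FILTER_ACTION_VALID);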
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index d570219efd9f..a9f0d22a7cf4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -32,16 +32,10 @@
#include "i40e_client.h"
static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
-
+static struct i40e_client *registered_client;
static LIST_HEAD(i40e_devices);
static DEFINE_MUTEX(i40e_device_mutex);
-static LIST_HEAD(i40e_clients);
-static DEFINE_MUTEX(i40e_client_mutex);
-
-static LIST_HEAD(i40e_client_instances);
-static DEFINE_MUTEX(i40e_client_instance_mutex);
-
static int i40e_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_client *client,
u32 vf_id, u8 *msg, u16 len);
@@ -67,28 +61,6 @@ static struct i40e_ops i40e_lan_ops = {
};
/**
- * i40e_client_type_to_vsi_type - convert client type to vsi type
- * @client_type: the i40e_client type
- *
- * returns the related vsi type value
- **/
-static
-enum i40e_vsi_type i40e_client_type_to_vsi_type(enum i40e_client_type type)
-{
- switch (type) {
- case I40E_CLIENT_IWARP:
- return I40E_VSI_IWARP;
-
- case I40E_CLIENT_VMDQ2:
- return I40E_VSI_VMDQ2;
-
- default:
- pr_err("i40e: Client type unknown\n");
- return I40E_VSI_TYPE_UNKNOWN;
- }
-}
-
-/**
* i40e_client_get_params - Get the params that can change at runtime
* @vsi: the VSI with the message
* @param: clinet param struct
@@ -134,31 +106,22 @@ int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
void
i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
{
- struct i40e_client_instance *cdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_client_instance *cdev = pf->cinst;
- if (!vsi)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->virtchnl_receive) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance virtual channel receive routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort virtchnl_receive\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == vsi->back) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->virtchnl_receive) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance virtual channel receive routine\n");
- continue;
- }
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort virtchnl_receive\n");
- continue;
- }
- cdev->client->ops->virtchnl_receive(&cdev->lan_info,
- cdev->client,
- vf_id, msg, len);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ cdev->client->ops->virtchnl_receive(&cdev->lan_info, cdev->client,
+ vf_id, msg, len);
}
/**
@@ -169,39 +132,28 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
**/
void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
{
- struct i40e_client_instance *cdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_client_instance *cdev = pf->cinst;
struct i40e_params params;
- if (!vsi)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->l2_param_change) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance l2_param_change routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == vsi->back) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->l2_param_change) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance l2_param_change routine\n");
- continue;
- }
- memset(&params, 0, sizeof(params));
- i40e_client_get_params(vsi, &params);
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
- continue;
- }
- cdev->lan_info.params = params;
- cdev->client->ops->l2_param_change(&cdev->lan_info,
- cdev->client,
- &params);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ memcpy(&cdev->lan_info.params, &params, sizeof(struct i40e_params));
+ cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client,
+ &params);
}
/**
- * i40e_client_release_qvlist
+ * i40e_client_release_qvlist - release MSI-X vector mapping for client
* @ldev: pointer to L2 context.
*
**/
@@ -237,26 +189,19 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
**/
void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
{
- struct i40e_client_instance *cdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_client_instance *cdev = pf->cinst;
- if (!vsi)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->close) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance close routine\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.netdev == vsi->netdev) {
- if (!cdev->client ||
- !cdev->client->ops || !cdev->client->ops->close) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance close routine\n");
- continue;
- }
- cdev->client->ops->close(&cdev->lan_info, cdev->client,
- reset);
- clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
- i40e_client_release_qvlist(&cdev->lan_info);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ i40e_client_release_qvlist(&cdev->lan_info);
}
/**
@@ -268,30 +213,20 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
**/
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
{
- struct i40e_client_instance *cdev;
+ struct i40e_client_instance *cdev = pf->cinst;
- if (!pf)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->vf_reset) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF reset routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == pf) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->vf_reset) {
- dev_dbg(&pf->pdev->dev,
- "Cannot locate client instance VF reset routine\n");
- continue;
- }
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n");
- continue;
- }
- cdev->client->ops->vf_reset(&cdev->lan_info,
- cdev->client, vf_id);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ cdev->client->ops->vf_reset(&cdev->lan_info, cdev->client, vf_id);
}
/**
@@ -303,30 +238,21 @@ void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
**/
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
{
- struct i40e_client_instance *cdev;
+ struct i40e_client_instance *cdev = pf->cinst;
- if (!pf)
+ if (!cdev || !cdev->client)
+ return;
+ if (!cdev->client->ops || !cdev->client->ops->vf_enable) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF enable routine\n");
+ return;
+ }
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
return;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == pf) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->vf_enable) {
- dev_dbg(&pf->pdev->dev,
- "Cannot locate client instance VF enable routine\n");
- continue;
- }
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n");
- continue;
- }
- cdev->client->ops->vf_enable(&cdev->lan_info,
- cdev->client, num_vfs);
- }
}
- mutex_unlock(&i40e_client_instance_mutex);
+ cdev->client->ops->vf_enable(&cdev->lan_info, cdev->client, num_vfs);
}
/**
@@ -337,37 +263,25 @@ void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
* If there is a client of the specified type attached to this PF, call
* its vf_capable routine
**/
-int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
- enum i40e_client_type type)
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id)
{
- struct i40e_client_instance *cdev;
+ struct i40e_client_instance *cdev = pf->cinst;
int capable = false;
- if (!pf)
- return false;
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if (cdev->lan_info.pf == pf) {
- if (!cdev->client ||
- !cdev->client->ops ||
- !cdev->client->ops->vf_capable ||
- !(cdev->client->type == type)) {
- dev_dbg(&pf->pdev->dev,
- "Cannot locate client instance VF capability routine\n");
- continue;
- }
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-capable\n");
- continue;
- }
- capable = cdev->client->ops->vf_capable(&cdev->lan_info,
- cdev->client,
- vf_id);
- break;
- }
+ if (!cdev || !cdev->client)
+ goto out;
+ if (!cdev->client->ops || !cdev->client->ops->vf_capable) {
+ dev_info(&pf->pdev->dev,
+ "Cannot locate client instance VF capability routine\n");
+ goto out;
}
- mutex_unlock(&i40e_client_instance_mutex);
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state))
+ goto out;
+
+ capable = cdev->client->ops->vf_capable(&cdev->lan_info,
+ cdev->client,
+ vf_id);
+out:
return capable;
}
@@ -377,27 +291,19 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
* @client: pointer to a client struct in the client list.
* @existing: if there was already an existing instance
*
- * Returns cdev ptr on success or if already exists, NULL on failure
**/
-static
-struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
- struct i40e_client *client,
- bool *existing)
+static void i40e_client_add_instance(struct i40e_pf *pf)
{
- struct i40e_client_instance *cdev;
+ struct i40e_client_instance *cdev = NULL;
struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry(cdev, &i40e_client_instances, list) {
- if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
- *existing = true;
- goto out;
- }
- }
+ if (!registered_client || pf->cinst)
+ return;
+
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
- goto out;
+ return;
cdev->lan_info.pf = (void *)pf;
cdev->lan_info.netdev = vsi->netdev;
@@ -417,7 +323,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
kfree(cdev);
cdev = NULL;
- goto out;
+ return;
}
cdev->lan_info.msix_count = pf->num_iwarp_msix;
@@ -430,41 +336,20 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
else
dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
- cdev->client = client;
- INIT_LIST_HEAD(&cdev->list);
- list_add(&cdev->list, &i40e_client_instances);
-out:
- mutex_unlock(&i40e_client_instance_mutex);
- return cdev;
+ cdev->client = registered_client;
+ pf->cinst = cdev;
}
/**
* i40e_client_del_instance - removes a client instance from the list
* @pf: pointer to the board struct
*
- * Returns 0 on success or non-0 on error
**/
static
-int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
+void i40e_client_del_instance(struct i40e_pf *pf)
{
- struct i40e_client_instance *cdev, *tmp;
- int ret = -ENODEV;
-
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
- if ((cdev->lan_info.pf != pf) || (cdev->client != client))
- continue;
-
- dev_info(&pf->pdev->dev, "Deleted instance of Client %s, of dev %d bus=0x%02x func=0x%02x)\n",
- client->name, pf->hw.pf_id,
- pf->hw.bus.device, pf->hw.bus.func);
- list_del(&cdev->list);
- kfree(cdev);
- ret = 0;
- break;
- }
- mutex_unlock(&i40e_client_instance_mutex);
- return ret;
+ kfree(pf->cinst);
+ pf->cinst = NULL;
}
/**
@@ -473,67 +358,50 @@ int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
**/
void i40e_client_subtask(struct i40e_pf *pf)
{
+ struct i40e_client *client = registered_client;
struct i40e_client_instance *cdev;
- struct i40e_client *client;
- bool existing = false;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
int ret = 0;
if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
return;
pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ cdev = pf->cinst;
/* If we're down or resetting, just bail */
if (test_bit(__I40E_DOWN, &pf->state) ||
test_bit(__I40E_CONFIG_BUSY, &pf->state))
return;
- /* Check client state and instantiate client if client registered */
- mutex_lock(&i40e_client_mutex);
- list_for_each_entry(client, &i40e_clients, list) {
- /* first check client is registered */
- if (!test_bit(__I40E_CLIENT_REGISTERED, &client->state))
- continue;
-
- /* Do we also need the LAN VSI to be up, to create instance */
- if (!(client->flags & I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE)) {
- /* check if L2 VSI is up, if not we are not ready */
- if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
- continue;
- } else {
- dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
- client->name);
- }
-
- /* Add the client instance to the instance list */
- cdev = i40e_client_add_instance(pf, client, &existing);
- if (!cdev)
- continue;
-
- if (!existing) {
- dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
- client->name, pf->hw.pf_id,
- pf->hw.bus.bus_id, pf->hw.bus.device,
- pf->hw.bus.func);
- }
+ if (!client || !cdev)
+ return;
- mutex_lock(&i40e_client_instance_mutex);
- if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state)) {
- /* Send an Open request to the client */
- if (client->ops && client->ops->open)
- ret = client->ops->open(&cdev->lan_info,
- client);
- if (!ret) {
- set_bit(__I40E_CLIENT_INSTANCE_OPENED,
- &cdev->state);
- } else {
- /* remove client instance */
- i40e_client_del_instance(pf, client);
+ /* Here we handle client opens. If the client is down, but
+ * the netdev is up, then open the client.
+ */
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ if (!test_bit(__I40E_DOWN, &vsi->state) &&
+ client->ops && client->ops->open) {
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ ret = client->ops->open(&cdev->lan_info, client);
+ if (ret) {
+ /* Remove failed client instance */
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state);
+ i40e_client_del_instance(pf);
}
}
- mutex_unlock(&i40e_client_instance_mutex);
+ } else {
+ /* Likewise for client close. If the client is up, but the netdev
+ * is down, then close the client.
+ */
+ if (test_bit(__I40E_DOWN, &vsi->state) &&
+ client->ops && client->ops->close) {
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ client->ops->close(&cdev->lan_info, client, false);
+ i40e_client_release_qvlist(&cdev->lan_info);
+ }
}
- mutex_unlock(&i40e_client_mutex);
}
/**
@@ -601,7 +469,6 @@ int i40e_lan_del_device(struct i40e_pf *pf)
break;
}
}
-
mutex_unlock(&i40e_device_mutex);
return ret;
}
@@ -610,22 +477,24 @@ int i40e_lan_del_device(struct i40e_pf *pf)
* i40e_client_release - release client specific resources
* @client: pointer to the registered client
*
- * Return 0 on success or < 0 on error
**/
-static int i40e_client_release(struct i40e_client *client)
+static void i40e_client_release(struct i40e_client *client)
{
- struct i40e_client_instance *cdev, *tmp;
+ struct i40e_client_instance *cdev;
+ struct i40e_device *ldev;
struct i40e_pf *pf;
- int ret = 0;
- LIST_HEAD(cdevs_tmp);
-
- mutex_lock(&i40e_client_instance_mutex);
- list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
- if (strncmp(cdev->client->name, client->name,
- I40E_CLIENT_STR_LENGTH))
+ mutex_lock(&i40e_device_mutex);
+ list_for_each_entry(ldev, &i40e_devices, list) {
+ pf = ldev->pf;
+ cdev = pf->cinst;
+ if (!cdev)
continue;
- pf = (struct i40e_pf *)cdev->lan_info.pf;
+
+ while (test_and_set_bit(__I40E_SERVICE_SCHED,
+ &pf->state))
+ usleep_range(500, 1000);
+
if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
if (client->ops && client->ops->close)
client->ops->close(&cdev->lan_info, client,
@@ -637,18 +506,13 @@ static int i40e_client_release(struct i40e_client *client)
"Client %s instance for PF id %d closed\n",
client->name, pf->hw.pf_id);
}
- /* delete the client instance from the list */
- list_move(&cdev->list, &cdevs_tmp);
+ /* delete the client instance */
+ i40e_client_del_instance(pf);
dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
client->name);
+ clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
- mutex_unlock(&i40e_client_instance_mutex);
-
- /* free the client device and release its vsi */
- list_for_each_entry_safe(cdev, tmp, &cdevs_tmp, list) {
- kfree(cdev);
- }
- return ret;
+ mutex_unlock(&i40e_device_mutex);
}
/**
@@ -664,6 +528,7 @@ static void i40e_client_prepare(struct i40e_client *client)
mutex_lock(&i40e_device_mutex);
list_for_each_entry(ldev, &i40e_devices, list) {
pf = ldev->pf;
+ i40e_client_add_instance(pf);
/* Start the client subtask */
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
i40e_service_event_schedule(pf);
@@ -792,8 +657,8 @@ static void i40e_client_request_reset(struct i40e_info *ldev,
break;
default:
dev_warn(&pf->pdev->dev,
- "Client %s instance for PF id %d request an unsupported reset: %d.\n",
- client->name, pf->hw.pf_id, reset_level);
+ "Client for PF id %d requested an unsupported reset: %d.\n",
+ pf->hw.pf_id, reset_level);
break;
}
@@ -852,8 +717,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
} else {
update = false;
dev_warn(&pf->pdev->dev,
- "Client %s instance for PF id %d request an unsupported Config: %x.\n",
- client->name, pf->hw.pf_id, flag);
+ "Client for PF id %d request an unsupported Config: %x.\n",
+ pf->hw.pf_id, flag);
}
if (update) {
@@ -878,7 +743,6 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
int i40e_register_client(struct i40e_client *client)
{
int ret = 0;
- enum i40e_vsi_type vsi_type;
if (!client) {
ret = -EIO;
@@ -891,11 +755,9 @@ int i40e_register_client(struct i40e_client *client)
goto out;
}
- mutex_lock(&i40e_client_mutex);
- if (i40e_client_is_registered(client)) {
+ if (registered_client) {
pr_info("i40e: Client %s has already been registered!\n",
client->name);
- mutex_unlock(&i40e_client_mutex);
ret = -EEXIST;
goto out;
}
@@ -908,22 +770,11 @@ int i40e_register_client(struct i40e_client *client)
client->version.major, client->version.minor,
client->version.build,
i40e_client_interface_version_str);
- mutex_unlock(&i40e_client_mutex);
ret = -EIO;
goto out;
}
- vsi_type = i40e_client_type_to_vsi_type(client->type);
- if (vsi_type == I40E_VSI_TYPE_UNKNOWN) {
- pr_info("i40e: Failed to register client %s due to unknown client type %d\n",
- client->name, client->type);
- mutex_unlock(&i40e_client_mutex);
- ret = -EIO;
- goto out;
- }
- list_add(&client->list, &i40e_clients);
- set_bit(__I40E_CLIENT_REGISTERED, &client->state);
- mutex_unlock(&i40e_client_mutex);
+ registered_client = client;
i40e_client_prepare(client);
@@ -943,29 +794,21 @@ int i40e_unregister_client(struct i40e_client *client)
{
int ret = 0;
- /* When a unregister request comes through we would have to send
- * a close for each of the client instances that were opened.
- * client_release function is called to handle this.
- */
- mutex_lock(&i40e_client_mutex);
- if (!client || i40e_client_release(client)) {
- ret = -EIO;
- goto out;
- }
-
- /* TODO: check if device is in reset, or if that matters? */
- if (!i40e_client_is_registered(client)) {
+ if (registered_client != client) {
pr_info("i40e: Client %s has not been registered\n",
client->name);
ret = -ENODEV;
goto out;
}
- clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
- list_del(&client->list);
- pr_info("i40e: Unregistered client %s with return code %d\n",
- client->name, ret);
+ registered_client = NULL;
+ /* When a unregister request comes through we would have to send
+ * a close for each of the client instances that were opened.
+ * client_release function is called to handle this.
+ */
+ i40e_client_release(client);
+
+ pr_info("i40e: Unregistered client %s\n", client->name);
out:
- mutex_unlock(&i40e_client_mutex);
return ret;
}
EXPORT_SYMBOL(i40e_unregister_client);
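
With the client list collapsed into a single registered_client pointer, the exported API seen by a consumer (in practice the i40iw RDMA driver) is unchanged. A hedged sketch of how such a consumer registers and unregisters; the "example" client, its empty ops table, and the module boilerplate are illustrative only and not part of this patch.

#include <linux/module.h>
#include "i40e_client.h"

static const struct i40e_client_ops example_ops = {
	/* .open, .close, .l2_param_change, ... as required */
};

static struct i40e_client example_client = {
	.name = "example",
	.type = I40E_CLIENT_IWARP,
	.ops  = &example_ops,
	.version = {
		.major = I40E_CLIENT_VERSION_MAJOR,
		.minor = I40E_CLIENT_VERSION_MINOR,
		.build = I40E_CLIENT_VERSION_BUILD,
	},
};

static int __init example_init(void)
{
	/* Only one client may be registered; a second gets -EEXIST */
	return i40e_register_client(&example_client);
}

static void __exit example_exit(void)
{
	i40e_unregister_client(&example_client);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");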
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 528bd79b05fe..15b21a5315b5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -57,11 +57,6 @@ enum i40e_client_instance_state {
__I40E_CLIENT_INSTANCE_OPENED,
};
-enum i40e_client_type {
- I40E_CLIENT_IWARP,
- I40E_CLIENT_VMDQ2
-};
-
struct i40e_ops;
struct i40e_client;
@@ -214,7 +209,8 @@ struct i40e_client {
u32 flags;
#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
- enum i40e_client_type type;
+ u8 type;
+#define I40E_CLIENT_IWARP 0
const struct i40e_client_ops *ops; /* client ops provided by the client */
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index a22e26200bcc..1c3805b4fcf3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -387,7 +387,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
*
**/
static void i40e_get_settings_link_up(struct i40e_hw *hw,
- struct ethtool_cmd *ecmd,
+ struct ethtool_link_ksettings *cmd,
struct net_device *netdev,
struct i40e_pf *pf)
{
@@ -395,90 +395,96 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
u32 link_speed = hw_link_info->link_speed;
u32 e_advertising = 0x0;
u32 e_supported = 0x0;
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* Initialize supported and advertised settings based on phy settings */
switch (hw_link_info->phy_type) {
case I40E_PHY_TYPE_40GBASE_CR4:
case I40E_PHY_TYPE_40GBASE_CR4_CU:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_40000baseCR4_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_40000baseCR4_Full;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_40000baseCR4_Full;
+ advertising = ADVERTISED_Autoneg |
+ ADVERTISED_40000baseCR4_Full;
break;
case I40E_PHY_TYPE_XLAUI:
case I40E_PHY_TYPE_XLPPI:
case I40E_PHY_TYPE_40GBASE_AOC:
- ecmd->supported = SUPPORTED_40000baseCR4_Full;
+ supported = SUPPORTED_40000baseCR4_Full;
break;
case I40E_PHY_TYPE_40GBASE_SR4:
- ecmd->supported = SUPPORTED_40000baseSR4_Full;
+ supported = SUPPORTED_40000baseSR4_Full;
break;
case I40E_PHY_TYPE_40GBASE_LR4:
- ecmd->supported = SUPPORTED_40000baseLR4_Full;
+ supported = SUPPORTED_40000baseLR4_Full;
break;
case I40E_PHY_TYPE_10GBASE_SR:
case I40E_PHY_TYPE_10GBASE_LR:
case I40E_PHY_TYPE_1000BASE_SX:
case I40E_PHY_TYPE_1000BASE_LX:
- ecmd->supported = SUPPORTED_10000baseT_Full;
+ supported = SUPPORTED_10000baseT_Full;
if (hw_link_info->module_type[2] &
I40E_MODULE_TYPE_1000BASE_SX ||
hw_link_info->module_type[2] &
I40E_MODULE_TYPE_1000BASE_LX) {
- ecmd->supported |= SUPPORTED_1000baseT_Full;
+ supported |= SUPPORTED_1000baseT_Full;
if (hw_link_info->requested_speeds &
I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
}
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ advertising |= ADVERTISED_10000baseT_Full;
break;
case I40E_PHY_TYPE_10GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
case I40E_PHY_TYPE_100BASE_TX:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
+ advertising = ADVERTISED_Autoneg;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ advertising |= ADVERTISED_10000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ advertising = ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
break;
case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_CR1:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full;
break;
case I40E_PHY_TYPE_XAUI:
case I40E_PHY_TYPE_XFI:
case I40E_PHY_TYPE_SFI:
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_10GBASE_AOC:
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = SUPPORTED_10000baseT_Full;
+ supported = SUPPORTED_10000baseT_Full;
+ advertising = SUPPORTED_10000baseT_Full;
break;
case I40E_PHY_TYPE_SGMII:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
+ supported = SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
+ supported |= SUPPORTED_100baseT_Full;
if (hw_link_info->requested_speeds &
I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
}
break;
case I40E_PHY_TYPE_40GBASE_KR4:
@@ -486,25 +492,25 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_1000BASE_KX:
- ecmd->supported |= SUPPORTED_40000baseKR4_Full |
- SUPPORTED_20000baseKR2_Full |
- SUPPORTED_10000baseKR_Full |
- SUPPORTED_10000baseKX4_Full |
- SUPPORTED_1000baseKX_Full |
- SUPPORTED_Autoneg;
- ecmd->advertising |= ADVERTISED_40000baseKR4_Full |
- ADVERTISED_20000baseKR2_Full |
- ADVERTISED_10000baseKR_Full |
- ADVERTISED_10000baseKX4_Full |
- ADVERTISED_1000baseKX_Full |
- ADVERTISED_Autoneg;
+ supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_20000baseKR2_Full |
+ ADVERTISED_10000baseKR_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_1000baseKX_Full |
+ ADVERTISED_Autoneg;
break;
case I40E_PHY_TYPE_25GBASE_KR:
case I40E_PHY_TYPE_25GBASE_CR:
case I40E_PHY_TYPE_25GBASE_SR:
case I40E_PHY_TYPE_25GBASE_LR:
- ecmd->supported = SUPPORTED_Autoneg;
- ecmd->advertising = ADVERTISED_Autoneg;
+ supported = SUPPORTED_Autoneg;
+ advertising = ADVERTISED_Autoneg;
/* TODO: add speeds when ethtool is ready to support*/
break;
default:
@@ -520,38 +526,43 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
i40e_phy_type_to_ethtool(pf, &e_supported,
&e_advertising);
- ecmd->supported = ecmd->supported & e_supported;
- ecmd->advertising = ecmd->advertising & e_advertising;
+ supported = supported & e_supported;
+ advertising = advertising & e_advertising;
/* Set speed and duplex */
switch (link_speed) {
case I40E_LINK_SPEED_40GB:
- ethtool_cmd_speed_set(ecmd, SPEED_40000);
+ cmd->base.speed = SPEED_40000;
break;
case I40E_LINK_SPEED_25GB:
#ifdef SPEED_25000
- ethtool_cmd_speed_set(ecmd, SPEED_25000);
+ cmd->base.speed = SPEED_25000;
#else
netdev_info(netdev,
"Speed is 25G, display not supported by this version of ethtool.\n");
#endif
break;
case I40E_LINK_SPEED_20GB:
- ethtool_cmd_speed_set(ecmd, SPEED_20000);
+ cmd->base.speed = SPEED_20000;
break;
case I40E_LINK_SPEED_10GB:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
break;
case I40E_LINK_SPEED_1GB:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
case I40E_LINK_SPEED_100MB:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
default:
break;
}
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
}
/**
@@ -562,18 +573,24 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
* Reports link settings that can be determined when link is down
**/
static void i40e_get_settings_link_down(struct i40e_hw *hw,
- struct ethtool_cmd *ecmd,
+ struct ethtool_link_ksettings *cmd,
struct i40e_pf *pf)
{
+ u32 supported, advertising;
+
/* link is down and the driver needs to fall back on
* supported phy types to figure out what info to display
*/
- i40e_phy_type_to_ethtool(pf, &ecmd->supported,
- &ecmd->advertising);
+ i40e_phy_type_to_ethtool(pf, &supported, &advertising);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
/* With no link speed and duplex are unknown */
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
/**
@@ -583,74 +600,85 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
*
* Reports speed/duplex settings based on media_type
**/
-static int i40e_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int i40e_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+ u32 advertising;
if (link_up)
- i40e_get_settings_link_up(hw, ecmd, netdev, pf);
+ i40e_get_settings_link_up(hw, cmd, netdev, pf);
else
- i40e_get_settings_link_down(hw, ecmd, pf);
+ i40e_get_settings_link_down(hw, cmd, pf);
/* Now set the settings that don't rely on link being up/down */
/* Set autoneg settings */
- ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ cmd->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
switch (hw->phy.media_type) {
case I40E_MEDIA_TYPE_BACKPLANE:
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_Backplane;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_Backplane;
- ecmd->port = PORT_NONE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ Backplane);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Backplane);
+ cmd->base.port = PORT_NONE;
break;
case I40E_MEDIA_TYPE_BASET:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+ cmd->base.port = PORT_TP;
break;
case I40E_MEDIA_TYPE_DA:
case I40E_MEDIA_TYPE_CX4:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_DA;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+ cmd->base.port = PORT_DA;
break;
case I40E_MEDIA_TYPE_FIBER:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ cmd->base.port = PORT_FIBRE;
break;
case I40E_MEDIA_TYPE_UNKNOWN:
default:
- ecmd->port = PORT_OTHER;
+ cmd->base.port = PORT_OTHER;
break;
}
- /* Set transceiver */
- ecmd->transceiver = XCVR_EXTERNAL;
-
/* Set flow control settings */
- ecmd->supported |= SUPPORTED_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
switch (hw->fc.requested_mode) {
case I40E_FC_FULL:
- ecmd->advertising |= ADVERTISED_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Pause);
break;
case I40E_FC_TX_PAUSE:
- ecmd->advertising |= ADVERTISED_Asym_Pause;
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
break;
case I40E_FC_RX_PAUSE:
- ecmd->advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
break;
default:
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ ethtool_convert_link_mode_to_legacy_u32(
+ &advertising, cmd->link_modes.advertising);
+
+ advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising, advertising);
break;
}
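The block above shows the core of the ethtool_cmd to ethtool_link_ksettings migration: legacy u32 SUPPORTED_*/ADVERTISED_* masks become per-bit link-mode bitmaps. A minimal sketch of the two conversion styles, using the generic linux/ethtool.h helpers referenced in this patch (the wrapper function name is made up for illustration):

static void sketch_fill_link_modes(struct ethtool_link_ksettings *cmd)
{
	/* legacy mask, as the old get_settings path would have built it */
	u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			SUPPORTED_TP;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	/* bulk-convert a legacy u32 mask into the supported bitmap */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	/* individual modes can still be OR'ed in by name */
	ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
}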
@@ -664,8 +692,8 @@ static int i40e_get_settings(struct net_device *netdev,
*
* Set speed/duplex per media_types advertised/forced
**/
-static int i40e_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int i40e_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
@@ -673,12 +701,14 @@ static int i40e_set_settings(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
- struct ethtool_cmd safe_ecmd;
+ struct ethtool_link_ksettings safe_cmd;
+ struct ethtool_link_ksettings copy_cmd;
i40e_status status = 0;
bool change = false;
int err = 0;
- u8 autoneg;
+ u32 autoneg;
u32 advertise;
+ u32 tmp;
/* Changing port settings is not supported if this isn't the
* port's controlling PF
@@ -706,23 +736,31 @@ static int i40e_set_settings(struct net_device *netdev,
return -EOPNOTSUPP;
}
+ /* copy the cmd to copy_cmd to avoid modifying the original */
+ memcpy(&copy_cmd, cmd, sizeof(struct ethtool_link_ksettings));
+
/* get our own copy of the bits to check against */
- memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
- i40e_get_settings(netdev, &safe_ecmd);
+ memset(&safe_cmd, 0, sizeof(struct ethtool_link_ksettings));
+ i40e_get_link_ksettings(netdev, &safe_cmd);
- /* save autoneg and speed out of ecmd */
- autoneg = ecmd->autoneg;
- advertise = ecmd->advertising;
+ /* save autoneg and speed out of cmd */
+ autoneg = cmd->base.autoneg;
+ ethtool_convert_link_mode_to_legacy_u32(&advertise,
+ cmd->link_modes.advertising);
/* set autoneg and speed back to what they currently are */
- ecmd->autoneg = safe_ecmd.autoneg;
- ecmd->advertising = safe_ecmd.advertising;
+ copy_cmd.base.autoneg = safe_cmd.base.autoneg;
+ ethtool_convert_link_mode_to_legacy_u32(
+ &tmp, safe_cmd.link_modes.advertising);
+ ethtool_convert_legacy_u32_to_link_mode(
+ copy_cmd.link_modes.advertising, tmp);
+
+ copy_cmd.base.cmd = safe_cmd.base.cmd;
- ecmd->cmd = safe_ecmd.cmd;
- /* If ecmd and safe_ecmd are not the same now, then they are
+ /* If copy_cmd and safe_cmd are not the same now, then they are
* trying to set something that we do not support
*/
- if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
+ if (memcmp(&copy_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings)))
return -EOPNOTSUPP;
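The copy-and-compare logic above follows a simple policy: duplicate the user request, overwrite the fields the driver knows how to change (autoneg and the advertising bitmap) with their current values, and reject the request if anything else still differs. A condensed, hypothetical sketch of that pattern (helper name and struct assignment are illustrative, not taken from the driver):

static bool sketch_only_handled_fields_changed(
		const struct ethtool_link_ksettings *req,
		const struct ethtool_link_ksettings *cur)
{
	struct ethtool_link_ksettings tmp = *req;

	/* neutralize the fields we know how to change ... */
	tmp.base.autoneg = cur->base.autoneg;
	bitmap_copy(tmp.link_modes.advertising, cur->link_modes.advertising,
		    __ETHTOOL_LINK_MODE_MASK_NBITS);
	tmp.base.cmd = cur->base.cmd;

	/* ... so any remaining difference is something we cannot honor */
	return !memcmp(&tmp, cur, sizeof(tmp));
}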
while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
@@ -745,7 +783,8 @@ static int i40e_set_settings(struct net_device *netdev,
/* If autoneg was not already enabled */
if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
/* If autoneg is not supported, return error */
- if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+ if (!ethtool_link_ksettings_test_link_mode(
+ &safe_cmd, supported, Autoneg)) {
netdev_info(netdev, "Autoneg not supported on this phy\n");
return -EINVAL;
}
@@ -760,7 +799,8 @@ static int i40e_set_settings(struct net_device *netdev,
/* If autoneg is supported 10GBASE_T is the only PHY
* that can disable it, so otherwise return error
*/
- if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+ if (ethtool_link_ksettings_test_link_mode(
+ &safe_cmd, supported, Autoneg) &&
hw->phy.link_info.phy_type !=
I40E_PHY_TYPE_10GBASE_T) {
netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
@@ -773,7 +813,9 @@ static int i40e_set_settings(struct net_device *netdev,
}
}
- if (advertise & ~safe_ecmd.supported)
+ ethtool_convert_link_mode_to_legacy_u32(&tmp,
+ safe_cmd.link_modes.supported);
+ if (advertise & ~tmp)
return -EINVAL;
if (advertise & ADVERTISED_100baseT_Full)
@@ -1165,6 +1207,11 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
struct i40e_hw *hw = &np->vsi->back->hw;
u32 val;
+#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
+ if (hw->mac.type == I40E_MAC_X722) {
+ val = X722_EEPROM_SCOPE_LIMIT + 1;
+ return val;
+ }
val = (rd32(hw, I40E_GLPCI_LBARCTRL)
& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
@@ -2359,8 +2406,8 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
*/
fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
- fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
- fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip;
if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
fsp->ring_cookie = RX_CLS_FLOW_DISC;
@@ -2574,24 +2621,6 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
}
/**
- * i40e_match_fdir_input_set - Match a new filter against an existing one
- * @rule: The filter already added
- * @input: The new filter to comapre against
- *
- * Returns true if the two input set match
- **/
-static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
- struct i40e_fdir_filter *input)
-{
- if ((rule->dst_ip[0] != input->dst_ip[0]) ||
- (rule->src_ip[0] != input->src_ip[0]) ||
- (rule->dst_port != input->dst_port) ||
- (rule->src_port != input->src_port))
- return false;
- return true;
-}
-
-/**
* i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
* @vsi: Pointer to the targeted VSI
* @input: The filter to update or NULL to indicate deletion
@@ -2626,22 +2655,22 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
/* if there is an old rule occupying our place remove it */
if (rule && (rule->fd_id == sw_idx)) {
- if (input && !i40e_match_fdir_input_set(rule, input))
- err = i40e_add_del_fdir(vsi, rule, false);
- else if (!input)
- err = i40e_add_del_fdir(vsi, rule, false);
+ /* Remove this rule, since we're either deleting it, or
+ * replacing it.
+ */
+ err = i40e_add_del_fdir(vsi, rule, false);
hlist_del(&rule->fdir_node);
kfree(rule);
pf->fdir_pf_active_filters--;
}
- /* If no input this was a delete, err should be 0 if a rule was
- * successfully found and removed from the list else -EINVAL
+ /* If we weren't given an input, this is a delete, so just return the
+ * error code indicating if there was an entry at the requested slot
*/
if (!input)
return err;
- /* initialize node and set software index */
+ /* Otherwise, install the new rule as requested */
INIT_HLIST_NODE(&input->fdir_node);
/* add filter to the list */
@@ -2712,7 +2741,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return -EOPNOTSUPP;
- if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+ if (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)
return -ENOSPC;
if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
@@ -2724,6 +2753,10 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ /* Extended MAC field is not supported */
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+
if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
pf->hw.func_caps.fd_filters_guaranteed)) {
return -EINVAL;
@@ -2760,8 +2793,8 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
*/
input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
- input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
- input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
if (ntohl(fsp->m_ext.data[1])) {
vf_id = ntohl(fsp->h_ext.data[1]);
@@ -2781,12 +2814,19 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
}
ret = i40e_add_del_fdir(vsi, input, true);
-free_input:
if (ret)
- kfree(input);
- else
- i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
+ goto free_input;
+
+ /* Add the input filter to the fdir_input_list, possibly replacing
+ * a previous filter. Do not free the input structure after adding it
+ * to the list as this would cause a use-after-free bug.
+ */
+ i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
+ return 0;
+
+free_input:
+ kfree(input);
return ret;
}
@@ -3054,7 +3094,7 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
I40E_PRIV_FLAGS_FD_ATR : 0;
ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
I40E_PRIV_FLAGS_VEB_STATS : 0;
- ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
+ ret_flags |= pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
if (pf->hw.pf_id == 0) {
ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
@@ -3094,7 +3134,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
} else {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
/* flush current ATR settings */
set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
@@ -3139,9 +3179,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
(pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
- pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ pf->hw_disabled_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
else
- pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ pf->hw_disabled_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
/* if needed, issue reset to cause things to take effect */
if (reset_required)
@@ -3151,8 +3191,6 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
}
static const struct ethtool_ops i40e_ethtool_ops = {
- .get_settings = i40e_get_settings,
- .set_settings = i40e_set_settings,
.get_drvinfo = i40e_get_drvinfo,
.get_regs_len = i40e_get_regs_len,
.get_regs = i40e_get_regs,
@@ -3189,6 +3227,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_priv_flags = i40e_set_priv_flags,
.get_per_queue_coalesce = i40e_get_per_queue_coalesce,
.set_per_queue_coalesce = i40e_set_per_queue_coalesce,
+ .get_link_ksettings = i40e_get_link_ksettings,
+ .set_link_ksettings = i40e_set_link_ksettings,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e8a8351c8ea9..caccb8e97f1b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,9 +39,9 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
-#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 27
+#define DRV_VERSION_MAJOR 2
+#define DRV_VERSION_MINOR 1
+#define DRV_VERSION_BUILD 7
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1101,13 +1101,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
&osd->rx_lpi_count, &nsd->rx_lpi_count);
if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
- !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+ !(pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED))
nsd->fd_sb_status = true;
else
nsd->fd_sb_status = false;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
- !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ !(pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
nsd->fd_atr_status = true;
else
nsd->fd_atr_status = false;
@@ -2487,13 +2487,15 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
netdev_info(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
- i40e_notify_client_of_l2_param_changes(vsi);
+ pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+ I40E_FLAG_CLIENT_L2_CHANGE);
return 0;
}
@@ -3281,6 +3283,11 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
+ /* Reset FDir counters as we're replaying all existing filters */
+ pf->fd_tcp4_filter_cnt = 0;
+ pf->fd_udp4_filter_cnt = 0;
+ pf->fd_ip4_filter_cnt = 0;
+
hlist_for_each_entry_safe(filter, node,
&pf->fdir_filter_list, fdir_node) {
i40e_add_del_fdir(vsi, filter, true);
@@ -4463,17 +4470,16 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
**/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
- bool reset = false;
-
+ struct i40e_pf *pf = vsi->back;
if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
i40e_down(vsi);
i40e_vsi_free_irq(vsi);
i40e_vsi_free_tx_resources(vsi);
i40e_vsi_free_rx_resources(vsi);
vsi->current_netdev_flags = 0;
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
- reset = true;
- i40e_notify_client_of_netdev_close(vsi, reset);
+ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ pf->flags |= I40E_FLAG_CLIENT_RESET;
}
/**
@@ -5464,13 +5470,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
/* replay FDIR SB filters */
if (vsi->type == I40E_VSI_FDIR) {
/* reset fd counters */
- pf->fd_add_err = pf->fd_atr_cnt = 0;
- if (pf->fd_tcp_rule > 0) {
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
- pf->fd_tcp_rule = 0;
- }
+ pf->fd_add_err = 0;
+ pf->fd_atr_cnt = 0;
i40e_fdir_filter_restore(vsi);
}
@@ -5542,8 +5543,6 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
- i40e_notify_client_of_netdev_close(vsi, false);
-
}
/**
@@ -5612,9 +5611,12 @@ static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc)
#endif
{
- if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+ if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return i40e_setup_tc(netdev, tc->tc);
+
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return i40e_setup_tc(netdev, tc->mqprio->num_tc);
}
/**
@@ -5752,7 +5754,11 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
hlist_del(&filter->fdir_node);
kfree(filter);
}
+
pf->fdir_pf_active_filters = 0;
+ pf->fd_tcp4_filter_cnt = 0;
+ pf->fd_udp4_filter_cnt = 0;
+ pf->fd_ip4_filter_cnt = 0;
}
/**
@@ -6021,8 +6027,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
i40e_service_event_schedule(pf);
} else {
i40e_pf_unquiesce_all_vsi(pf);
- /* Notify the client for the DCB changes */
- i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
+ pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+ I40E_FLAG_CLIENT_L2_CHANGE);
}
exit:
@@ -6144,8 +6150,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
(pf->fd_add_err == 0) ||
(i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
+ pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
@@ -6156,9 +6162,9 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
*/
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->fd_tcp_rule == 0)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->fd_tcp4_filter_cnt == 0)) {
+ pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
@@ -6210,7 +6216,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
}
pf->fd_flush_timestamp = jiffies;
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6229,8 +6235,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
} else {
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
- if (!disable_atr)
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ if (!disable_atr && !pf->fd_tcp4_filter_cnt)
+ pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
@@ -7351,7 +7357,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
- __be16 port;
+ u16 port;
int i;
if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
@@ -7375,7 +7381,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
"%s %s port %d, index %d failed, err %s aq_err %s\n",
pf->udp_ports[i].type ? "vxlan" : "geneve",
port ? "add" : "delete",
- ntohs(port), i,
+ port, i,
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
@@ -7411,7 +7417,18 @@ static void i40e_service_task(struct work_struct *work)
i40e_vc_process_vflr_event(pf);
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
- i40e_client_subtask(pf);
+ if (pf->flags & I40E_FLAG_CLIENT_RESET) {
+ /* Client subtask will reopen next time through. */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
+ pf->flags &= ~I40E_FLAG_CLIENT_RESET;
+ } else {
+ i40e_client_subtask(pf);
+ if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
+ i40e_notify_client_of_l2_param_changes(
+ pf->vsi[pf->lan_vsi]);
+ pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
+ }
+ }
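The hunk above is the consumer side of a new deferral scheme: instead of calling the client callbacks directly from MTU changes, LLDP/DCB events and VSI close, those paths now just set flags and the service task acts on them later. A hypothetical sketch of the producer side (the helper is illustrative only):

static void sketch_defer_l2_notify(struct i40e_pf *pf)
{
	/* callers only raise flags; i40e_service_task() makes the actual
	 * i40e_notify_client_of_l2_param_changes() call
	 */
	pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
		      I40E_FLAG_CLIENT_L2_CHANGE);
}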
i40e_sync_filters_subtask(pf);
i40e_sync_udp_filters_subtask(pf);
i40e_clean_adminq_subtask(pf);
@@ -7809,6 +7826,7 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
static int i40e_init_msix(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
+ int cpus, extra_vectors;
int vectors_left;
int v_budget, i;
int v_actual;
@@ -7844,10 +7862,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
vectors_left--;
}
- /* reserve vectors for the main PF traffic queues */
- pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+ /* reserve some vectors for the main PF traffic queues. Initially we
+ * only reserve at most 50% of the available vectors, in the case that
+ * the number of online CPUs is large. This ensures that we can enable
+ * extra features as well. Once we've enabled the other features, we
+ * will use any remaining vectors to reach as close as we can to the
+ * number of online CPUs.
+ */
+ cpus = num_online_cpus();
+ pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
vectors_left -= pf->num_lan_msix;
- v_budget += pf->num_lan_msix;
/* reserve one vector for sideband flow director */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -7910,6 +7934,23 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
+ /* On systems with a large number of SMP cores, we previously limited
+ * the number of vectors for num_lan_msix to be at most 50% of the
+ * available vectors, to allow for other features. Now, we add back
+ * the remaining vectors. However, we ensure that the total
+ * num_lan_msix will not exceed num_online_cpus(). To do this, we
+ * calculate the number of vectors we can add without going over the
+ * cap of CPUs. For systems with a small number of CPUs this will be
+ * zero.
+ */
+ extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
+ pf->num_lan_msix += extra_vectors;
+ vectors_left -= extra_vectors;
+
+ WARN(vectors_left < 0,
+ "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
+
+ v_budget += pf->num_lan_msix;
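Putting the two stages together, the LAN vector budget amounts to the arithmetic below (a standalone sketch with the other feature reservations elided; function name and parameters are hypothetical):

static int sketch_lan_vector_budget(int cpus, int vectors_left)
{
	/* stage 1: claim at most half the pool so FD-SB, iWARP and VMDq
	 * can still get vectors
	 */
	int lan = min_t(int, cpus, vectors_left / 2);
	int extra;

	vectors_left -= lan;
	/* ... other features would subtract from vectors_left here ... */

	/* stage 2: top back up toward the CPU count with any leftovers */
	extra = min_t(int, cpus - lan, vectors_left);
	return lan + extra;
}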
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
GFP_KERNEL);
if (!pf->msix_entries)
@@ -8360,13 +8401,10 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
if (vsi->type == I40E_VSI_MAIN) {
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
- seed_dw[i]);
+ wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
} else if (vsi->type == I40E_VSI_SRIOV) {
for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw,
- I40E_VFQF_HKEY1(i, vf_id),
- seed_dw[i]);
+ wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
} else {
dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
}
@@ -8384,9 +8422,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
return -EINVAL;
for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw,
- I40E_VFQF_HLUT1(i, vf_id),
- lut_dw[i]);
+ wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
} else {
dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
}
@@ -8843,9 +8879,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.aq.api_min_ver > 4))) {
/* Supported in FW API version higher than 1.4 */
pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
- pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
} else {
- pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
}
pf->eeprom_version = 0xDEAD;
@@ -8906,14 +8942,14 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
i40e_fdir_filter_exit(pf);
}
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED;
/* reset fd counters */
- pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
- pf->fdir_pf_active_filters = 0;
+ pf->fd_add_err = 0;
+ pf->fd_atr_cnt = 0;
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
}
@@ -8982,7 +9018,7 @@ static int i40e_set_features(struct net_device *netdev,
*
* Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
**/
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
+static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
{
u8 i;
@@ -9005,7 +9041,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- __be16 port = ti->port;
+ u16 port = ntohs(ti->port);
u8 next_idx;
u8 idx;
@@ -9013,8 +9049,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "port %d already offloaded\n",
- ntohs(port));
+ netdev_info(netdev, "port %d already offloaded\n", port);
return;
}
@@ -9023,7 +9058,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
- ntohs(port));
+ port);
return;
}
@@ -9057,7 +9092,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- __be16 port = ti->port;
+ u16 port = ntohs(ti->port);
u8 idx;
idx = i40e_get_udp_port_idx(pf, port);
@@ -9089,7 +9124,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
return;
not_found:
netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
- ntohs(port));
+ port);
}
static int i40e_get_phys_port_id(struct net_device *netdev,
@@ -9432,10 +9467,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
- /* The following steps are necessary to prevent reception
- * of tagged packets - some older NVM configurations load a
- * default a MAC-VLAN filter that accepts any tagged packet
- * which must be replaced by a normal filter.
+ /* The following steps are necessary to properly keep track of
+ * MAC-VLAN filters loaded into firmware - first we remove the
+ * filter that is automatically generated by firmware and then
+ * add a new filter to both the driver hash table and firmware.
*/
i40e_rm_default_mac_filter(vsi, mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 38ee18f11124..800bd55d0159 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = 0;
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- i40e_release_nvm(hw);
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
}
return ret_code;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 97d46058d71d..3880e417f167 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -203,7 +203,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
struct udphdr *udp;
struct iphdr *ip;
- bool err = false;
u8 *raw_packet;
int ret;
static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
@@ -219,9 +218,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
+ sizeof(struct iphdr));
- ip->daddr = fd_data->dst_ip[0];
+ ip->daddr = fd_data->dst_ip;
udp->dest = fd_data->dst_port;
- ip->saddr = fd_data->src_ip[0];
+ ip->saddr = fd_data->src_ip;
udp->source = fd_data->src_port;
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
@@ -230,7 +229,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
dev_info(&pf->pdev->dev,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
- err = true;
+ /* Free the packet buffer since it wasn't added to the ring */
+ kfree(raw_packet);
+ return -EOPNOTSUPP;
} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev,
@@ -241,10 +242,13 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
"Filter deleted for PCTYPE %d loc = %d\n",
fd_data->pctype, fd_data->fd_id);
}
- if (err)
- kfree(raw_packet);
- return err ? -EOPNOTSUPP : 0;
+ if (add)
+ pf->fd_udp4_filter_cnt++;
+ else
+ pf->fd_udp4_filter_cnt--;
+
+ return 0;
}
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
@@ -263,7 +267,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
struct tcphdr *tcp;
struct iphdr *ip;
- bool err = false;
u8 *raw_packet;
int ret;
/* Dummy packet */
@@ -281,36 +284,20 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
+ sizeof(struct iphdr));
- ip->daddr = fd_data->dst_ip[0];
+ ip->daddr = fd_data->dst_ip;
tcp->dest = fd_data->dst_port;
- ip->saddr = fd_data->src_ip[0];
+ ip->saddr = fd_data->src_ip;
tcp->source = fd_data->src_port;
- if (add) {
- pf->fd_tcp_rule++;
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
- } else {
- pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
- (pf->fd_tcp_rule - 1) : 0;
- if (pf->fd_tcp_rule == 0) {
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- }
- }
-
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
-
if (ret) {
dev_info(&pf->pdev->dev,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
- err = true;
+ /* Free the packet buffer since it wasn't added to the ring */
+ kfree(raw_packet);
+ return -EOPNOTSUPP;
} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
@@ -321,10 +308,23 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
fd_data->pctype, fd_data->fd_id);
}
- if (err)
- kfree(raw_packet);
+ if (add) {
+ pf->fd_tcp4_filter_cnt++;
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ } else {
+ pf->fd_tcp4_filter_cnt--;
+ if (pf->fd_tcp4_filter_cnt == 0) {
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+ pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ }
+ }
- return err ? -EOPNOTSUPP : 0;
+ return 0;
}
#define I40E_IP_DUMMY_PACKET_LEN 34
@@ -343,7 +343,6 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
{
struct i40e_pf *pf = vsi->back;
struct iphdr *ip;
- bool err = false;
u8 *raw_packet;
int ret;
int i;
@@ -359,18 +358,21 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
- ip->saddr = fd_data->src_ip[0];
- ip->daddr = fd_data->dst_ip[0];
+ ip->saddr = fd_data->src_ip;
+ ip->daddr = fd_data->dst_ip;
ip->protocol = 0;
fd_data->pctype = i;
ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
-
if (ret) {
dev_info(&pf->pdev->dev,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
- err = true;
+ /* The packet buffer wasn't added to the ring so we
+ * need to free it now.
+ */
+ kfree(raw_packet);
+ return -EOPNOTSUPP;
} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev,
@@ -383,10 +385,12 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
}
}
- if (err)
- kfree(raw_packet);
+ if (add)
+ pf->fd_ip4_filter_cnt++;
+ else
+ pf->fd_ip4_filter_cnt--;
- return err ? -EOPNOTSUPP : 0;
+ return 0;
}
/**
@@ -484,8 +488,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
- pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
+ pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
}
@@ -498,11 +502,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- !(pf->auto_disable_flags &
+ !(pf->hw_disabled_flags &
I40E_FLAG_FD_SB_ENABLED)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
- pf->auto_disable_flags |=
+ pf->hw_disabled_flags |=
I40E_FLAG_FD_SB_ENABLED;
}
}
@@ -1010,7 +1014,6 @@ err:
**/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
unsigned long bi_size;
u16 i;
@@ -1030,7 +1033,20 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_bi->page)
continue;
- dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ rx_bi->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
__free_pages(rx_bi->page, 0);
rx_bi->page = NULL;
@@ -1159,7 +1175,10 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -1219,6 +1238,12 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
if (!i40e_alloc_mapped_page(rx_ring, bi))
goto no_buffers;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
@@ -1685,8 +1710,8 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
}
/* clear contents of buffer_info */
@@ -2079,7 +2104,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return;
- if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ if ((pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
return;
/* if sampling is disabled do nothing */
@@ -2113,10 +2138,10 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */
- if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ if (th->syn && (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
return;
if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
- (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
+ (!(pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
/* HW ATR eviction will take care of removing filters on FIN
* and RST packets.
*/
@@ -2179,7 +2204,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
- (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
+ (!(pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index f80979025c01..49c7b2089d8e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -133,6 +133,9 @@ enum i40e_dyn_idx_t {
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
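DMA_ATTR_SKIP_CPU_SYNC moves cache maintenance into the driver's hands, which is why the Rx paths in this patch pair dma_map/unmap_page_attrs() with explicit range syncs. The intended lifecycle, sketched with the standard DMA API (dev, page and offset are placeholders):

dma_addr_t dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				    DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
/* check dma_mapping_error(dev, dma) before using the mapping */

/* hand the buffer to hardware */
dma_sync_single_range_for_device(dev, dma, offset, I40E_RXBUFFER_2048,
				 DMA_FROM_DEVICE);

/* ... device DMA-writes a frame ... */

/* make the written data visible to the CPU before parsing it */
dma_sync_single_range_for_cpu(dev, dma, offset, I40E_RXBUFFER_2048,
			      DMA_FROM_DEVICE);

/* teardown; the attrs skip the implicit sync already done by hand */
dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
		     I40E_RX_DMA_ATTR);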
/**
* i40e_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 78460c52b7c4..cfe8b78dac0e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -702,10 +702,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
- (u32)hena);
- i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
- (u32)(hena >> 32));
+ wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
+ wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
}
/* program mac filter */
@@ -1359,7 +1357,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (!vsi->info.pvid)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
- if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) &&
+ if (i40e_vf_client_capable(pf, vf->vf_id) &&
(vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
@@ -1853,7 +1851,7 @@ error_param:
}
/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
-#define I40E_VC_MAX_MAC_ADDR_PER_VF 8
+#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
#define I40E_VC_MAX_VLAN_PER_VF 8
/**
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index 3a423836a565..827c7a6ed0ba 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -32,5 +32,5 @@
obj-$(CONFIG_I40EVF) += i40evf.o
i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
- i40e_txrx.o i40e_common.o i40e_adminq.o
+ i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index eeb9864bc5b1..c28cb8f27243 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -132,6 +132,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -139,6 +143,10 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -177,6 +185,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
@@ -558,6 +567,56 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+ __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
+ __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -640,6 +699,8 @@ struct i40e_aqc_set_port_parameters {
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
@@ -691,6 +752,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
+/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
@@ -1839,11 +1901,12 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 external_power_ability;
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index c91fcf43ccbc..d7790c08e523 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -493,7 +493,6 @@ err:
**/
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
unsigned long bi_size;
u16 i;
@@ -513,7 +512,20 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_bi->page)
continue;
- dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ rx_bi->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
__free_pages(rx_bi->page, 0);
rx_bi->page = NULL;
@@ -642,7 +654,10 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -702,6 +717,12 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
if (!i40e_alloc_mapped_page(rx_ring, bi))
goto no_buffers;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
@@ -1158,8 +1179,8 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
}
/* clear contents of buffer_info */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 8274ba68bd32..013512124e6a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -120,6 +120,9 @@ enum i40e_dyn_idx_t {
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
/**
* i40e_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index d38a2b2aea2b..f431fbc4a3e7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -81,7 +81,9 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ I40E_VIRTCHNL_OP_IWARP = 20,
I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
@@ -393,6 +395,37 @@ struct i40e_virtchnl_pf_event {
int severity;
};
+/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and CEQ attached to it although
+ * there is a single AEQ per VF IWARP instance in which case
+ * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ 0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct i40e_virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct i40e_virtchnl_iwarp_qv_info qv_info[1];
+};
+
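Because qv_info[] is declared with a single element, a VF client sizes the message for N vectors by allocating N-1 extra entries behind the header. A hypothetical example of building a two-vector list (values are made up):

static int sketch_build_qvlist(struct i40e_virtchnl_iwarp_qvlist_info **out)
{
	u32 n = 2;
	size_t len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
		     (n - 1) * sizeof(struct i40e_virtchnl_iwarp_qv_info);
	struct i40e_virtchnl_iwarp_qvlist_info *qvl = kzalloc(len, GFP_KERNEL);

	if (!qvl)
		return -ENOMEM;
	qvl->num_vectors = n;
	qvl->qv_info[0].v_idx = 1;			/* MSI-X vector */
	qvl->qv_info[0].ceq_idx = 0;
	qvl->qv_info[0].aeq_idx = I40E_QUEUE_INVALID_IDX; /* no AEQ on this one */
	qvl->qv_info[0].itr_idx = 0;
	/* qv_info[1] would be filled in the same way */
	*out = qvl;
	return 0;
}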
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 00c42d803276..b2b48511f457 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -60,6 +60,7 @@ struct i40e_vsi {
int base_vector;
u16 work_limit;
u16 qs_handle;
+ void *priv; /* client driver data reference. */
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -169,6 +170,7 @@ enum i40evf_state_t {
enum i40evf_critical_section_t {
__I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
+ __I40EVF_IN_CLIENT_TASK,
};
/* make common code happy */
#define __I40E_DOWN __I40EVF_DOWN
@@ -178,6 +180,7 @@ struct i40evf_adapter {
struct timer_list watchdog_timer;
struct work_struct reset_task;
struct work_struct adminq_task;
+ struct delayed_work client_task;
struct delayed_work init_task;
struct i40e_q_vector *q_vectors;
struct list_head vlan_filter_list;
@@ -195,7 +198,10 @@ struct i40evf_adapter {
u64 hw_csum_rx_error;
u32 rx_desc_count;
int num_msix_vectors;
+ int num_iwarp_msix;
+ int iwarp_base_vector;
u32 client_pending;
+ struct i40e_client_instance *cinst;
struct msix_entry *msix_entries;
u32 flags;
@@ -211,8 +217,11 @@ struct i40evf_adapter {
#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14)
-#define I40EVF_FLAG_PROMISC_ON BIT(15)
-#define I40EVF_FLAG_ALLMULTI_ON BIT(16)
+#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(15)
+#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(16)
+#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(17)
+#define I40EVF_FLAG_PROMISC_ON BIT(18)
+#define I40EVF_FLAG_ALLMULTI_ON BIT(19)
/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
@@ -258,10 +267,11 @@ struct i40evf_adapter {
bool link_up;
enum i40e_aq_link_speed link_speed;
enum i40e_virtchnl_ops current_op;
-#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
+#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
0)
+#define CLIENT_ENABLED(_a) ((_a)->cinst)
/* RSS by the PF should be preferred over RSS via other methods. */
#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -292,6 +302,12 @@ struct i40evf_adapter {
/* Ethtool Private Flags */
+/* lan device */
+struct i40e_device {
+ struct list_head list;
+ struct i40evf_adapter *vf;
+};
+
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
extern const char i40evf_driver_version[];
@@ -337,4 +353,11 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
int i40evf_config_rss(struct i40evf_adapter *adapter);
+int i40evf_lan_add_device(struct i40evf_adapter *adapter);
+int i40evf_lan_del_device(struct i40evf_adapter *adapter);
+void i40evf_client_subtask(struct i40evf_adapter *adapter);
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
+void i40evf_notify_client_open(struct i40e_vsi *vsi);
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
new file mode 100644
index 000000000000..5b43e5b6e2eb
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -0,0 +1,563 @@
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+#include "i40evf_client.h"
+
+static
+const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR;
+static struct i40e_client *vf_registered_client;
+static LIST_HEAD(i40evf_devices);
+static DEFINE_MUTEX(i40evf_device_mutex);
+
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len);
+
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info);
+
+static struct i40e_ops i40evf_lan_ops = {
+ .virtchnl_send = i40evf_client_virtchnl_send,
+ .setup_qvlist = i40evf_client_setup_qvlist,
+};
+
+/**
+ * i40evf_notify_client_message - call the client message receive callback
+ * @vsi: the VSI associated with this client
+ * @msg: message buffer
+ * @len: length of message
+ *
+ * If there is a client attached to this VSI, call the client
+ **/
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
+{
+ struct i40evf_adapter *adapter;
+ struct i40e_client_instance *cinst;
+
+ if (!vsi)
+ return;
+
+ adapter = vsi->back;
+ cinst = adapter->cinst;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->virtchnl_receive) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance virtchnl_receive function\n");
+ return;
+ }
+ cinst->client->ops->virtchnl_receive(&cinst->lan_info, cinst->client,
+ msg, len);
+}
+
+/**
+ * i40evf_notify_client_l2_params - call the client notify callback
+ * @vsi: the VSI with l2 param changes
+ *
+ * If there is a client attached to this VSI, call the client
+ **/
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
+{
+ struct i40evf_adapter *adapter;
+ struct i40e_client_instance *cinst;
+ struct i40e_params params;
+
+ if (!vsi)
+ return;
+
+ adapter = vsi->back;
+ cinst = adapter->cinst;
+ memset(&params, 0, sizeof(params));
+ params.mtu = vsi->netdev->mtu;
+ params.link_up = vsi->back->link_up;
+ params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->l2_param_change) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance l2_param_change function\n");
+ return;
+ }
+ cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
+ &params);
+}
+
+/**
+ * i40evf_notify_client_open - call the client open callback
+ * @vsi: the VSI with netdev opened
+ *
+ * If there is a client attached to this netdev, call the client's open callback
+ **/
+void i40evf_notify_client_open(struct i40e_vsi *vsi)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_client_instance *cinst = adapter->cinst;
+ int ret;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->open) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance open function\n");
+ return;
+ }
+ if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) {
+ ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
+ if (!ret)
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+ }
+}
+
+/**
+ * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map
+ * @ldev: pointer to L2 context.
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_release_qvlist(struct i40e_info *ldev)
+{
+ struct i40evf_adapter *adapter = ldev->vf;
+ i40e_status err;
+
+ if (adapter->aq_required)
+ return -EAGAIN;
+
+ err = i40e_aq_send_msg_to_pf(&adapter->hw,
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ I40E_SUCCESS, NULL, 0, NULL);
+
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
+ err, adapter->hw.aq.asq_last_status);
+
+ return err;
+}
+
+/**
+ * i40evf_notify_client_close - call the client close callback
+ * @vsi: the VSI with netdev closed
+ * @reset: true when close called due to reset pending
+ *
+ * If there is a client attached to this netdev, call the client's close callback
+ **/
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_client_instance *cinst = adapter->cinst;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->close) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance close function\n");
+ return;
+ }
+ cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
+ i40evf_client_release_qvlist(&cinst->lan_info);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+}
+
+/**
+ * i40evf_client_add_instance - add a client instance to the instance list
+ * @adapter: pointer to the board struct
+ *
+ * Returns cinst ptr on success, NULL on failure
+ **/
+static struct i40e_client_instance *
+i40evf_client_add_instance(struct i40evf_adapter *adapter)
+{
+ struct i40e_client_instance *cinst = NULL;
+ struct netdev_hw_addr *mac = NULL;
+ struct i40e_vsi *vsi = &adapter->vsi;
+ int i;
+
+ if (!vf_registered_client)
+ goto out;
+
+ if (adapter->cinst) {
+ cinst = adapter->cinst;
+ goto out;
+ }
+
+ cinst = kzalloc(sizeof(*cinst), GFP_KERNEL);
+ if (!cinst)
+ goto out;
+
+ cinst->lan_info.vf = (void *)adapter;
+ cinst->lan_info.netdev = vsi->netdev;
+ cinst->lan_info.pcidev = adapter->pdev;
+ cinst->lan_info.fid = 0;
+ cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
+ cinst->lan_info.hw_addr = adapter->hw.hw_addr;
+ cinst->lan_info.ops = &i40evf_lan_ops;
+ cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
+ cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
+ cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+ set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
+
+ cinst->lan_info.msix_count = adapter->num_iwarp_msix;
+ cinst->lan_info.msix_entries =
+ &adapter->msix_entries[adapter->iwarp_base_vector];
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ cinst->lan_info.params.qos.prio_qos[i].tc = 0;
+ cinst->lan_info.params.qos.prio_qos[i].qs_handle =
+ vsi->qs_handle;
+ }
+
+ mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ if (mac)
+ ether_addr_copy(cinst->lan_info.lanmac, mac->addr);
+ else
+ dev_err(&adapter->pdev->dev, "MAC address list is empty!\n");
+
+ cinst->client = vf_registered_client;
+ adapter->cinst = cinst;
+out:
+ return cinst;
+}
+
+/**
+ * i40evf_client_del_instance - removes a client instance from the list
+ * @adapter: pointer to the board struct
+ *
+ **/
+static
+void i40evf_client_del_instance(struct i40evf_adapter *adapter)
+{
+ kfree(adapter->cinst);
+ adapter->cinst = NULL;
+}
+
+/**
+ * i40evf_client_subtask - client maintenance work
+ * @adapter: board private structure
+ **/
+void i40evf_client_subtask(struct i40evf_adapter *adapter)
+{
+ struct i40e_client *client = vf_registered_client;
+ struct i40e_client_instance *cinst;
+ int ret = 0;
+
+ if (adapter->state < __I40EVF_DOWN)
+ return;
+
+ /* first check client is registered */
+ if (!client)
+ return;
+
+ /* Add the client instance to the instance list */
+ cinst = i40evf_client_add_instance(adapter);
+ if (!cinst)
+ return;
+
+ dev_info(&adapter->pdev->dev, "Added instance of Client %s\n",
+ client->name);
+
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+ /* Send an Open request to the client */
+
+ if (client->ops && client->ops->open)
+ ret = client->ops->open(&cinst->lan_info, client);
+ if (!ret)
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cinst->state);
+ else
+ /* remove client instance */
+ i40evf_client_del_instance(adapter);
+ }
+}
+
+/**
+ * i40evf_lan_add_device - add a lan device struct to the list of lan devices
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-zero on error
+ **/
+int i40evf_lan_add_device(struct i40evf_adapter *adapter)
+{
+ struct i40e_device *ldev;
+ int ret = 0;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry(ldev, &i40evf_devices, list) {
+ if (ldev->vf == adapter) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ldev->vf = adapter;
+ INIT_LIST_HEAD(&ldev->list);
+ list_add(&ldev->list, &i40evf_devices);
+ dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+ adapter->hw.bus.bus_id, adapter->hw.bus.device,
+ adapter->hw.bus.func);
+
+	/* A client may have registered before this device was added, so
+	 * flag the service task to initialize the client instance.
+	 */
+ adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+
+out:
+ mutex_unlock(&i40evf_device_mutex);
+ return ret;
+}
+
+/**
+ * i40evf_lan_del_device - removes a lan device from the device list
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-zero on error
+ **/
+int i40evf_lan_del_device(struct i40evf_adapter *adapter)
+{
+ struct i40e_device *ldev, *tmp;
+ int ret = -ENODEV;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) {
+ if (ldev->vf == adapter) {
+ dev_info(&adapter->pdev->dev,
+ "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+ adapter->hw.bus.bus_id, adapter->hw.bus.device,
+ adapter->hw.bus.func);
+ list_del(&ldev->list);
+ kfree(ldev);
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&i40evf_device_mutex);
+ return ret;
+}
+
+/**
+ * i40evf_client_release - release client-specific resources
+ * @client: pointer to the registered client
+ *
+ **/
+static void i40evf_client_release(struct i40e_client *client)
+{
+ struct i40e_client_instance *cinst;
+ struct i40e_device *ldev;
+ struct i40evf_adapter *adapter;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry(ldev, &i40evf_devices, list) {
+ adapter = ldev->vf;
+ cinst = adapter->cinst;
+ if (!cinst)
+ continue;
+ if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+ if (client->ops && client->ops->close)
+ client->ops->close(&cinst->lan_info, client,
+ false);
+ i40evf_client_release_qvlist(&cinst->lan_info);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+
+ dev_warn(&adapter->pdev->dev,
+ "Client %s instance closed\n", client->name);
+ }
+ /* delete the client instance */
+ i40evf_client_del_instance(adapter);
+ dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n",
+ client->name);
+ }
+ mutex_unlock(&i40evf_device_mutex);
+}
+
+/**
+ * i40evf_client_prepare - prepare client-specific resources
+ * @client: pointer to the registered client
+ *
+ **/
+static void i40evf_client_prepare(struct i40e_client *client)
+{
+ struct i40e_device *ldev;
+ struct i40evf_adapter *adapter;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry(ldev, &i40evf_devices, list) {
+ adapter = ldev->vf;
+ /* Signal the watchdog to service the client */
+ adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ }
+ mutex_unlock(&i40evf_device_mutex);
+}
+
+/**
+ * i40evf_client_virtchnl_send - send a message to the PF instance
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len)
+{
+ struct i40evf_adapter *adapter = ldev->vf;
+ i40e_status err;
+
+ if (adapter->aq_required)
+ return -EAGAIN;
+
+ err = i40e_aq_send_msg_to_pf(&adapter->hw, I40E_VIRTCHNL_OP_IWARP,
+ I40E_SUCCESS, msg, len, NULL);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
+ err, adapter->hw.aq.asq_last_status);
+
+ return err;
+}
+
+/**
+ * i40evf_client_setup_qvlist - send a message to the PF to set up the iwarp qv map
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @qvlist_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info)
+{
+ struct i40e_virtchnl_iwarp_qvlist_info *v_qvlist_info;
+ struct i40evf_adapter *adapter = ldev->vf;
+ struct i40e_qv_info *qv_info;
+ i40e_status err;
+ u32 v_idx, i;
+ u32 msg_size;
+
+ if (adapter->aq_required)
+ return -EAGAIN;
+
+ /* A quick check on whether the vectors belong to the client */
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ v_idx = qv_info->v_idx;
+ if ((v_idx >=
+ (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) ||
+ (v_idx < adapter->iwarp_base_vector))
+ return -EINVAL;
+ }
+
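+	/* The virtchnl struct already carries one qv_info entry, so the
+	 * message only needs room for num_vectors - 1 additional entries.
+	 */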
+ v_qvlist_info = (struct i40e_virtchnl_iwarp_qvlist_info *)qvlist_info;
+ msg_size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
+ (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
+ (v_qvlist_info->num_vectors - 1));
+
+ adapter->client_pending |= BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
+ err = i40e_aq_send_msg_to_pf(&adapter->hw,
+ I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+ I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to send iWarp vector config message to PF, error %d, aq status %d\n",
+ err, adapter->hw.aq.asq_last_status);
+ goto out;
+ }
+
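+	/* Wait up to 500 ms for the PF to ack; the virtchnl completion
+	 * handler clears the pending bit when the reply arrives.
+	 */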
+ err = -EBUSY;
+ for (i = 0; i < 5; i++) {
+ msleep(100);
+ if (!(adapter->client_pending &
+ BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
+ err = 0;
+ break;
+ }
+ }
+out:
+ return err;
+}
+
+/**
+ * i40evf_register_client - Register an i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-zero on error
+ **/
+int i40evf_register_client(struct i40e_client *client)
+{
+ int ret = 0;
+
+ if (!client) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (strlen(client->name) == 0) {
+ pr_info("i40evf: Failed to register client with no name\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (vf_registered_client) {
+ pr_info("i40evf: Client %s has already been registered!\n",
+ client->name);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) ||
+ (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) {
+ pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n",
+ client->name);
+ pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
+ client->version.major, client->version.minor,
+ client->version.build,
+ i40evf_client_interface_version_str);
+ ret = -EIO;
+ goto out;
+ }
+
+ vf_registered_client = client;
+
+ i40evf_client_prepare(client);
+
+ pr_info("i40evf: Registered client %s with return code %d\n",
+ client->name, ret);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(i40evf_register_client);
+
+/**
+ * i40evf_unregister_client - Unregister an i40e client driver from the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-zero on error
+ **/
+int i40evf_unregister_client(struct i40e_client *client)
+{
+ int ret = 0;
+
+	/* When an unregister request comes through, send a close to each
+	 * client instance that was opened. The client_release function
+	 * handles this.
+	 */
+ i40evf_client_release(client);
+
+ if (vf_registered_client != client) {
+ pr_info("i40evf: Client %s has not been registered\n",
+ client->name);
+ ret = -ENODEV;
+ goto out;
+ }
+ vf_registered_client = NULL;
+ pr_info("i40evf: Unregistered client %s\n", client->name);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(i40evf_unregister_client);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
new file mode 100644
index 000000000000..7d283c7506a5
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
@@ -0,0 +1,166 @@
+#ifndef _I40E_CLIENT_H_
+#define _I40E_CLIENT_H_
+
+#define I40EVF_CLIENT_STR_LENGTH 10
+
+/* Client interface version should be updated anytime there is a change in the
+ * existing APIs or data structures.
+ */
+#define I40EVF_CLIENT_VERSION_MAJOR 0
+#define I40EVF_CLIENT_VERSION_MINOR 01
+#define I40EVF_CLIENT_VERSION_BUILD 00
+#define I40EVF_CLIENT_VERSION_STR \
+ __stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \
+ __stringify(I40EVF_CLIENT_VERSION_MINOR) "." \
+ __stringify(I40EVF_CLIENT_VERSION_BUILD)
+
+struct i40e_client_version {
+ u8 major;
+ u8 minor;
+ u8 build;
+ u8 rsvd;
+};
+
+enum i40e_client_state {
+ __I40E_CLIENT_NULL,
+ __I40E_CLIENT_REGISTERED
+};
+
+enum i40e_client_instance_state {
+ __I40E_CLIENT_INSTANCE_NONE,
+ __I40E_CLIENT_INSTANCE_OPENED,
+};
+
+struct i40e_ops;
+struct i40e_client;
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ 0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
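+/* qv_info is a variable-length array; allocations provide num_vectors entries */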
+struct i40e_qvlist_info {
+ u32 num_vectors;
+ struct i40e_qv_info qv_info[1];
+};
+
+#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
+
+/* set of LAN parameters useful for clients managed by LAN */
+
+/* Struct to hold per priority info */
+struct i40e_prio_qos_params {
+ u16 qs_handle; /* qs handle for prio */
+ u8 tc; /* TC mapped to prio */
+ u8 reserved;
+};
+
+#define I40E_CLIENT_MAX_USER_PRIORITY 8
+/* Struct to hold Client QoS */
+struct i40e_qos_params {
+ struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+};
+
+struct i40e_params {
+ struct i40e_qos_params qos;
+ u16 mtu;
+ u16 link_up; /* boolean */
+};
+
+/* Structure to hold LAN device info for a client device */
+struct i40e_info {
+ struct i40e_client_version version;
+ u8 lanmac[6];
+ struct net_device *netdev;
+ struct pci_dev *pcidev;
+ u8 __iomem *hw_addr;
+ u8 fid; /* function id, PF id or VF id */
+#define I40E_CLIENT_FTYPE_PF 0
+#define I40E_CLIENT_FTYPE_VF 1
+ u8 ftype; /* function type, PF or VF */
+ void *vf; /* cast to i40evf_adapter */
+
+ /* All L2 params that could change during the life span of the device
+ * and needs to be communicated to the client when they change
+	 * and need to be communicated to the client when they change
+ struct i40e_params params;
+ struct i40e_ops *ops;
+
+	u16 msix_count;	 /* number of MSI-X vectors */
+	/* The array below is dynamically allocated based on msix_count */
+	struct msix_entry *msix_entries;
+	u16 itr_index; /* Which ITR index the PE driver is supposed to use */
+};
+
+struct i40e_ops {
+	/* setup_qvlist associates queues with a particular MSI-X vector */
+ int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
+ struct i40e_qvlist_info *qv_info);
+
+ u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+ u8 *msg, u16 len);
+
+	/* If the PE Engine is unresponsive, the RDMA driver can request a reset. */
+ void (*request_reset)(struct i40e_info *ldev,
+ struct i40e_client *client);
+};
+
+struct i40e_client_ops {
+ /* Should be called from register_client() or whenever the driver is
+ * ready to create a specific client instance.
+ */
+ int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+
+	/* Called when the netdev is unavailable or when an unregister
+	 * call comes in. If the close happens due to a reset, the reset
+	 * bit is set to true.
+ */
+ void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+ bool reset);
+
+	/* called when an L2-managed parameter changes, e.g. MSS */
+ void (*l2_param_change)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_params *params);
+
+ /* called when a message is received from the PF */
+ int (*virtchnl_receive)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len);
+};
+
+/* Client device */
+struct i40e_client_instance {
+ struct list_head list;
+ struct i40e_info lan_info;
+ struct i40e_client *client;
+ unsigned long state;
+};
+
+struct i40e_client {
+ struct list_head list; /* list of registered clients */
+ char name[I40EVF_CLIENT_STR_LENGTH];
+ struct i40e_client_version version;
+ unsigned long state; /* client state */
+ atomic_t ref_cnt; /* Count of all the client devices of this kind */
+ u32 flags;
+#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
+#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
+ u8 type;
+#define I40E_CLIENT_IWARP 0
+ struct i40e_client_ops *ops; /* client ops provided by the client */
+};
+
+/* used by clients */
+int i40evf_register_client(struct i40e_client *client);
+int i40evf_unregister_client(struct i40e_client *client);
+#endif /* _I40E_CLIENT_H_ */
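For orientation only: below is a minimal, hypothetical sketch of a client module built on the interface declared above. The structure layouts, version macros, and the i40evf_register_client()/i40evf_unregister_client() entry points come from this header; the module name and the callback bodies are invented for illustration.

#include <linux/module.h>

#include "i40evf_client.h"

/* Hypothetical iWARP client -- a sketch, not an actual in-tree consumer */
static int my_iwarp_open(struct i40e_info *ldev, struct i40e_client *client)
{
	/* allocate per-VF resources using ldev->msix_entries, ldev->netdev, ... */
	return 0;
}

static void my_iwarp_close(struct i40e_info *ldev, struct i40e_client *client,
			   bool reset)
{
	/* release per-VF resources; 'reset' is true when a VF reset is pending */
}

static struct i40e_client_ops my_iwarp_ops = {
	.open	= my_iwarp_open,
	.close	= my_iwarp_close,
};

static struct i40e_client my_iwarp_client = {
	.name		= "my_iwarp",	/* must fit I40EVF_CLIENT_STR_LENGTH */
	.version	= {
		.major	= I40EVF_CLIENT_VERSION_MAJOR,
		.minor	= I40EVF_CLIENT_VERSION_MINOR,
		.build	= I40EVF_CLIENT_VERSION_BUILD,
	},
	.type		= I40E_CLIENT_IWARP,
	.ops		= &my_iwarp_ops,
};

static int __init my_iwarp_init(void)
{
	/* fails with -EEXIST if another client is already registered */
	return i40evf_register_client(&my_iwarp_client);
}
module_init(my_iwarp_init);

static void __exit my_iwarp_exit(void)
{
	i40evf_unregister_client(&my_iwarp_client);
}
module_exit(my_iwarp_exit);

MODULE_LICENSE("GPL");

Note that registration only flags the LAN driver's service task; the open callback runs later from i40evf_client_subtask() once the adapter is ready.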
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 272d600c1ed0..122efbd29a19 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -64,51 +64,50 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
/**
- * i40evf_get_settings - Get Link Speed and Duplex settings
+ * i40evf_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
- * @ecmd: ethtool command
+ * @cmd: ethtool command
*
* Reports speed/duplex settings. Because this is a VF, we don't know what
* kind of link we really have, so we fake it.
**/
-static int i40evf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int i40evf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- ecmd->supported = 0;
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_DUMMY1;
- ecmd->port = PORT_NONE;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = PORT_NONE;
/* Set speed and duplex */
switch (adapter->link_speed) {
case I40E_LINK_SPEED_40GB:
- ethtool_cmd_speed_set(ecmd, SPEED_40000);
+ cmd->base.speed = SPEED_40000;
break;
case I40E_LINK_SPEED_25GB:
#ifdef SPEED_25000
- ethtool_cmd_speed_set(ecmd, SPEED_25000);
+ cmd->base.speed = SPEED_25000;
#else
netdev_info(netdev,
"Speed is 25G, display not supported by this version of ethtool.\n");
#endif
break;
case I40E_LINK_SPEED_20GB:
- ethtool_cmd_speed_set(ecmd, SPEED_20000);
+ cmd->base.speed = SPEED_20000;
break;
case I40E_LINK_SPEED_10GB:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
break;
case I40E_LINK_SPEED_1GB:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
case I40E_LINK_SPEED_100MB:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
default:
break;
}
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
return 0;
}
@@ -643,7 +642,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
}
static const struct ethtool_ops i40evf_ethtool_ops = {
- .get_settings = i40evf_get_settings,
.get_drvinfo = i40evf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = i40evf_get_ringparam,
@@ -663,6 +661,7 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.set_rxfh = i40evf_set_rxfh,
.get_channels = i40evf_get_channels,
.get_rxfh_key_size = i40evf_get_rxfh_key_size,
+ .get_link_ksettings = i40evf_get_link_ksettings,
};
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f35dcaac5bb7..6d666bde9df5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -26,6 +26,7 @@
#include "i40evf.h"
#include "i40e_prototype.h"
+#include "i40evf_client.h"
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);
@@ -36,9 +37,9 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
-#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 27
+#define DRV_VERSION_MAJOR 2
+#define DRV_VERSION_MINOR 1
+#define DRV_VERSION_BUILD 7
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -1058,6 +1059,8 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter)
i40evf_napi_enable_all(adapter);
adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ if (CLIENT_ENABLED(adapter))
+ adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
@@ -1685,6 +1688,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
i40evf_set_promiscuous(adapter, 0);
goto watchdog_done;
}
+ schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
@@ -1773,10 +1777,17 @@ static void i40evf_reset_task(struct work_struct *work)
u32 reg_val;
int i = 0, err;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
&adapter->crit_section))
usleep_range(500, 1000);
-
+ if (CLIENT_ENABLED(adapter)) {
+ adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
+ I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
+ I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
+ I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
+ cancel_delayed_work_sync(&adapter->client_task);
+ i40evf_notify_client_close(&adapter->vsi, true);
+ }
i40evf_misc_irq_disable(adapter);
if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
@@ -1819,6 +1830,7 @@ static void i40evf_reset_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
i40evf_disable_vf(adapter);
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -1861,9 +1873,8 @@ continue_reset:
}
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- /* Open RDMA Client again */
- adapter->aq_required |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@ -1980,6 +1991,48 @@ out:
}
/**
+ * i40evf_client_task - worker thread to perform client work
+ * @work: pointer to work_struct containing our data
+ *
+ * This task handles client interactions. Because client calls can be
+ * reentrant, we can't handle them in the watchdog.
+ **/
+static void i40evf_client_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, client_task.work);
+
+ /* If we can't get the client bit, just give up. We'll be rescheduled
+ * later.
+ */
+
+ if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
+ return;
+
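+	/* Handle one request per pass; anything left over is picked up on
+	 * the next watchdog-scheduled run.
+	 */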
+ if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
+ i40evf_client_subtask(adapter);
+ adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ goto out;
+ }
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
+ i40evf_notify_client_close(&adapter->vsi, false);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
+ goto out;
+ }
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
+ i40evf_notify_client_open(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
+ goto out;
+ }
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+ }
+out:
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+}
+
+/**
* i40evf_free_all_tx_resources - Free Tx Resources for All Queues
* @adapter: board private structure
*
@@ -2148,6 +2201,8 @@ static int i40evf_close(struct net_device *netdev)
set_bit(__I40E_DOWN, &adapter->vsi.state);
+ if (CLIENT_ENABLED(adapter))
+ adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
i40evf_down(adapter);
adapter->state = __I40EVF_DOWN_PENDING;
@@ -2188,6 +2243,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
struct i40evf_adapter *adapter = netdev_priv(netdev);
netdev->mtu = new_mtu;
+ if (CLIENT_ENABLED(adapter)) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ }
adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
schedule_work(&adapter->reset_task);
@@ -2581,6 +2640,12 @@ static void i40evf_init_task(struct work_struct *work)
adapter->netdev_registered = true;
netif_tx_stop_all_queues(netdev);
+ if (CLIENT_ALLOWED(adapter)) {
+ err = i40evf_lan_add_device(adapter);
+ if (err)
+ dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
+ err);
+ }
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
@@ -2745,6 +2810,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, i40evf_reset_task);
INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
+ INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
schedule_delayed_work(&adapter->init_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
@@ -2857,14 +2923,21 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40evf_mac_filter *f, *ftmp;
struct i40e_hw *hw = &adapter->hw;
+ int err;
cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
-
+ cancel_delayed_work_sync(&adapter->client_task);
if (adapter->netdev_registered) {
unregister_netdev(netdev);
adapter->netdev_registered = false;
}
+ if (CLIENT_ALLOWED(adapter)) {
+ err = i40evf_lan_del_device(adapter);
+ if (err)
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ err);
+ }
/* Shut down all the garbage mashers on the detention level */
adapter->state = __I40EVF_REMOVE;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index bee58af390e1..a2a7354426a3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -26,6 +26,7 @@
#include "i40evf.h"
#include "i40e_prototype.h"
+#include "i40evf_client.h"
/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
@@ -999,6 +1000,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
+ case I40E_VIRTCHNL_OP_IWARP:
+ /* Gobble zero-length replies from the PF. They indicate that
+ * a previous message was received OK, and the client doesn't
+ * care about that.
+ */
+ if (msglen && CLIENT_ENABLED(adapter))
+ i40evf_notify_client_message(&adapter->vsi,
+ msg, msglen);
+ break;
+
case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
adapter->client_pending &=
~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
@@ -1014,7 +1025,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
break;
default:
- if (v_opcode != adapter->current_op)
+ if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
adapter->current_op, v_opcode);
break;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index acbc3abe2ddd..dc6e2980718f 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -142,12 +142,24 @@ struct vf_data_storage {
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_2048 2048
+#define IGB_RXBUFFER_3072 3072
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
-#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
+#define IGB_TS_HDR_LEN 16
+
+#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IGB_MAX_FRAME_BUILD_SKB \
+ (SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
+#else
+#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
+#endif
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define IGB_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
#define AUTO_ALL_MODES 0
#define IGB_EEPROM_APME 0x0400
@@ -301,12 +313,51 @@ struct igb_q_vector {
};
enum e1000_ring_flags_t {
+ IGB_RING_FLAG_RX_3K_BUFFER,
+ IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};
+#define ring_uses_large_buffer(ring) \
+ test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define set_ring_uses_large_buffer(ring) \
+ set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define clear_ring_uses_large_buffer(ring) \
+ clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+
+#define ring_uses_build_skb(ring) \
+ test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define set_ring_build_skb_enabled(ring) \
+ set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define clear_ring_build_skb_enabled(ring) \
+ clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
+static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return IGB_RXBUFFER_3072;
+
+ if (ring_uses_build_skb(ring))
+ return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
+#endif
+ return IGB_RXBUFFER_2048;
+}
+
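+/* 3K buffers need order-1 pages on 4K-page systems so that each half
+ * of the page can still hold a full buffer.
+ */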
+static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return 1;
+#endif
+ return 0;
+}
+
+#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))
+
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
#define IGB_RX_DESC(R, i) \
@@ -545,6 +596,7 @@ struct igb_adapter {
#define IGB_FLAG_HAS_MSIX BIT(13)
#define IGB_FLAG_EEE BIT(14)
#define IGB_FLAG_VLAN_PROMISC BIT(15)
+#define IGB_FLAG_RX_LEGACY BIT(16)
/* Media Auto Sense */
#define IGB_MAS_ENABLE_0 0X0001
@@ -558,7 +610,6 @@ struct igb_adapter {
#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
#define IGB_82576_TSYNC_SHIFT 19
-#define IGB_TS_HDR_LEN 16
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
@@ -591,7 +642,6 @@ void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
bool igb_has_link(struct igb_adapter *adapter);
@@ -604,7 +654,7 @@ void igb_ptp_reset(struct igb_adapter *adapter);
void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 737b664d004c..0efb62db6efd 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -144,7 +144,15 @@ static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
};
#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
-static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static const char igb_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IGB_PRIV_FLAGS_LEGACY_RX BIT(0)
+ "legacy-rx",
+};
+
+#define IGB_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igb_priv_flags_strings)
+
+static int igb_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -152,76 +160,73 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
u32 status;
u32 speed;
+ u32 supported, advertising;
status = rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP |
- SUPPORTED_Pause);
- ecmd->advertising = ADVERTISED_TP;
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP |
+ SUPPORTED_Pause);
+ advertising = ADVERTISED_TP;
if (hw->mac.autoneg == 1) {
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
/* the e1000 autoneg seems to match ethtool nicely */
- ecmd->advertising |= hw->phy.autoneg_advertised;
+ advertising |= hw->phy.autoneg_advertised;
}
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy.addr;
- ecmd->transceiver = XCVR_INTERNAL;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy.addr;
} else {
- ecmd->supported = (SUPPORTED_FIBRE |
- SUPPORTED_1000baseKX_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause);
- ecmd->advertising = (ADVERTISED_FIBRE |
- ADVERTISED_1000baseKX_Full);
+ supported = (SUPPORTED_FIBRE |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause);
+ advertising = (ADVERTISED_FIBRE |
+ ADVERTISED_1000baseKX_Full);
if (hw->mac.type == e1000_i354) {
if ((hw->device_id ==
E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) &&
!(status & E1000_STATUS_2P5_SKU_OVER)) {
- ecmd->supported |= SUPPORTED_2500baseX_Full;
- ecmd->supported &=
- ~SUPPORTED_1000baseKX_Full;
- ecmd->advertising |= ADVERTISED_2500baseX_Full;
- ecmd->advertising &=
- ~ADVERTISED_1000baseKX_Full;
+ supported |= SUPPORTED_2500baseX_Full;
+ supported &= ~SUPPORTED_1000baseKX_Full;
+ advertising |= ADVERTISED_2500baseX_Full;
+ advertising &= ~ADVERTISED_1000baseKX_Full;
}
}
if (eth_flags->e100_base_fx) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ supported |= SUPPORTED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
}
if (hw->mac.autoneg == 1)
- ecmd->advertising |= ADVERTISED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.port = PORT_FIBRE;
}
if (hw->mac.autoneg != 1)
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
switch (hw->fc.requested_mode) {
case e1000_fc_full:
- ecmd->advertising |= ADVERTISED_Pause;
+ advertising |= ADVERTISED_Pause;
break;
case e1000_fc_rx_pause:
- ecmd->advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
break;
case e1000_fc_tx_pause:
- ecmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
break;
default:
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
}
if (status & E1000_STATUS_LU) {
if ((status & E1000_STATUS_2P5_SKU) &&
@@ -236,39 +241,46 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
speed = SPEED_UNKNOWN;
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ethtool_cmd_speed_set(ecmd, speed);
+ cmd->base.speed = speed;
if ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg)
- ecmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if (hw->phy.media_type == e1000_media_type_copper)
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
ETH_TP_MDI;
else
- ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
if (hw->phy.mdix == AUTO_ALL_MODES)
- ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
else
- ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+ cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
return 0;
}
-static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int igb_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 advertising;
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
@@ -283,12 +295,12 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
if (hw->phy.media_type != e1000_media_type_copper)
return -EOPNOTSUPP;
- if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
- (ecmd->autoneg != AUTONEG_ENABLE)) {
+ if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (cmd->base.autoneg != AUTONEG_ENABLE)) {
dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
return -EINVAL;
}
@@ -297,10 +309,13 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
if (hw->phy.media_type == e1000_media_type_fiber) {
- hw->phy.autoneg_advertised = ecmd->advertising |
+ hw->phy.autoneg_advertised = advertising |
ADVERTISED_FIBRE |
ADVERTISED_Autoneg;
switch (adapter->link_speed) {
@@ -320,31 +335,31 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
break;
}
} else {
- hw->phy.autoneg_advertised = ecmd->advertising |
+ hw->phy.autoneg_advertised = advertising |
ADVERTISED_TP |
ADVERTISED_Autoneg;
}
- ecmd->advertising = hw->phy.autoneg_advertised;
+ advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
} else {
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
/* calling this overrides forced MDI setting */
- if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ if (igb_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
clear_bit(__IGB_RESETTING, &adapter->state);
return -EINVAL;
}
}
/* MDI-X => 2; MDI => 1; Auto => 3 */
- if (ecmd->eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl) {
/* fix up the value for auto (3 => 0) as zero is mapped
* internally to auto
*/
- if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
hw->phy.mdix = AUTO_ALL_MODES;
else
- hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
}
/* reset the link */
@@ -852,6 +867,8 @@ static void igb_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
+
+ drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN;
}
static void igb_get_ringparam(struct net_device *netdev,
@@ -1811,14 +1828,14 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
tx_ntc = tx_ring->next_to_clean;
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
- while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
+ while (rx_desc->wb.upper.length) {
/* check Rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* sync Rx buffer for CPU read */
dma_sync_single_for_cpu(rx_ring->dev,
rx_buffer_info->dma,
- IGB_RX_BUFSZ,
+ size,
DMA_FROM_DEVICE);
/* verify contents of skb */
@@ -1828,12 +1845,21 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
/* sync Rx buffer for device write */
dma_sync_single_for_device(rx_ring->dev,
rx_buffer_info->dma,
- IGB_RX_BUFSZ,
+ size,
DMA_FROM_DEVICE);
/* unmap buffer on Tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer_info->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer_info, dma),
+ dma_unmap_len(tx_buffer_info, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer_info, len, 0);
/* increment Rx/Tx next to clean counters */
rx_ntc++;
@@ -2271,6 +2297,8 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
return IGB_STATS_LEN;
case ETH_SS_TEST:
return IGB_TEST_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return IGB_PRIV_FLAGS_STR_LEN;
default:
return -ENOTSUPP;
}
@@ -2376,6 +2404,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, igb_priv_flags_strings,
+ IGB_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
}
}
@@ -3388,9 +3420,38 @@ static int igb_set_channels(struct net_device *netdev,
return 0;
}
+static u32 igb_get_priv_flags(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags & IGB_FLAG_RX_LEGACY)
+ priv_flags |= IGB_PRIV_FLAGS_LEGACY_RX;
+
+ return priv_flags;
+}
+
+static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags = adapter->flags;
+
+ flags &= ~IGB_FLAG_RX_LEGACY;
+ if (priv_flags & IGB_PRIV_FLAGS_LEGACY_RX)
+ flags |= IGB_FLAG_RX_LEGACY;
+
+ if (flags != adapter->flags) {
+ adapter->flags = flags;
+
+ /* reset interface to repopulate queues */
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
- .get_settings = igb_get_settings,
- .set_settings = igb_set_settings,
.get_drvinfo = igb_get_drvinfo,
.get_regs_len = igb_get_regs_len,
.get_regs = igb_get_regs,
@@ -3426,8 +3487,12 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_rxfh = igb_set_rxfh,
.get_channels = igb_get_channels,
.set_channels = igb_set_channels,
+ .get_priv_flags = igb_get_priv_flags,
+ .set_priv_flags = igb_set_priv_flags,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
+ .get_link_ksettings = igb_get_link_ksettings,
+ .set_link_ksettings = igb_set_link_ksettings,
};
void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index be456bae8169..26a821fcd220 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -554,7 +554,7 @@ rx_ring_summary:
16, 1,
page_address(buffer_info->page) +
buffer_info->page_offset,
- IGB_RX_BUFSZ, true);
+ igb_rx_bufsz(rx_ring), true);
}
}
}
@@ -3293,7 +3293,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc(size);
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
@@ -3404,6 +3404,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
txdctl |= IGB_TX_HTHRESH << 8;
txdctl |= IGB_TX_WTHRESH << 16;
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct igb_tx_buffer) * ring->count);
+
txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
wr32(E1000_TXDCTL(reg_idx), txdctl);
}
@@ -3435,7 +3439,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc(size);
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
@@ -3720,6 +3724,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
struct igb_ring *ring)
{
struct e1000_hw *hw = &adapter->hw;
+ union e1000_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
int reg_idx = ring->reg_idx;
u32 srrctl = 0, rxdctl = 0;
@@ -3741,7 +3746,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
/* set descriptor configuration */
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring_uses_large_buffer(ring))
+ srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -3758,11 +3766,39 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct igb_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = IGB_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
/* enable receive descriptor fetching */
rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring)
+{
+ /* set build_skb and buffer size flags */
+ clear_ring_build_skb_enabled(rx_ring);
+ clear_ring_uses_large_buffer(rx_ring);
+
+ if (adapter->flags & IGB_FLAG_RX_LEGACY)
+ return;
+
+ set_ring_build_skb_enabled(rx_ring);
+
+#if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+ return;
+
+ set_ring_uses_large_buffer(rx_ring);
+#endif
+}
+
/**
* igb_configure_rx - Configure receive Unit after Reset
* @adapter: board private structure
@@ -3780,8 +3816,12 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *rx_ring = adapter->rx_ring[i];
+
+ igb_set_rx_buffer_len(adapter, rx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+ }
}
/**
@@ -3822,55 +3862,63 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
igb_free_tx_resources(adapter->tx_ring[i]);
}
-void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
- struct igb_tx_buffer *tx_buffer)
-{
- if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
- if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- }
- tx_buffer->next_to_watch = NULL;
- tx_buffer->skb = NULL;
- dma_unmap_len_set(tx_buffer, len, 0);
- /* buffer_info must be completely set up in the transmit path */
-}
-
/**
* igb_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
**/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
- struct igb_tx_buffer *buffer_info;
- unsigned long size;
- u16 i;
+ u16 i = tx_ring->next_to_clean;
+ struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- if (!tx_ring->tx_buffer_info)
- return;
- /* Free all the Tx ring sk_buffs */
+ while (i != tx_ring->next_to_use) {
+ union e1000_adv_tx_desc *eop_desc, *tx_desc;
- for (i = 0; i < tx_ring->count; i++) {
- buffer_info = &tx_ring->tx_buffer_info[i];
- igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
- }
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
- netdev_tx_reset_queue(txring_txq(tx_ring));
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
- size = sizeof(struct igb_tx_buffer) * tx_ring->count;
- memset(tx_ring->tx_buffer_info, 0, size);
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
- /* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
+ }
+ /* reset BQL for queue */
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ /* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
}
@@ -3932,50 +3980,39 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
**/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
- unsigned long size;
- u16 i;
+ u16 i = rx_ring->next_to_clean;
if (rx_ring->skb)
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
- if (!rx_ring->rx_buffer_info)
- return;
-
/* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
+ while (i != rx_ring->next_to_alloc) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
- if (!buffer_info->page)
- continue;
-
/* Invalidate cache lines that may have been written to by
* device so that we avoid corrupting memory.
*/
dma_sync_single_range_for_cpu(rx_ring->dev,
buffer_info->dma,
buffer_info->page_offset,
- IGB_RX_BUFSZ,
+ igb_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
/* free resources associated with mapping */
dma_unmap_page_attrs(rx_ring->dev,
buffer_info->dma,
- PAGE_SIZE,
+ igb_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ IGB_RX_DMA_ATTR);
__page_frag_cache_drain(buffer_info->page,
buffer_info->pagecnt_bias);
- buffer_info->page = NULL;
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
}
- size = sizeof(struct igb_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -4240,7 +4277,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
unsigned int vfn = adapter->vfs_allocated_count;
- u32 rctl = 0, vmolr = 0;
+ u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
int count;
/* Check for Promiscuous and All Multicast modes */
@@ -4298,6 +4335,14 @@ static void igb_set_rx_mode(struct net_device *netdev)
E1000_RCTL_VFE);
wr32(E1000_RCTL, rctl);
+#if (PAGE_SIZE < 8192)
+ if (!adapter->vfs_allocated_count) {
+ if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+ rlpml = IGB_MAX_FRAME_BUILD_SKB;
+ }
+#endif
+ wr32(E1000_RLPML, rlpml);
+
/* In order to support SR-IOV and eventually VMDq it is necessary to set
* the VMOLR to enable the appropriate modes. Without this workaround
* we will have issues with VLAN tag stripping not being done for frames
@@ -4312,12 +4357,17 @@ static void igb_set_rx_mode(struct net_device *netdev)
vmolr |= rd32(E1000_VMOLR(vfn)) &
~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
- /* enable Rx jumbo frames, no need for restriction */
+ /* enable Rx jumbo frames, restrict as needed to support build_skb */
vmolr &= ~E1000_VMOLR_RLPML_MASK;
- vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE;
+#if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+ vmolr |= IGB_MAX_FRAME_BUILD_SKB;
+ else
+#endif
+ vmolr |= MAX_JUMBO_FRAME_SIZE;
+ vmolr |= E1000_VMOLR_LPE;
wr32(E1000_VMOLR(vfn), vmolr);
- wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE);
igb_restore_vf_multicasts(adapter);
}
@@ -5256,18 +5306,32 @@ static void igb_tx_map(struct igb_ring *tx_ring,
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
- for (;;) {
+ while (tx_buffer != first) {
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+		if (i-- == 0)
+			i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
- igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer == first)
- break;
- if (i == 0)
- i = tx_ring->count;
- i--;
}
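+	/* tx_buffer now points at 'first', whose skb header mapping is
+	 * released with dma_unmap_single below
+	 */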
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+
tx_ring->next_to_use = i;
}
@@ -5339,7 +5403,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
out_drop:
- igb_unmap_and_free_tx_resource(tx_ring, first);
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
@@ -6686,7 +6751,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
DMA_TO_DEVICE);
/* clear tx_buffer data */
- tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
/* clear last DMA location and unmap remaining buffers */
@@ -6822,8 +6886,14 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- /* transfer page from old buffer to new buffer */
- *new_buff = *old_buff;
+ /* Transfer page from old buffer to new buffer.
+ * Move each member individually to avoid possible store
+ * forwarding stalls.
+ */
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static inline bool igb_page_is_reserved(struct page *page)
@@ -6831,11 +6901,10 @@ static inline bool igb_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
- struct page *page,
- unsigned int truesize)
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
{
- unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
/* avoid re-using remote pages */
if (unlikely(igb_page_is_reserved(page)))
@@ -6843,16 +6912,13 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_ref_count(page) != pagecnt_bias))
+ if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IGB_RX_BUFSZ;
#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
+#define IGB_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
- if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ if (rx_buffer->page_offset > IGB_LAST_OFFSET)
return false;
#endif
@@ -6860,7 +6926,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
- if (unlikely(pagecnt_bias == 1)) {
+ if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
@@ -6872,34 +6938,56 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
* igb_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
+ * @size: size of buffer to be added
*
* This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
- *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
**/
-static bool igb_add_rx_frag(struct igb_ring *rx_ring,
+static void igb_add_rx_frag(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
- unsigned int size,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = IGB_RX_BUFSZ;
+ unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
+#endif
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
+
+static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ union e1000_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
- unsigned int pull_len;
+ unsigned int headlen;
+ struct sk_buff *skb;
- if (unlikely(skb_is_nonlinear(skb)))
- goto add_tail_frag;
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
+ if (unlikely(!skb))
+ return NULL;
if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
@@ -6907,95 +6995,73 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
size -= IGB_TS_HDR_LEN;
}
- if (likely(size <= IGB_RX_HDR_LEN)) {
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!igb_page_is_reserved(page)))
- return true;
-
- /* this page cannot be reused so discard it */
- return false;
- }
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > IGB_RX_HDR_LEN)
+ headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
- va += pull_len;
- size -= pull_len;
-
-add_tail_frag:
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- (unsigned long)va & ~PAGE_MASK, size, truesize);
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ (va + headlen) - page_address(rx_buffer->page),
+ size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
- return igb_can_reuse_rx_page(rx_buffer, page, truesize);
+ return skb;
}
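
Editorial aside: igb_construct_skb() above copies at most IGB_RX_HDR_LEN bytes of protocol headers into the skb's linear area (capped by eth_get_headlen) and attaches any remainder as a page fragment; when nothing is left over, the bias is handed back because the page was never really consumed. A hedged sketch of that split decision, with a trivial stand-in for eth_get_headlen():

#include <stdio.h>

#define RX_HDR_LEN 256u		/* placeholder for IGB_RX_HDR_LEN */

/* crude stand-in for eth_get_headlen(): pretend headers are 54 bytes */
static unsigned int fake_headlen(unsigned int cap)
{
	unsigned int hdrs = 54;
	return hdrs < cap ? hdrs : cap;
}

int main(void)
{
	unsigned int sizes[] = { 60, 1514 };

	for (int i = 0; i < 2; i++) {
		unsigned int size = sizes[i];
		unsigned int headlen = size;

		if (headlen > RX_HDR_LEN)
			headlen = fake_headlen(RX_HDR_LEN);

		unsigned int frag = size - headlen;
		printf("size=%u copy=%u frag=%u %s\n", size, headlen, frag,
		       frag ? "(page kept, offset advanced)"
			    : "(page untouched, bias returned)");
	}
	return 0;
}
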
-static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ union e1000_adv_rx_desc *rx_desc,
+ unsigned int size)
{
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
- struct igb_rx_buffer *rx_buffer;
- struct page *page;
-
- rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
-
- if (likely(!skb)) {
- void *page_addr = page_address(page) +
- rx_buffer->page_offset;
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(IGB_SKB_PAD + size);
+#endif
+ struct sk_buff *skb;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch(va + L1_CACHE_BYTES);
#endif
- /* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_failed++;
- return NULL;
- }
+ /* build an skb around the page buffer */
+ skb = build_skb(va - IGB_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
- }
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, IGB_SKB_PAD);
+ __skb_put(skb, size);
- /* pull page into skb */
- if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- igb_reuse_rx_page(rx_ring, rx_buffer);
- } else {
- /* We are not reusing the buffer so unmap it and free
- * any references we are holding to it
- */
- dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
+ /* pull timestamp out of packet data */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+ __skb_pull(skb, IGB_TS_HDR_LEN);
}
- /* clear contents of rx_buffer */
- rx_buffer->page = NULL;
+ /* update buffer offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
return skb;
}
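
Editorial aside: igb_build_skb() wraps the skb directly around the DMA buffer instead of copying. The skb metadata lives in the IGB_SKB_PAD headroom in front of the packet and struct skb_shared_info sits behind the data, which is why the >= 8K-page truesize adds both terms. A hedged model of that arithmetic (the alignment and sizes below are illustrative, not the kernel's exact values):

#include <stdio.h>

#define SKB_PAD     64u		/* placeholder for IGB_SKB_PAD               */
#define SHINFO_SIZE 320u	/* placeholder for aligned skb_shared_info   */

static unsigned int align_up(unsigned int v, unsigned int a)
{
	return (v + a - 1) & ~(a - 1);	/* models SKB_DATA_ALIGN() */
}

int main(void)
{
	unsigned int size = 1514;			/* bytes written by HW */
	unsigned int data = align_up(SKB_PAD + size, 64);
	unsigned int truesize = SHINFO_SIZE + data;	/* large-page branch   */

	printf("headroom=%u pad+data=%u truesize=%u\n", SKB_PAD, data, truesize);
	return 0;
}
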
@@ -7154,6 +7220,47 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
+static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
+ const unsigned int size)
+{
+ struct igb_rx_buffer *rx_buffer;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ prefetchw(rx_buffer->page);
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
+ rx_buffer->pagecnt_bias--;
+
+ return rx_buffer;
+}
+
+static void igb_put_rx_buffer(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer)
+{
+ if (igb_can_reuse_rx_page(rx_buffer)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+ IGB_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+}
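
Editorial aside: igb_put_rx_buffer() concentrates the end-of-use decision in one place: either hand the other half of the page back to the ring, or unmap the page and drop every reference still held through the bias. A hedged sketch with the reuse test reduced to a boolean (the real igb_can_reuse_rx_page() also inspects the page's refcount and offset):

#include <stdbool.h>
#include <stdio.h>

/* stubs standing in for the two outcomes in igb_put_rx_buffer() */
static void recycle(void)
{
	puts("half page handed back to the ring");
}

static void release(unsigned short bias)
{
	printf("page unmapped, %u leftover references dropped\n",
	       (unsigned int)bias);
}

int main(void)
{
	bool can_reuse = false;		/* pretend the page cannot be reused */
	unsigned short pagecnt_bias = 3;/* references still held via bias    */

	if (can_reuse)
		recycle();
	else
		release(pagecnt_bias);
	return 0;
}
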
+
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
struct igb_ring *rx_ring = q_vector->rx.ring;
@@ -7163,6 +7270,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
+ struct igb_rx_buffer *rx_buffer;
+ unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -7171,8 +7280,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
}
rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
- if (!rx_desc->wb.upper.status_error)
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
break;
/* This memory barrier is needed to keep us from reading
@@ -7181,13 +7290,25 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
*/
dma_rmb();
+ rx_buffer = igb_get_rx_buffer(rx_ring, size);
+
/* retrieve a buffer from the ring */
- skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+ if (skb)
+ igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
+ else
+ skb = igb_construct_skb(rx_ring, rx_buffer,
+ rx_desc, size);
/* exit if we failed to retrieve a buffer */
- if (!skb)
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ rx_buffer->pagecnt_bias++;
break;
+ }
+ igb_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++;
/* fetch next buffer in frame if non-eop */
@@ -7231,6 +7352,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
return total_packets;
}
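
Editorial aside: the receive loop now uses the written-back length as its "descriptor done" test, pulls the buffer with igb_get_rx_buffer(), and then takes one of three paths: append a fragment to an in-progress frame, build an skb around the buffer, or construct a copy-header skb. A hedged control-flow sketch of that dispatch (the helpers are stubs standing in for the driver functions above):

#include <stdbool.h>
#include <stdio.h>

static const char *add_rx_frag(void)    { return "appended frag to existing skb"; }
static const char *build_skb_path(void) { return "built skb around the buffer"; }
static const char *construct_skb(void)  { return "allocated skb, copied headers"; }

int main(void)
{
	unsigned int size = 1514;	/* length written back by hardware     */
	bool have_skb = false;		/* non-EOP frame already in progress?  */
	bool uses_build_skb = true;	/* stand-in for ring_uses_build_skb()  */

	if (!size) {			/* length 0: descriptor not done yet   */
		puts("no work");
		return 0;
	}

	const char *what = have_skb ? add_rx_frag()
			 : uses_build_skb ? build_skb_path()
					  : construct_skb();
	printf("%s\n", what);
	return 0;
}
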
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
+}
+
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
@@ -7242,21 +7368,23 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
/* alloc new page for storage */
- page = dev_alloc_page();
+ page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ igb_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IGB_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_page(page);
+ __free_pages(page, igb_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_failed++;
return false;
@@ -7264,7 +7392,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = igb_rx_offset(rx_ring);
bi->pagecnt_bias = 1;
return true;
@@ -7279,6 +7407,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ u16 bufsz;
/* nothing to do */
if (!cleaned_count)
@@ -7288,14 +7417,15 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
+ bufsz = igb_rx_bufsz(rx_ring);
+
do {
if (!igb_alloc_mapped_page(rx_ring, bi))
break;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- IGB_RX_BUFSZ,
+ bi->page_offset, bufsz,
DMA_FROM_DEVICE);
/* Refresh the desc even if buffer_addrs didn't change
@@ -7312,8 +7442,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
}
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.upper.status_error = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
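
Editorial aside: clearing wb.upper.length on refill pairs with the "if (!size) break" test earlier in the loop: a descriptor whose length is still zero has not been written back by hardware, so the length field doubles as the done flag and the status word no longer needs to be cleared separately. A minimal model of that handshake (plain integers stand in for the little-endian descriptor fields):

#include <stdio.h>

struct model_desc { unsigned short length; };	/* wb.upper.length stand-in */

int main(void)
{
	struct model_desc ring[4];

	/* software refill: a cleared length means "owned by hardware" */
	for (int i = 0; i < 4; i++)
		ring[i].length = 0;

	/* hardware write-back: a non-zero length means "done" */
	ring[0].length = 1514;

	for (int i = 0; i < 4; i++) {
		if (!ring[i].length)
			break;		/* stop at the first unfinished descriptor */
		printf("desc %d complete, %u bytes\n", i,
		       (unsigned int)ring[i].length);
	}
	return 0;
}
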
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c4477552ce9e..7a3fd4d74592 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -764,8 +764,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
* incoming frame. The value is stored in little endian format starting on
* byte 8.
**/
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
- unsigned char *va,
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb)
{
__le64 *regval = (__le64 *)va;
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 8dea1b1367ef..34faa113a8a0 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -71,45 +71,45 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
-static int igbvf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int igbvf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 status;
- ecmd->supported = SUPPORTED_1000baseT_Full;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ecmd->advertising = ADVERTISED_1000baseT_Full;
-
- ecmd->port = -1;
- ecmd->transceiver = XCVR_DUMMY1;
+ cmd->base.port = -1;
status = er32(STATUS);
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
else if (status & E1000_STATUS_SPEED_100)
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
else
- ethtool_cmd_speed_set(ecmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
if (status & E1000_STATUS_FD)
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- ecmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
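
Editorial aside: the igbvf conversion swaps the old u32 supported/advertising masks for the ethtool_link_ksettings bitmap helpers (..._zero_link_mode / ..._add_link_mode). A hedged userspace model of what those macros do over a link-mode bitmap; the bitmap width and bit index below are illustrative only, not the kernel's definitions:

#include <stdio.h>
#include <string.h>

#define LINK_MODE_BITS       96	/* illustrative bitmap width              */
#define MODE_1000BASET_FULL   5	/* illustrative bit index                 */
#define BITS_PER_LONG (8 * sizeof(unsigned long))

typedef unsigned long linkmode_t[(LINK_MODE_BITS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static void mode_zero(linkmode_t m)
{
	memset(m, 0, sizeof(linkmode_t));
}

static void mode_add(linkmode_t m, unsigned int bit)
{
	m[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int mode_test(const linkmode_t m, unsigned int bit)
{
	return !!(m[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)));
}

int main(void)
{
	linkmode_t supported, advertising;

	mode_zero(supported);			/* ..._zero_link_mode(cmd, supported)     */
	mode_add(supported, MODE_1000BASET_FULL);/* ..._add_link_mode(..., 1000baseT_Full) */
	mode_zero(advertising);
	mode_add(advertising, MODE_1000BASET_FULL);

	printf("1000baseT_Full supported: %d\n",
	       mode_test(supported, MODE_1000BASET_FULL));
	return 0;
}
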
-static int igbvf_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int igbvf_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
return -EOPNOTSUPP;
}
@@ -443,8 +443,6 @@ static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops igbvf_ethtool_ops = {
- .get_settings = igbvf_get_settings,
- .set_settings = igbvf_set_settings,
.get_drvinfo = igbvf_get_drvinfo,
.get_regs_len = igbvf_get_regs_len,
.get_regs = igbvf_get_regs,
@@ -467,6 +465,8 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
.get_ethtool_stats = igbvf_get_ethtool_stats,
.get_coalesce = igbvf_get_coalesce,
.set_coalesce = igbvf_set_coalesce,
+ .get_link_ksettings = igbvf_get_link_ksettings,
+ .set_link_ksettings = igbvf_set_link_ksettings,
};
void igbvf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index e5d72559cca9..d10a0d242dda 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -94,24 +94,30 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
#define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats)
static int
-ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+ixgb_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ cmd->base.port = PORT_FIBRE;
if (netif_carrier_ok(adapter->netdev)) {
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.speed = SPEED_10000;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -126,13 +132,14 @@ void ixgb_set_speed_duplex(struct net_device *netdev)
}
static int
-ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+ixgb_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
- if (ecmd->autoneg == AUTONEG_ENABLE ||
- (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ if (cmd->base.autoneg == AUTONEG_ENABLE ||
+ (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
if (netif_running(adapter->netdev)) {
@@ -630,8 +637,6 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
static const struct ethtool_ops ixgb_ethtool_ops = {
- .get_settings = ixgb_get_settings,
- .set_settings = ixgb_set_settings,
.get_drvinfo = ixgb_get_drvinfo,
.get_regs_len = ixgb_get_regs_len,
.get_regs = ixgb_get_regs,
@@ -649,6 +654,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
.set_phys_id = ixgb_set_phys_id,
.get_sset_count = ixgb_get_sset_count,
.get_ethtool_stats = ixgb_get_ethtool_stats,
+ .get_link_ksettings = ixgb_get_link_ksettings,
+ .set_link_ksettings = ixgb_set_link_ksettings,
};
void ixgb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 90fa5bf23d1b..0da0752fedef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -186,60 +186,62 @@ static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
}
}
-static int ixgbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int ixgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
bool autoneg = false;
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
/* set the supported link speeds */
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->supported |= ixgbe_get_supported_10gtypes(hw);
+ supported |= ixgbe_get_supported_10gtypes(hw);
if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
+ supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
SUPPORTED_1000baseKX_Full :
SUPPORTED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
- ecmd->supported |= SUPPORTED_100baseT_Full;
+ supported |= SUPPORTED_100baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_10_FULL)
- ecmd->supported |= SUPPORTED_10baseT_Full;
+ supported |= SUPPORTED_10baseT_Full;
/* default advertised speed if phy.autoneg_advertised isn't set */
- ecmd->advertising = ecmd->supported;
+ advertising = supported;
/* set the advertised speeds */
if (hw->phy.autoneg_advertised) {
- ecmd->advertising = 0;
+ advertising = 0;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
- ecmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_100baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G;
+ advertising |= supported & ADVRTSD_MSK_10G;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
- if (ecmd->supported & SUPPORTED_1000baseKX_Full)
- ecmd->advertising |= ADVERTISED_1000baseKX_Full;
+ if (supported & SUPPORTED_1000baseKX_Full)
+ advertising |= ADVERTISED_1000baseKX_Full;
else
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ advertising |= ADVERTISED_1000baseT_Full;
}
} else {
if (hw->phy.multispeed_fiber && !autoneg) {
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
}
}
if (autoneg) {
- ecmd->supported |= SUPPORTED_Autoneg;
- ecmd->advertising |= ADVERTISED_Autoneg;
- ecmd->autoneg = AUTONEG_ENABLE;
+ supported |= SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
+ cmd->base.autoneg = AUTONEG_ENABLE;
} else
- ecmd->autoneg = AUTONEG_DISABLE;
-
- ecmd->transceiver = XCVR_EXTERNAL;
+ cmd->base.autoneg = AUTONEG_DISABLE;
/* Determine the remaining settings based on the PHY type. */
switch (adapter->hw.phy.type) {
@@ -248,14 +250,14 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_phy_x550em_ext_t:
case ixgbe_phy_fw:
case ixgbe_phy_cu_unknown:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ cmd->base.port = PORT_TP;
break;
case ixgbe_phy_qt:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case ixgbe_phy_nl:
case ixgbe_phy_sfp_passive_tyco:
@@ -273,9 +275,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_sfp_type_da_cu:
case ixgbe_sfp_type_da_cu_core0:
case ixgbe_sfp_type_da_cu_core1:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_DA;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_DA;
break;
case ixgbe_sfp_type_sr:
case ixgbe_sfp_type_lr:
@@ -285,102 +287,113 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
case ixgbe_sfp_type_1g_lx_core1:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case ixgbe_sfp_type_not_present:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_NONE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_NONE;
break;
case ixgbe_sfp_type_1g_cu_core0:
case ixgbe_sfp_type_1g_cu_core1:
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ cmd->base.port = PORT_TP;
break;
case ixgbe_sfp_type_unknown:
default:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_OTHER;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_OTHER;
break;
}
break;
case ixgbe_phy_xaui:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_NONE;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_NONE;
break;
case ixgbe_phy_unknown:
case ixgbe_phy_generic:
case ixgbe_phy_sfp_unsupported:
default:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_OTHER;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_OTHER;
break;
}
/* Indicate pause support */
- ecmd->supported |= SUPPORTED_Pause;
+ supported |= SUPPORTED_Pause;
switch (hw->fc.requested_mode) {
case ixgbe_fc_full:
- ecmd->advertising |= ADVERTISED_Pause;
+ advertising |= ADVERTISED_Pause;
break;
case ixgbe_fc_rx_pause:
- ecmd->advertising |= ADVERTISED_Pause |
+ advertising |= ADVERTISED_Pause |
ADVERTISED_Asym_Pause;
break;
case ixgbe_fc_tx_pause:
- ecmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
break;
default:
- ecmd->advertising &= ~(ADVERTISED_Pause |
+ advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
}
if (netif_carrier_ok(netdev)) {
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
break;
case IXGBE_LINK_SPEED_5GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_5000);
+ cmd->base.speed = SPEED_5000;
break;
case IXGBE_LINK_SPEED_2_5GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_2500);
+ cmd->base.speed = SPEED_2500;
break;
case IXGBE_LINK_SPEED_1GB_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
case IXGBE_LINK_SPEED_100_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
case IXGBE_LINK_SPEED_10_FULL:
- ethtool_cmd_speed_set(ecmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
break;
default:
break;
}
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
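
Editorial aside: because ixgbe still computes its masks with the legacy SUPPORTED_*/ADVERTISED_* bits, the new handler converts the link-mode bitmap to a u32 at entry and converts the result back before returning (ethtool_convert_link_mode_to_legacy_u32 and its inverse). A hedged model of that round trip over the low 32 link-mode bits; the legacy bit values below are placeholders:

#include <stdio.h>

/* illustrative legacy bits, mirroring the SUPPORTED_* style used above */
#define LEGACY_10000BASET_FULL (1u << 12)
#define LEGACY_FIBRE           (1u << 10)

/* the first 32 link modes map onto the legacy u32, so the round trip
 * amounts to copying the low word of the bitmap
 */
static unsigned int linkmode_to_legacy(const unsigned long *bitmap)
{
	return (unsigned int)(bitmap[0] & 0xffffffffu);
}

static void legacy_to_linkmode(unsigned long *bitmap, unsigned int legacy)
{
	bitmap[0] = legacy;
}

int main(void)
{
	unsigned long supported[2] = { 0 };
	unsigned int legacy;

	legacy = linkmode_to_legacy(supported);		/* convert at entry     */
	legacy |= LEGACY_10000BASET_FULL | LEGACY_FIBRE;
	legacy_to_linkmode(supported, legacy);		/* convert back at exit */

	printf("supported[0] = 0x%lx\n", supported[0]);
	return 0;
}
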
-static int ixgbe_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int ixgbe_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
s32 err = 0;
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)) {
@@ -388,12 +401,12 @@ static int ixgbe_set_settings(struct net_device *netdev,
* this function does not support duplex forcing, but can
* limit the advertising of the adapter to the specified speed
*/
- if (ecmd->advertising & ~ecmd->supported)
+ if (advertising & ~supported)
return -EINVAL;
/* only allow one speed at a time if no autoneg */
- if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
- if (ecmd->advertising ==
+ if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
+ if (advertising ==
(ADVERTISED_10000baseT_Full |
ADVERTISED_1000baseT_Full))
return -EINVAL;
@@ -401,16 +414,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
old = hw->phy.autoneg_advertised;
advertised = 0;
- if (ecmd->advertising & ADVERTISED_10000baseT_Full)
+ if (advertising & ADVERTISED_10000baseT_Full)
advertised |= IXGBE_LINK_SPEED_10GB_FULL;
- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ if (advertising & ADVERTISED_1000baseT_Full)
advertised |= IXGBE_LINK_SPEED_1GB_FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
advertised |= IXGBE_LINK_SPEED_100_FULL;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
advertised |= IXGBE_LINK_SPEED_10_FULL;
if (old == advertised)
@@ -428,10 +441,11 @@ static int ixgbe_set_settings(struct net_device *netdev,
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
} else {
/* in this case we currently only support 10Gb/FULL */
- u32 speed = ethtool_cmd_speed(ecmd);
- if ((ecmd->autoneg == AUTONEG_ENABLE) ||
- (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
- (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ u32 speed = cmd->base.speed;
+
+ if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
+ (advertising != ADVERTISED_10000baseT_Full) ||
+ (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
}
@@ -3402,8 +3416,6 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
- .get_settings = ixgbe_get_settings,
- .set_settings = ixgbe_set_settings,
.get_drvinfo = ixgbe_get_drvinfo,
.get_regs_len = ixgbe_get_regs_len,
.get_regs = ixgbe_get_regs,
@@ -3442,6 +3454,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_ts_info = ixgbe_get_ts_info,
.get_module_info = ixgbe_get_module_info,
.get_module_eeprom = ixgbe_get_module_eeprom,
+ .get_link_ksettings = ixgbe_get_link_ksettings,
+ .set_link_ksettings = ixgbe_set_link_ksettings,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a7a430a7be2c..852a2e7e25ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2122,7 +2122,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
prefetch(va + L1_CACHE_BYTES);
#endif
- /* build an skb to around the page buffer */
+ /* build an skb around the page buffer */
skb = build_skb(va - IXGBE_SKB_PAD, truesize);
if (unlikely(!skb))
return NULL;
@@ -8948,7 +8948,9 @@ static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return ixgbe_setup_tc(dev, tc->tc);
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return ixgbe_setup_tc(dev, tc->mqprio->num_tc);
}
#ifdef CONFIG_PCI_IOV