Diffstat (limited to 'drivers/net/ethernet')
199 files changed, 2266 insertions, 1058 deletions
| diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index 1744d623999d..606c97610808 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -1512,16 +1512,15 @@ static struct notifier_block adin1110_switchdev_notifier = {  	.notifier_call = adin1110_switchdev_event,  }; -static void adin1110_unregister_notifiers(void *data) +static void adin1110_unregister_notifiers(void)  {  	unregister_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);  	unregister_switchdev_notifier(&adin1110_switchdev_notifier);  	unregister_netdevice_notifier(&adin1110_netdevice_nb);  } -static int adin1110_setup_notifiers(struct adin1110_priv *priv) +static int adin1110_setup_notifiers(void)  { -	struct device *dev = &priv->spidev->dev;  	int ret;  	ret = register_netdevice_notifier(&adin1110_netdevice_nb); @@ -1536,13 +1535,14 @@ static int adin1110_setup_notifiers(struct adin1110_priv *priv)  	if (ret < 0)  		goto err_sdev; -	return devm_add_action_or_reset(dev, adin1110_unregister_notifiers, NULL); +	return 0;  err_sdev:  	unregister_switchdev_notifier(&adin1110_switchdev_notifier);  err_netdev:  	unregister_netdevice_notifier(&adin1110_netdevice_nb); +  	return ret;  } @@ -1613,10 +1613,6 @@ static int adin1110_probe_netdevs(struct adin1110_priv *priv)  	if (ret < 0)  		return ret; -	ret = adin1110_setup_notifiers(priv); -	if (ret < 0) -		return ret; -  	for (i = 0; i < priv->cfg->ports_nr; i++) {  		ret = devm_register_netdev(dev, priv->ports[i]->netdev);  		if (ret < 0) { @@ -1693,7 +1689,31 @@ static struct spi_driver adin1110_driver = {  	.probe = adin1110_probe,  	.id_table = adin1110_spi_id,  }; -module_spi_driver(adin1110_driver); + +static int __init adin1110_driver_init(void) +{ +	int ret; + +	ret = adin1110_setup_notifiers(); +	if (ret < 0) +		return ret; + +	ret = spi_register_driver(&adin1110_driver); +	if (ret < 0) { +		adin1110_unregister_notifiers(); +		return ret; +	} + +	return 0; +} + +static void __exit adin1110_exit(void) +{ +	adin1110_unregister_notifiers(); +	spi_unregister_driver(&adin1110_driver); +} +module_init(adin1110_driver_init); +module_exit(adin1110_exit);  MODULE_DESCRIPTION("ADIN1110 Network driver");  MODULE_AUTHOR("Alexandru Tachici <[email protected]>"); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index e104fb02817d..aa0d2f3aaeaa 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -258,6 +258,7 @@ static int greth_init_rings(struct greth_private *greth)  			if (dma_mapping_error(greth->dev, dma_addr)) {  				if (netif_msg_ifup(greth))  					dev_err(greth->dev, "Could not create initial DMA mapping\n"); +				dev_kfree_skb(skb);  				goto cleanup;  			}  			greth->rx_skbuff[i] = skb; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 7633b227b2ca..711d5b5a4c49 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -990,6 +990,7 @@ static int tse_shutdown(struct net_device *dev)  	int ret;  	phylink_stop(priv->phylink); +	phylink_disconnect_phy(priv->phylink);  	netif_stop_queue(dev);  	napi_disable(&priv->napi); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d350eeec8bad..5a454b58498f 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -4543,13 +4543,19 @@ 
static struct pci_driver ena_pci_driver = {  static int __init ena_init(void)  { +	int ret; +  	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);  	if (!ena_wq) {  		pr_err("Failed to create workqueue\n");  		return -ENOMEM;  	} -	return pci_register_driver(&ena_pci_driver); +	ret = pci_register_driver(&ena_pci_driver); +	if (ret) +		destroy_workqueue(ena_wq); + +	return ret;  }  static void __exit ena_cleanup(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 2af3da4b2d05..f409d7bd1f1e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -285,6 +285,9 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)  		/* Yellow Carp devices do not need cdr workaround */  		pdata->vdata->an_cdr_workaround = 0; + +		/* Yellow Carp devices do not need rrc */ +		pdata->vdata->enable_rrc = 0;  	} else {  		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;  		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; @@ -483,6 +486,7 @@ static struct xgbe_version_data xgbe_v2a = {  	.tx_desc_prefetch		= 5,  	.rx_desc_prefetch		= 5,  	.an_cdr_workaround		= 1, +	.enable_rrc			= 1,  };  static struct xgbe_version_data xgbe_v2b = { @@ -498,6 +502,7 @@ static struct xgbe_version_data xgbe_v2b = {  	.tx_desc_prefetch		= 5,  	.rx_desc_prefetch		= 5,  	.an_cdr_workaround		= 1, +	.enable_rrc			= 1,  };  static const struct pci_device_id xgbe_pci_table[] = { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 2156600641b6..4064c3e3dd49 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -239,6 +239,7 @@ enum xgbe_sfp_speed {  #define XGBE_SFP_BASE_BR_1GBE_MAX		0x0d  #define XGBE_SFP_BASE_BR_10GBE_MIN		0x64  #define XGBE_SFP_BASE_BR_10GBE_MAX		0x68 +#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX	0x78  #define XGBE_SFP_BASE_CU_CABLE_LEN		18 @@ -284,6 +285,8 @@ struct xgbe_sfp_eeprom {  #define XGBE_BEL_FUSE_VENDOR	"BEL-FUSE        "  #define XGBE_BEL_FUSE_PARTNO	"1GBT-SFP06      " +#define XGBE_MOLEX_VENDOR	"Molex Inc.      
" +  struct xgbe_sfp_ascii {  	union {  		char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1]; @@ -834,7 +837,11 @@ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,  		break;  	case XGBE_SFP_SPEED_10000:  		min = XGBE_SFP_BASE_BR_10GBE_MIN; -		max = XGBE_SFP_BASE_BR_10GBE_MAX; +		if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], +			   XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0) +			max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX; +		else +			max = XGBE_SFP_BASE_BR_10GBE_MAX;  		break;  	default:  		return false; @@ -1151,7 +1158,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)  	}  	/* Determine the type of SFP */ -	if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR) +	if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE && +	    xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000)) +		phy_data->sfp_base = XGBE_SFP_BASE_10000_CR; +	else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)  		phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;  	else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)  		phy_data->sfp_base = XGBE_SFP_BASE_10000_LR; @@ -1167,9 +1177,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)  		phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;  	else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)  		phy_data->sfp_base = XGBE_SFP_BASE_1000_T; -	else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) && -		 xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000)) -		phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;  	switch (phy_data->sfp_base) {  	case XGBE_SFP_BASE_1000_T: @@ -1979,6 +1986,10 @@ static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)  static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable)  { +	/* PLL_CTRL feature needs to be enabled for fixed PHY modes (Non-Autoneg) only */ +	if (pdata->phy.autoneg != AUTONEG_DISABLE) +		return; +  	XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0,  			 XGBE_PMA_PLL_CTRL_MASK,  			 enable ? 
XGBE_PMA_PLL_CTRL_ENABLE @@ -1989,7 +2000,7 @@ static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable)  }  static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, -					unsigned int cmd, unsigned int sub_cmd) +					enum xgbe_mb_cmd cmd, enum xgbe_mb_subcmd sub_cmd)  {  	unsigned int s0 = 0;  	unsigned int wait; @@ -2029,14 +2040,16 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,  	xgbe_phy_rx_reset(pdata);  reenable_pll: -	/* Enable PLL re-initialization */ -	xgbe_phy_pll_ctrl(pdata, true); +	/* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */ +	if (cmd != XGBE_MB_CMD_POWER_OFF && +	    cmd != XGBE_MB_CMD_RRC) +		xgbe_phy_pll_ctrl(pdata, true);  }  static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)  {  	/* Receiver Reset Cycle */ -	xgbe_phy_perform_ratechange(pdata, 5, 0); +	xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_RRC, XGBE_MB_SUBCMD_NONE);  	netif_dbg(pdata, link, pdata->netdev, "receiver reset complete\n");  } @@ -2046,7 +2059,7 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)  	struct xgbe_phy_data *phy_data = pdata->phy_data;  	/* Power off */ -	xgbe_phy_perform_ratechange(pdata, 0, 0); +	xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_POWER_OFF, XGBE_MB_SUBCMD_NONE);  	phy_data->cur_mode = XGBE_MODE_UNKNOWN; @@ -2061,14 +2074,17 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)  	/* 10G/SFI */  	if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) { -		xgbe_phy_perform_ratechange(pdata, 3, 0); +		xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, XGBE_MB_SUBCMD_ACTIVE);  	} else {  		if (phy_data->sfp_cable_len <= 1) -			xgbe_phy_perform_ratechange(pdata, 3, 1); +			xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, +						    XGBE_MB_SUBCMD_PASSIVE_1M);  		else if (phy_data->sfp_cable_len <= 3) -			xgbe_phy_perform_ratechange(pdata, 3, 2); +			xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, +						    XGBE_MB_SUBCMD_PASSIVE_3M);  		else -			xgbe_phy_perform_ratechange(pdata, 3, 3); +			xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, +						    XGBE_MB_SUBCMD_PASSIVE_OTHER);  	}  	phy_data->cur_mode = XGBE_MODE_SFI; @@ -2083,7 +2099,7 @@ static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)  	xgbe_phy_set_redrv_mode(pdata);  	/* 1G/X */ -	xgbe_phy_perform_ratechange(pdata, 1, 3); +	xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_KX);  	phy_data->cur_mode = XGBE_MODE_X; @@ -2097,7 +2113,7 @@ static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)  	xgbe_phy_set_redrv_mode(pdata);  	/* 1G/SGMII */ -	xgbe_phy_perform_ratechange(pdata, 1, 2); +	xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_SGMII);  	phy_data->cur_mode = XGBE_MODE_SGMII_1000; @@ -2111,7 +2127,7 @@ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)  	xgbe_phy_set_redrv_mode(pdata);  	/* 100M/SGMII */ -	xgbe_phy_perform_ratechange(pdata, 1, 1); +	xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_100MBITS);  	phy_data->cur_mode = XGBE_MODE_SGMII_100; @@ -2125,7 +2141,7 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)  	xgbe_phy_set_redrv_mode(pdata);  	/* 10G/KR */ -	xgbe_phy_perform_ratechange(pdata, 4, 0); +	xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR, XGBE_MB_SUBCMD_NONE);  	phy_data->cur_mode = XGBE_MODE_KR; @@ -2139,7 +2155,7 @@ static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)  	xgbe_phy_set_redrv_mode(pdata);  	/* 2.5G/KX */ -	
xgbe_phy_perform_ratechange(pdata, 2, 0); +	xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_2_5G, XGBE_MB_SUBCMD_NONE);  	phy_data->cur_mode = XGBE_MODE_KX_2500; @@ -2153,7 +2169,7 @@ static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)  	xgbe_phy_set_redrv_mode(pdata);  	/* 1G/KX */ -	xgbe_phy_perform_ratechange(pdata, 1, 3); +	xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_KX);  	phy_data->cur_mode = XGBE_MODE_KX_1000; @@ -2640,7 +2656,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)  	}  	/* No link, attempt a receiver reset cycle */ -	if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { +	if (pdata->vdata->enable_rrc && phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {  		phy_data->rrc_count = 0;  		xgbe_phy_rrc(pdata);  	} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index b875c430222e..71f24cb47935 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -611,6 +611,31 @@ enum xgbe_mdio_mode {  	XGBE_MDIO_MODE_CL45,  }; +enum xgbe_mb_cmd { +	XGBE_MB_CMD_POWER_OFF = 0, +	XGBE_MB_CMD_SET_1G, +	XGBE_MB_CMD_SET_2_5G, +	XGBE_MB_CMD_SET_10G_SFI, +	XGBE_MB_CMD_SET_10G_KR, +	XGBE_MB_CMD_RRC +}; + +enum xgbe_mb_subcmd { +	XGBE_MB_SUBCMD_NONE = 0, + +	/* 10GbE SFP subcommands */ +	XGBE_MB_SUBCMD_ACTIVE = 0, +	XGBE_MB_SUBCMD_PASSIVE_1M, +	XGBE_MB_SUBCMD_PASSIVE_3M, +	XGBE_MB_SUBCMD_PASSIVE_OTHER, + +	/* 1GbE Mode subcommands */ +	XGBE_MB_SUBCMD_10MBITS = 0, +	XGBE_MB_SUBCMD_100MBITS, +	XGBE_MB_SUBCMD_1G_SGMII, +	XGBE_MB_SUBCMD_1G_KX +}; +  struct xgbe_phy {  	struct ethtool_link_ksettings lks; @@ -1013,6 +1038,7 @@ struct xgbe_version_data {  	unsigned int tx_desc_prefetch;  	unsigned int rx_desc_prefetch;  	unsigned int an_cdr_workaround; +	unsigned int enable_rrc;  };  struct xgbe_prv_data { diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index d6cfea65a714..390671640388 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1004,8 +1004,10 @@ static int xgene_enet_open(struct net_device *ndev)  	xgene_enet_napi_enable(pdata);  	ret = xgene_enet_register_irq(ndev); -	if (ret) +	if (ret) { +		xgene_enet_napi_disable(pdata);  		return ret; +	}  	if (ndev->phydev) {  		phy_start(ndev->phydev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index a08f221e30d4..ac4ea93bd8dd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -13,6 +13,7 @@  #include "aq_ptp.h"  #include "aq_filters.h"  #include "aq_macsec.h" +#include "aq_main.h"  #include <linux/ptp_clock_kernel.h> @@ -858,7 +859,7 @@ static int aq_set_ringparam(struct net_device *ndev,  	if (netif_running(ndev)) {  		ndev_running = true; -		dev_close(ndev); +		aq_ndev_close(ndev);  	}  	cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min); @@ -874,7 +875,7 @@ static int aq_set_ringparam(struct net_device *ndev,  		goto err_exit;  	if (ndev_running) -		err = dev_open(ndev, NULL); +		err = aq_ndev_open(ndev);  err_exit:  	return err; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c index 3d0e16791e1c..7eb5851eb95d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c @@ -570,6 +570,7 @@ static int 
aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx,  	ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx); +	memzero_explicit(&key_rec, sizeof(key_rec));  	return ret;  } @@ -899,6 +900,7 @@ static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx,  	ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx); +	memzero_explicit(&sa_key_record, sizeof(sa_key_record));  	return ret;  } @@ -1394,26 +1396,57 @@ static void aq_check_txsa_expiration(struct aq_nic_s *nic)  			egress_sa_threshold_expired);  } +#define AQ_LOCKED_MDO_DEF(mdo)						\ +static int aq_locked_mdo_##mdo(struct macsec_context *ctx)		\ +{									\ +	struct aq_nic_s *nic = netdev_priv(ctx->netdev);		\ +	int ret;							\ +	mutex_lock(&nic->macsec_mutex);					\ +	ret = aq_mdo_##mdo(ctx);					\ +	mutex_unlock(&nic->macsec_mutex);				\ +	return ret;							\ +} + +AQ_LOCKED_MDO_DEF(dev_open) +AQ_LOCKED_MDO_DEF(dev_stop) +AQ_LOCKED_MDO_DEF(add_secy) +AQ_LOCKED_MDO_DEF(upd_secy) +AQ_LOCKED_MDO_DEF(del_secy) +AQ_LOCKED_MDO_DEF(add_rxsc) +AQ_LOCKED_MDO_DEF(upd_rxsc) +AQ_LOCKED_MDO_DEF(del_rxsc) +AQ_LOCKED_MDO_DEF(add_rxsa) +AQ_LOCKED_MDO_DEF(upd_rxsa) +AQ_LOCKED_MDO_DEF(del_rxsa) +AQ_LOCKED_MDO_DEF(add_txsa) +AQ_LOCKED_MDO_DEF(upd_txsa) +AQ_LOCKED_MDO_DEF(del_txsa) +AQ_LOCKED_MDO_DEF(get_dev_stats) +AQ_LOCKED_MDO_DEF(get_tx_sc_stats) +AQ_LOCKED_MDO_DEF(get_tx_sa_stats) +AQ_LOCKED_MDO_DEF(get_rx_sc_stats) +AQ_LOCKED_MDO_DEF(get_rx_sa_stats) +  const struct macsec_ops aq_macsec_ops = { -	.mdo_dev_open = aq_mdo_dev_open, -	.mdo_dev_stop = aq_mdo_dev_stop, -	.mdo_add_secy = aq_mdo_add_secy, -	.mdo_upd_secy = aq_mdo_upd_secy, -	.mdo_del_secy = aq_mdo_del_secy, -	.mdo_add_rxsc = aq_mdo_add_rxsc, -	.mdo_upd_rxsc = aq_mdo_upd_rxsc, -	.mdo_del_rxsc = aq_mdo_del_rxsc, -	.mdo_add_rxsa = aq_mdo_add_rxsa, -	.mdo_upd_rxsa = aq_mdo_upd_rxsa, -	.mdo_del_rxsa = aq_mdo_del_rxsa, -	.mdo_add_txsa = aq_mdo_add_txsa, -	.mdo_upd_txsa = aq_mdo_upd_txsa, -	.mdo_del_txsa = aq_mdo_del_txsa, -	.mdo_get_dev_stats = aq_mdo_get_dev_stats, -	.mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats, -	.mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats, -	.mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats, -	.mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats, +	.mdo_dev_open = aq_locked_mdo_dev_open, +	.mdo_dev_stop = aq_locked_mdo_dev_stop, +	.mdo_add_secy = aq_locked_mdo_add_secy, +	.mdo_upd_secy = aq_locked_mdo_upd_secy, +	.mdo_del_secy = aq_locked_mdo_del_secy, +	.mdo_add_rxsc = aq_locked_mdo_add_rxsc, +	.mdo_upd_rxsc = aq_locked_mdo_upd_rxsc, +	.mdo_del_rxsc = aq_locked_mdo_del_rxsc, +	.mdo_add_rxsa = aq_locked_mdo_add_rxsa, +	.mdo_upd_rxsa = aq_locked_mdo_upd_rxsa, +	.mdo_del_rxsa = aq_locked_mdo_del_rxsa, +	.mdo_add_txsa = aq_locked_mdo_add_txsa, +	.mdo_upd_txsa = aq_locked_mdo_upd_txsa, +	.mdo_del_txsa = aq_locked_mdo_del_txsa, +	.mdo_get_dev_stats = aq_locked_mdo_get_dev_stats, +	.mdo_get_tx_sc_stats = aq_locked_mdo_get_tx_sc_stats, +	.mdo_get_tx_sa_stats = aq_locked_mdo_get_tx_sa_stats, +	.mdo_get_rx_sc_stats = aq_locked_mdo_get_rx_sc_stats, +	.mdo_get_rx_sa_stats = aq_locked_mdo_get_rx_sa_stats,  };  int aq_macsec_init(struct aq_nic_s *nic) @@ -1435,6 +1468,7 @@ int aq_macsec_init(struct aq_nic_s *nic)  	nic->ndev->features |= NETIF_F_HW_MACSEC;  	nic->ndev->macsec_ops = &aq_macsec_ops; +	mutex_init(&nic->macsec_mutex);  	return 0;  } @@ -1458,7 +1492,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)  	if (!nic->macsec_cfg)  		return 0; -	rtnl_lock(); +	mutex_lock(&nic->macsec_mutex);  	if (nic->aq_fw_ops->send_macsec_req) {  		struct 
macsec_cfg_request cfg = { 0 }; @@ -1507,7 +1541,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)  	ret = aq_apply_macsec_cfg(nic);  unlock: -	rtnl_unlock(); +	mutex_unlock(&nic->macsec_mutex);  	return ret;  } @@ -1519,9 +1553,9 @@ void aq_macsec_work(struct aq_nic_s *nic)  	if (!netif_carrier_ok(nic->ndev))  		return; -	rtnl_lock(); +	mutex_lock(&nic->macsec_mutex);  	aq_check_txsa_expiration(nic); -	rtnl_unlock(); +	mutex_unlock(&nic->macsec_mutex);  }  int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic) @@ -1532,21 +1566,30 @@ int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)  	if (!cfg)  		return 0; +	mutex_lock(&nic->macsec_mutex); +  	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {  		if (!test_bit(i, &cfg->rxsc_idx_busy))  			continue;  		cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy);  	} +	mutex_unlock(&nic->macsec_mutex);  	return cnt;  }  int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)  { +	int cnt; +  	if (!nic->macsec_cfg)  		return 0; -	return hweight_long(nic->macsec_cfg->txsc_idx_busy); +	mutex_lock(&nic->macsec_mutex); +	cnt = hweight_long(nic->macsec_cfg->txsc_idx_busy); +	mutex_unlock(&nic->macsec_mutex); + +	return cnt;  }  int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic) @@ -1557,12 +1600,15 @@ int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)  	if (!cfg)  		return 0; +	mutex_lock(&nic->macsec_mutex); +  	for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {  		if (!test_bit(i, &cfg->txsc_idx_busy))  			continue;  		cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy);  	} +	mutex_unlock(&nic->macsec_mutex);  	return cnt;  } @@ -1634,6 +1680,8 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)  	if (!cfg)  		return data; +	mutex_lock(&nic->macsec_mutex); +  	aq_macsec_update_stats(nic);  	common_stats = &cfg->stats; @@ -1716,5 +1764,7 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)  	data += i; +	mutex_unlock(&nic->macsec_mutex); +  	return data;  } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 8a0af371e7dc..77609dc0a08d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -58,7 +58,7 @@ struct net_device *aq_ndev_alloc(void)  	return ndev;  } -static int aq_ndev_open(struct net_device *ndev) +int aq_ndev_open(struct net_device *ndev)  {  	struct aq_nic_s *aq_nic = netdev_priv(ndev);  	int err = 0; @@ -88,7 +88,7 @@ err_exit:  	return err;  } -static int aq_ndev_close(struct net_device *ndev) +int aq_ndev_close(struct net_device *ndev)  {  	struct aq_nic_s *aq_nic = netdev_priv(ndev);  	int err = 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h index 99870865f66d..a78c1a168d8e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h @@ -16,5 +16,7 @@ DECLARE_STATIC_KEY_FALSE(aq_xdp_locking_key);  void aq_ndev_schedule_work(struct work_struct *work);  struct net_device *aq_ndev_alloc(void); +int aq_ndev_open(struct net_device *ndev); +int aq_ndev_close(struct net_device *ndev);  #endif /* AQ_MAIN_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 935ba889bd9a..ad33f8586532 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -157,6 +157,8 @@ struct aq_nic_s {  	struct mutex fwreq_mutex;  #if IS_ENABLED(CONFIG_MACSEC)  	struct aq_macsec_cfg *macsec_cfg; +	/* mutex to protect data in 
macsec_cfg */ +	struct mutex macsec_mutex;  #endif  	/* PTP support */  	struct aq_ptp_s *aq_ptp; diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c index 36c7cf05630a..431924959520 100644 --- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c +++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c @@ -757,6 +757,7 @@ set_ingress_sakey_record(struct aq_hw_s *hw,  			 u16 table_index)  {  	u16 packed_record[18]; +	int ret;  	if (table_index >= NUMROWS_INGRESSSAKEYRECORD)  		return -EINVAL; @@ -789,9 +790,12 @@ set_ingress_sakey_record(struct aq_hw_s *hw,  	packed_record[16] = rec->key_len & 0x3; -	return set_raw_ingress_record(hw, packed_record, 18, 2, -				      ROWOFFSET_INGRESSSAKEYRECORD + -					      table_index); +	ret = set_raw_ingress_record(hw, packed_record, 18, 2, +				     ROWOFFSET_INGRESSSAKEYRECORD + +				     table_index); + +	memzero_explicit(packed_record, sizeof(packed_record)); +	return ret;  }  int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw, @@ -1739,14 +1743,14 @@ static int set_egress_sakey_record(struct aq_hw_s *hw,  	ret = set_raw_egress_record(hw, packed_record, 8, 2,  				    ROWOFFSET_EGRESSSAKEYRECORD + table_index);  	if (unlikely(ret)) -		return ret; +		goto clear_key;  	ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,  				    ROWOFFSET_EGRESSSAKEYRECORD + table_index -  					    32); -	if (unlikely(ret)) -		return ret; -	return 0; +clear_key: +	memzero_explicit(packed_record, sizeof(packed_record)); +	return ret;  }  int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw, diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index cc932b3cf873..4a1efe9b37d0 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1427,7 +1427,7 @@ static int ag71xx_open(struct net_device *ndev)  	if (ret) {  		netif_err(ag, link, ndev, "phylink_of_phy_connect filed with err: %i\n",  			  ret); -		goto err; +		return ret;  	}  	max_frame_len = ag71xx_max_frame_len(ndev->mtu); @@ -1448,6 +1448,7 @@ static int ag71xx_open(struct net_device *ndev)  err:  	ag71xx_rings_cleanup(ag); +	phylink_disconnect_phy(ag->phylink);  	return ret;  } diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index f4e1ca68d831..f4ca0c6c0f51 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -71,6 +71,7 @@ config BCM63XX_ENET  config BCMGENET  	tristate "Broadcom GENET internal MAC support"  	depends on HAS_IOMEM +	depends on PTP_1588_CLOCK_OPTIONAL || !ARCH_BCM2835  	select MII  	select PHYLIB  	select FIXED_PHY diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 93ccf549e2ed..a737b1913cf9 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -561,8 +561,6 @@ static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_devic  	if (++ring->write_idx == ring->length - 1)  		ring->write_idx = 0; -	enet->netdev->stats.tx_bytes += skb->len; -	enet->netdev->stats.tx_packets++;  	return NETDEV_TX_OK;  } @@ -635,6 +633,7 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)  	struct bcm4908_enet_dma_ring_bd *buf_desc;  	struct bcm4908_enet_dma_ring_slot *slot;  	struct device *dev = enet->dev; +	unsigned int bytes = 0;  	int handled = 0;  	while (handled < weight && tx_ring->read_idx 
!= tx_ring->write_idx) { @@ -645,12 +644,17 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)  		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);  		dev_kfree_skb(slot->skb); -		if (++tx_ring->read_idx == tx_ring->length) -			tx_ring->read_idx = 0;  		handled++; +		bytes += slot->len; + +		if (++tx_ring->read_idx == tx_ring->length) +			tx_ring->read_idx = 0;  	} +	enet->netdev->stats.tx_packets += handled; +	enet->netdev->stats.tx_bytes += bytes; +  	if (handled < weight) {  		napi_complete_done(napi, handled);  		bcm4908_enet_dma_ring_intrs_on(enet, tx_ring); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 867f14c30e09..425d6ccd5413 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1991,6 +1991,9 @@ static int bcm_sysport_open(struct net_device *dev)  		goto out_clk_disable;  	} +	/* Indicate that the MAC is responsible for PHY PM */ +	phydev->mac_managed_pm = true; +  	/* Reset house keeping link status */  	priv->old_duplex = -1;  	priv->old_link = -1; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 5fb3af5670ec..3038386a5afd 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1568,7 +1568,6 @@ void bgmac_enet_remove(struct bgmac *bgmac)  	phy_disconnect(bgmac->net_dev->phydev);  	netif_napi_del(&bgmac->napi);  	bgmac_dma_free(bgmac); -	free_netdev(bgmac->net_dev);  }  EXPORT_SYMBOL_GPL(bgmac_enet_remove); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 11d15cd03600..77d4cb4ad782 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -795,16 +795,20 @@ static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)  static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)  { -	struct pci_dev *dev;  	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); +	struct pci_dev *dev; +	bool pending;  	if (!vf)  		return false;  	dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn); -	if (dev) -		return bnx2x_is_pcie_pending(dev); -	return false; +	if (!dev) +		return false; +	pending = bnx2x_is_pcie_pending(dev); +	pci_dev_put(dev); + +	return pending;  }  int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 04cf7684f1b0..9f8a6ce4b356 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -9983,17 +9983,12 @@ static int bnxt_try_recover_fw(struct bnxt *bp)  	return -ENODEV;  } -int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) +static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)  {  	struct bnxt_hw_resc *hw_resc = &bp->hw_resc; -	int rc;  	if (!BNXT_NEW_RM(bp)) -		return 0; /* no resource reservations required */ - -	rc = bnxt_hwrm_func_resc_qcaps(bp, true); -	if (rc) -		netdev_err(bp->dev, "resc_qcaps failed\n"); +		return; /* no resource reservations required */  	hw_resc->resv_cp_rings = 0;  	hw_resc->resv_stat_ctxs = 0; @@ -10006,6 +10001,20 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)  		bp->tx_nr_rings = 0;  		bp->rx_nr_rings = 0;  	} +} + +int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) +{ +	int rc; + +	if (!BNXT_NEW_RM(bp)) +		return 0; /* 
no resource reservations required */ + +	rc = bnxt_hwrm_func_resc_qcaps(bp, true); +	if (rc) +		netdev_err(bp->dev, "resc_qcaps failed\n"); + +	bnxt_clear_reservations(bp, fw_reset);  	return rc;  } @@ -12894,8 +12903,8 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,  	rcu_read_lock();  	hlist_for_each_entry_rcu(fltr, head, hash) {  		if (bnxt_fltr_match(fltr, new_fltr)) { +			rc = fltr->sw_id;  			rcu_read_unlock(); -			rc = 0;  			goto err_free;  		}  	} @@ -13913,7 +13922,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)  	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;  	struct net_device *netdev = pci_get_drvdata(pdev);  	struct bnxt *bp = netdev_priv(netdev); -	int err = 0, off; +	int retry = 0; +	int err = 0; +	int off;  	netdev_info(bp->dev, "PCI Slot Reset\n"); @@ -13941,11 +13952,36 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)  		pci_restore_state(pdev);  		pci_save_state(pdev); +		bnxt_inv_fw_health_reg(bp); +		bnxt_try_map_fw_health_reg(bp); + +		/* In some PCIe AER scenarios, firmware may take up to +		 * 10 seconds to become ready in the worst case. +		 */ +		do { +			err = bnxt_try_recover_fw(bp); +			if (!err) +				break; +			retry++; +		} while (retry < BNXT_FW_SLOT_RESET_RETRY); + +		if (err) { +			dev_err(&pdev->dev, "Firmware not ready\n"); +			goto reset_exit; +		} +  		err = bnxt_hwrm_func_reset(bp);  		if (!err)  			result = PCI_ERS_RESULT_RECOVERED; + +		bnxt_ulp_irq_stop(bp); +		bnxt_clear_int_mode(bp); +		err = bnxt_init_int_mode(bp); +		bnxt_ulp_irq_restart(bp, err);  	} +reset_exit: +	bnxt_clear_reservations(bp, true);  	rtnl_unlock();  	return result; @@ -14001,8 +14037,16 @@ static struct pci_driver bnxt_pci_driver = {  static int __init bnxt_init(void)  { +	int err; +  	bnxt_debug_init(); -	return pci_register_driver(&bnxt_pci_driver); +	err = pci_register_driver(&bnxt_pci_driver); +	if (err) { +		bnxt_debug_exit(); +		return err; +	} + +	return 0;  }  static void __exit bnxt_exit(void) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index b1b17f911300..d5fa43cfe524 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1621,6 +1621,7 @@ struct bnxt_fw_health {  #define BNXT_FW_RETRY			5  #define BNXT_FW_IF_RETRY		10 +#define BNXT_FW_SLOT_RESET_RETRY	4  enum board_idx {  	BCM57301, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index a36803e79e92..8a6f788f6294 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -613,6 +613,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti  static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)  { +	bool rc = false;  	u32 datalen;  	u16 index;  	u8 *buf; @@ -632,20 +633,20 @@ static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)  	if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {  		NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error"); -		goto err; +		goto done;  	}  	if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,  			     BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {  		NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error"); -		goto err; +		goto done;  	} -	return true; +	rc = true; -err: +done:  	kfree(buf); -	return false; +	return rc;  }  static bool bnxt_dl_selftest_check(struct devlink *dl, 
unsigned int id, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index f57e524c7e30..8cad15c458b3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -162,7 +162,7 @@ static int bnxt_set_coalesce(struct net_device *dev,  	}  reset_coalesce: -	if (netif_running(dev)) { +	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {  		if (update_stats) {  			rc = bnxt_close_nic(bp, true, false);  			if (!rc) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c index b01d42928a53..132442f16fe6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -476,7 +476,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)  		memset(ctx->resp, 0, PAGE_SIZE);  	req_type = le16_to_cpu(ctx->req->req_type); -	if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) { +	if (BNXT_NO_FW_ACCESS(bp) && +	    (req_type != HWRM_FUNC_RESET && req_type != HWRM_VER_GET)) {  		netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",  			   req_type);  		goto exit; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 2198e35d9e18..7926aaef8f0c 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1027,16 +1027,14 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)  	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;  	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, -					   &udev->l2_ring_map, -					   GFP_KERNEL | __GFP_COMP); +					   &udev->l2_ring_map, GFP_KERNEL);  	if (!udev->l2_ring)  		return -ENOMEM;  	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;  	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);  	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, -					  &udev->l2_buf_map, -					  GFP_KERNEL | __GFP_COMP); +					  &udev->l2_buf_map, GFP_KERNEL);  	if (!udev->l2_buf) {  		__cnic_free_uio_rings(udev);  		return -ENOMEM; @@ -4105,7 +4103,7 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)  	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)  		atomic_set(&cp->csk_tbl[i].ref_count, 0); -	port_id = prandom_u32_max(CNIC_LOCAL_PORT_RANGE); +	port_id = get_random_u32_below(CNIC_LOCAL_PORT_RANGE);  	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,  			     CNIC_LOCAL_PORT_MIN, port_id)) {  		cnic_cm_free_mem(dev); diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 51c9fd6f68a4..4f63f1ba3161 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -806,6 +806,7 @@ static int macb_mii_probe(struct net_device *dev)  	bp->phylink_config.dev = &dev->dev;  	bp->phylink_config.type = PHYLINK_NETDEV; +	bp->phylink_config.mac_managed_pm = true;  	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {  		bp->phylink_config.poll_fixed_state = true; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index d312bd594935..98793b2ac2c7 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1794,13 +1794,10 @@ static int liquidio_open(struct net_device *netdev)  	ifstate_set(lio, LIO_IFSTATE_RUNNING); -	if (OCTEON_CN23XX_PF(oct)) { -		if (!oct->msix_on) -			if 
(setup_tx_poll_fn(netdev)) -				return -1; -	} else { -		if (setup_tx_poll_fn(netdev)) -			return -1; +	if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) { +		ret = setup_tx_poll_fn(netdev); +		if (ret) +			goto err_poll;  	}  	netif_tx_start_all_queues(netdev); @@ -1813,7 +1810,7 @@ static int liquidio_open(struct net_device *netdev)  	/* tell Octeon to start forwarding packets to host */  	ret = send_rx_ctrl_cmd(lio, 1);  	if (ret) -		return ret; +		goto err_rx_ctrl;  	/* start periodical statistics fetch */  	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); @@ -1824,6 +1821,27 @@ static int liquidio_open(struct net_device *netdev)  	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",  		 netdev->name); +	return 0; + +err_rx_ctrl: +	if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) +		cleanup_tx_poll_fn(netdev); +err_poll: +	if (lio->ptp_clock) { +		ptp_clock_unregister(lio->ptp_clock); +		lio->ptp_clock = NULL; +	} + +	if (oct->props[lio->ifidx].napi_enabled == 1) { +		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) +			napi_disable(napi); + +		oct->props[lio->ifidx].napi_enabled = 0; + +		if (OCTEON_CN23XX_PF(oct)) +			oct->droq[0]->ops.poll_mode = 0; +	} +  	return ret;  } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 98f3dc460ca7..f2f95493ec89 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2239,7 +2239,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	err = register_netdev(netdev);  	if (err) {  		dev_err(dev, "Failed to register netdevice\n"); -		goto err_unregister_interrupts; +		goto err_destroy_workqueue;  	}  	nic->msg_enable = debug; @@ -2248,6 +2248,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	return 0; +err_destroy_workqueue: +	destroy_workqueue(nic->nicvf_rx_mode_wq);  err_unregister_interrupts:  	nicvf_unregister_interrupts(nic);  err_free_netdev: diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 2f6484dc186a..7eb2ddbe9bad 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1436,8 +1436,10 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,  		return AE_OK;  	} -	if (strncmp(string.pointer, bgx_sel, 4)) +	if (strncmp(string.pointer, bgx_sel, 4)) { +		kfree(string.pointer);  		return AE_OK; +	}  	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,  			    bgx_acpi_register_phy, NULL, bgx, NULL); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index a52e6b6e2876..9b84c8d8d309 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1301,6 +1301,7 @@ static int cxgb_up(struct adapter *adap)  		if (ret < 0) {  			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);  			t3_intr_disable(adap); +			quiesce_rx(adap);  			free_irq_resources(adap);  			err = ret;  			goto out; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 54db79f4dcfe..63b2bd084130 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -858,7 +858,7 @@ static int cxgb4vf_open(struct net_device *dev)  	 */  	err = t4vf_update_port_info(pi);  	if (err < 0) -		
return err; +		goto err_unwind;  	/*  	 * Note that this interface is up and start everything up ... diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c index a4256087ac82..ae6b17b96bf1 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c @@ -919,8 +919,8 @@ static int csk_wait_memory(struct chtls_dev *cdev,  	current_timeo = *timeo_p;  	noblock = (*timeo_p ? false : true);  	if (csk_mem_free(cdev, sk)) { -		current_timeo = prandom_u32_max(HZ / 5) + 2; -		vm_wait = prandom_u32_max(HZ / 5) + 2; +		current_timeo = get_random_u32_below(HZ / 5) + 2; +		vm_wait = get_random_u32_below(HZ / 5) + 2;  	}  	add_wait_queue(sk_sleep(sk), &wait); diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c index a523ddda7609..de7105a84747 100644 --- a/drivers/net/ethernet/davicom/dm9051.c +++ b/drivers/net/ethernet/davicom/dm9051.c @@ -798,8 +798,10 @@ static int dm9051_loop_rx(struct board_info *db)  		}  		ret = dm9051_stop_mrcmd(db); -		if (ret) +		if (ret) { +			dev_kfree_skb(skb);  			return ret; +		}  		skb->protocol = eth_type_trans(skb, db->ndev);  		if (db->ndev->features & NETIF_F_RXCSUM) diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 48fb391951dd..13d5ff4e0e02 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -542,6 +542,27 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)  	return (budget != 0);  } +static bool tsnep_tx_pending(struct tsnep_tx *tx) +{ +	unsigned long flags; +	struct tsnep_tx_entry *entry; +	bool pending = false; + +	spin_lock_irqsave(&tx->lock, flags); + +	if (tx->read != tx->write) { +		entry = &tx->entry[tx->read]; +		if ((__le32_to_cpu(entry->desc_wb->properties) & +		     TSNEP_TX_DESC_OWNER_MASK) == +		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) +			pending = true; +	} + +	spin_unlock_irqrestore(&tx->lock, flags); + +	return pending; +} +  static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,  			 int queue_index, struct tsnep_tx *tx)  { @@ -821,6 +842,19 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,  	return done;  } +static bool tsnep_rx_pending(struct tsnep_rx *rx) +{ +	struct tsnep_rx_entry *entry; + +	entry = &rx->entry[rx->read]; +	if ((__le32_to_cpu(entry->desc_wb->properties) & +	     TSNEP_DESC_OWNER_COUNTER_MASK) == +	    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) +		return true; + +	return false; +} +  static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,  			 int queue_index, struct tsnep_rx *rx)  { @@ -866,6 +900,17 @@ static void tsnep_rx_close(struct tsnep_rx *rx)  	tsnep_rx_ring_cleanup(rx);  } +static bool tsnep_pending(struct tsnep_queue *queue) +{ +	if (queue->tx && tsnep_tx_pending(queue->tx)) +		return true; + +	if (queue->rx && tsnep_rx_pending(queue->rx)) +		return true; + +	return false; +} +  static int tsnep_poll(struct napi_struct *napi, int budget)  {  	struct tsnep_queue *queue = container_of(napi, struct tsnep_queue, @@ -886,9 +931,19 @@ static int tsnep_poll(struct napi_struct *napi, int budget)  	if (!complete)  		return budget; -	if (likely(napi_complete_done(napi, done))) +	if (likely(napi_complete_done(napi, done))) {  		tsnep_enable_irq(queue->adapter, queue->irq_mask); +		/* reschedule if work is already pending, 
prevent rotten packets +		 * which are transmitted or received after polling but before +		 * interrupt enable +		 */ +		if (tsnep_pending(queue)) { +			tsnep_disable_irq(queue->adapter, queue->irq_mask); +			napi_schedule(napi); +		} +	} +  	return min(done, budget - 1);  } diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 31cfa121333d..fc68a32ce2f7 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -221,8 +221,8 @@ static int dpaa_netdev_init(struct net_device *net_dev,  	net_dev->netdev_ops = dpaa_ops;  	mac_addr = mac_dev->addr; -	net_dev->mem_start = (unsigned long)mac_dev->vaddr; -	net_dev->mem_end = (unsigned long)mac_dev->vaddr_end; +	net_dev->mem_start = (unsigned long)priv->mac_dev->res->start; +	net_dev->mem_end = (unsigned long)priv->mac_dev->res->end;  	net_dev->min_mtu = ETH_MIN_MTU;  	net_dev->max_mtu = dpaa_get_max_mtu(); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c index 258eb6c8f4c0..4fee74c024bd 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c @@ -18,7 +18,7 @@ static ssize_t dpaa_eth_show_addr(struct device *dev,  	if (mac_dev)  		return sprintf(buf, "%llx", -				(unsigned long long)mac_dev->vaddr); +				(unsigned long long)mac_dev->res->start);  	else  		return sprintf(buf, "none");  } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c index cacd454ac696..c39b866e2582 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -132,6 +132,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,  						 DMA_TO_DEVICE);  	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {  		dev_err(dev, "DMA mapping failed\n"); +		kfree(cmd_buff);  		return -EFAULT;  	} @@ -142,6 +143,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,  			 DMA_TO_DEVICE);  	if (err) {  		dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err); +		kfree(cmd_buff);  		return err;  	} @@ -172,6 +174,7 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,  						 DMA_TO_DEVICE);  	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {  		dev_err(dev, "DMA mapping failed\n"); +		kfree(cmd_buff);  		return -EFAULT;  	} @@ -182,6 +185,7 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,  			 DMA_TO_DEVICE);  	if (err) {  		dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err); +		kfree(cmd_buff);  		return err;  	} diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 54bc92fc6bf0..8671591cb750 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2058,7 +2058,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)  	/* enable Tx ints by setting pkt thr to 1 */  	enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1); -	tbmr = ENETC_TBMR_EN; +	tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio);  	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)  		tbmr |= ENETC_TBMR_VIH; @@ -2090,7 +2090,12 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)  	else  		
enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); +	/* Also prepare the consumer index in case page allocation never +	 * succeeds. In that case, hardware will never advance producer index +	 * to match consumer index, and will drop all frames. +	 */  	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); +	enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);  	/* enable Rx ints by setting pkt thr to 1 */  	enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1); @@ -2456,7 +2461,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)  		/* Reset all ring priorities to 0 */  		for (i = 0; i < priv->num_tx_rings; i++) {  			tx_ring = priv->tx_ring[i]; -			enetc_set_bdr_prio(hw, tx_ring->index, 0); +			tx_ring->prio = 0; +			enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);  		}  		return 0; @@ -2475,7 +2481,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)  	 */  	for (i = 0; i < num_tc; i++) {  		tx_ring = priv->tx_ring[i]; -		enetc_set_bdr_prio(hw, tx_ring->index, i); +		tx_ring->prio = i; +		enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);  	}  	/* Reset the number of netdev queues based on the TC count */ diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 161930a65f61..c6d8cc15c270 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -95,6 +95,7 @@ struct enetc_bdr {  		void __iomem *rcir;  	};  	u16 index; +	u16 prio;  	int bd_count; /* # of BDs */  	int next_to_use;  	int next_to_clean; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index a842e1999122..fcebb54224c0 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -137,6 +137,7 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)  	struct tc_taprio_qopt_offload *taprio = type_data;  	struct enetc_ndev_priv *priv = netdev_priv(ndev);  	struct enetc_hw *hw = &priv->si->hw; +	struct enetc_bdr *tx_ring;  	int err;  	int i; @@ -145,16 +146,20 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)  		if (priv->tx_ring[i]->tsd_enable)  			return -EBUSY; -	for (i = 0; i < priv->num_tx_rings; i++) -		enetc_set_bdr_prio(hw, priv->tx_ring[i]->index, -				   taprio->enable ? i : 0); +	for (i = 0; i < priv->num_tx_rings; i++) { +		tx_ring = priv->tx_ring[i]; +		tx_ring->prio = taprio->enable ? i : 0; +		enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); +	}  	err = enetc_setup_taprio(ndev, taprio); - -	if (err) -		for (i = 0; i < priv->num_tx_rings; i++) -			enetc_set_bdr_prio(hw, priv->tx_ring[i]->index, -					   taprio->enable ? 0 : i); +	if (err) { +		for (i = 0; i < priv->num_tx_rings; i++) { +			tx_ring = priv->tx_ring[i]; +			tx_ring->prio = taprio->enable ? 
0 : i; +			enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); +		} +	}  	return err;  } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 98d5cd313fdd..23e1a94b9ce4 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -74,7 +74,7 @@  #include "fec.h"  static void set_multicast_list(struct net_device *ndev); -static void fec_enet_itr_coal_init(struct net_device *ndev); +static void fec_enet_itr_coal_set(struct net_device *ndev);  #define DRIVER_NAME	"fec" @@ -713,7 +713,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,  		dev_kfree_skb_any(skb);  		if (net_ratelimit())  			netdev_err(ndev, "Tx DMA memory map failed\n"); -		return NETDEV_TX_BUSY; +		return NETDEV_TX_OK;  	}  	bdp->cbd_datlen = cpu_to_fec16(size); @@ -775,7 +775,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,  			dev_kfree_skb_any(skb);  			if (net_ratelimit())  				netdev_err(ndev, "Tx DMA memory map failed\n"); -			return NETDEV_TX_BUSY; +			return NETDEV_TX_OK;  		}  	} @@ -1220,8 +1220,8 @@ fec_restart(struct net_device *ndev)  		writel(0, fep->hwp + FEC_IMASK);  	/* Init the interrupt coalescing */ -	fec_enet_itr_coal_init(ndev); - +	if (fep->quirks & FEC_QUIRK_HAS_COALESCE) +		fec_enet_itr_coal_set(ndev);  }  static int fec_enet_ipc_handle_init(struct fec_enet_private *fep) @@ -2432,6 +2432,31 @@ static u32 fec_enet_register_offset[] = {  	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,  	IEEE_R_FDXFC, IEEE_R_OCTETS_OK  }; +/* for i.MX6ul */ +static u32 fec_enet_register_offset_6ul[] = { +	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, +	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, +	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0, +	FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, +	FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0, +	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, +	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, +	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, +	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, +	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, +	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, +	RMON_T_P_GTE2048, RMON_T_OCTETS, +	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, +	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, +	IEEE_T_FDXFC, IEEE_T_OCTETS_OK, +	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, +	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, +	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, +	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, +	RMON_R_P_GTE2048, RMON_R_OCTETS, +	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, +	IEEE_R_FDXFC, IEEE_R_OCTETS_OK +};  #else  static __u32 fec_enet_register_version = 1;  static u32 fec_enet_register_offset[] = { @@ -2456,7 +2481,24 @@ static void fec_enet_get_regs(struct net_device *ndev,  	u32 *buf = (u32 *)regbuf;  	u32 i, off;  	int ret; +#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ +	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ +	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) +	u32 *reg_list; +	u32 reg_cnt; +	if (!of_machine_is_compatible("fsl,imx6ul")) { +		reg_list = fec_enet_register_offset; +	
	reg_cnt = ARRAY_SIZE(fec_enet_register_offset); +	} else { +		reg_list = fec_enet_register_offset_6ul; +		reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul); +	} +#else +	/* coldfire */ +	static u32 *reg_list = fec_enet_register_offset; +	static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); +#endif  	ret = pm_runtime_resume_and_get(dev);  	if (ret < 0)  		return; @@ -2465,8 +2507,8 @@ static void fec_enet_get_regs(struct net_device *ndev,  	memset(buf, 0, regs->len); -	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { -		off = fec_enet_register_offset[i]; +	for (i = 0; i < reg_cnt; i++) { +		off = reg_list[i];  		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&  		    !(fep->quirks & FEC_QUIRK_HAS_FRREG)) @@ -2814,19 +2856,6 @@ static int fec_enet_set_coalesce(struct net_device *ndev,  	return 0;  } -static void fec_enet_itr_coal_init(struct net_device *ndev) -{ -	struct ethtool_coalesce ec; - -	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; -	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; - -	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; -	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; - -	fec_enet_set_coalesce(ndev, &ec, NULL, NULL); -} -  static int fec_enet_get_tunable(struct net_device *netdev,  				const struct ethtool_tunable *tuna,  				void *data) @@ -3581,6 +3610,10 @@ static int fec_enet_init(struct net_device *ndev)  	fep->rx_align = 0x3;  	fep->tx_align = 0x3;  #endif +	fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT; +	fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT; +	fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT; +	fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;  	/* Check mask of the streaming and coherent API */  	ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32)); diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 7b7526fd7da3..13e67f2864be 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -279,7 +279,6 @@ static int mac_probe(struct platform_device *_of_dev)  	struct device_node	*mac_node, *dev_node;  	struct mac_device	*mac_dev;  	struct platform_device	*of_dev; -	struct resource		*res;  	struct mac_priv_s	*priv;  	struct fman_mac_params	 params;  	u32			 val; @@ -338,24 +337,25 @@ static int mac_probe(struct platform_device *_of_dev)  	of_node_put(dev_node);  	/* Get the address of the memory mapped registers */ -	res = platform_get_mem_or_io(_of_dev, 0); -	if (!res) { +	mac_dev->res = platform_get_mem_or_io(_of_dev, 0); +	if (!mac_dev->res) {  		dev_err(dev, "could not get registers\n");  		return -EINVAL;  	} -	err = devm_request_resource(dev, fman_get_mem_region(priv->fman), res); +	err = devm_request_resource(dev, fman_get_mem_region(priv->fman), +				    mac_dev->res);  	if (err) {  		dev_err_probe(dev, err, "could not request resource\n");  		return err;  	} -	mac_dev->vaddr = devm_ioremap(dev, res->start, resource_size(res)); +	mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start, +				      resource_size(mac_dev->res));  	if (!mac_dev->vaddr) {  		dev_err(dev, "devm_ioremap() failed\n");  		return -EIO;  	} -	mac_dev->vaddr_end = mac_dev->vaddr + resource_size(res);  	if (!of_device_is_available(mac_node))  		return -ENODEV; @@ -487,12 +487,21 @@ _return_of_node_put:  	return err;  } +static int mac_remove(struct platform_device *pdev) +{ +	struct mac_device *mac_dev = platform_get_drvdata(pdev); + +	platform_device_unregister(mac_dev->priv->eth_dev); +	return 0; +} +  static struct platform_driver mac_driver = {  	.driver = {  		
.name		= KBUILD_MODNAME,  		.of_match_table	= mac_match,  	},  	.probe		= mac_probe, +	.remove		= mac_remove,  };  builtin_platform_driver(mac_driver); diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index b95d384271bd..13b69ca5f00c 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -20,8 +20,8 @@ struct mac_priv_s;  struct mac_device {  	void __iomem		*vaddr; -	void __iomem		*vaddr_end;  	struct device		*dev; +	struct resource		*res;  	u8			 addr[ETH_ALEN];  	struct fman_port	*port[2];  	u32			 if_support; diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 93846bace028..ce2571c16e43 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -283,7 +283,7 @@ static int hisi_femac_rx(struct net_device *dev, int limit)  		skb->protocol = eth_type_trans(skb, dev);  		napi_gro_receive(&priv->napi, skb);  		dev->stats.rx_packets++; -		dev->stats.rx_bytes += skb->len; +		dev->stats.rx_bytes += len;  next:  		pos = (pos + 1) % rxq->num;  		if (rx_pkts_num >= limit) diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index ffcf797dfa90..f867e9531117 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -550,7 +550,7 @@ static int hix5hd2_rx(struct net_device *dev, int limit)  		skb->protocol = eth_type_trans(skb, dev);  		napi_gro_receive(&priv->napi, skb);  		dev->stats.rx_packets++; -		dev->stats.rx_bytes += skb->len; +		dev->stats.rx_bytes += len;  next:  		pos = dma_ring_incr(pos, RX_DESC_NUM);  	} diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 00fafc0f8512..430eccea8e5e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)  	hdev->cls_dev.release = hnae_release;  	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);  	ret = device_register(&hdev->cls_dev); -	if (ret) +	if (ret) { +		put_device(&hdev->cls_dev);  		return ret; +	}  	__module_get(THIS_MODULE); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 0179fc288f5f..17137de9338c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -819,7 +819,6 @@ struct hnae3_knic_private_info {  	const struct hnae3_dcb_ops *dcb_ops;  	u16 int_rl_setting; -	enum pkt_hash_types rss_type;  	void __iomem *io_base;  }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c index e23729ac3bb8..ae2736549526 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c @@ -191,23 +191,6 @@ u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)  	return HCLGE_COMM_RSS_KEY_SIZE;  } -void hclge_comm_get_rss_type(struct hnae3_handle *nic, -			     struct hclge_comm_rss_tuple_cfg *rss_tuple_sets) -{ -	if (rss_tuple_sets->ipv4_tcp_en || -	    rss_tuple_sets->ipv4_udp_en || -	    rss_tuple_sets->ipv4_sctp_en || -	    rss_tuple_sets->ipv6_tcp_en || -	    rss_tuple_sets->ipv6_udp_en || -	    rss_tuple_sets->ipv6_sctp_en) -		nic->kinfo.rss_type = 
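/*
 * For reference, a condensed sketch (hypothetical name) of the logic
 * being deleted here: one device-global hash type derived from the
 * enabled RSS tuple fields. The series replaces it with per-packet
 * classification in hns3_enet.c further below.
 */
static enum pkt_hash_types
rss_type_from_tuples_sketch(const struct hclge_comm_rss_tuple_cfg *c)
{
	/* Any L4 tuple enabled: the hash covers ports, so report L4. */
	if (c->ipv4_tcp_en || c->ipv4_udp_en || c->ipv4_sctp_en ||
	    c->ipv6_tcp_en || c->ipv6_udp_en || c->ipv6_sctp_en)
		return PKT_HASH_TYPE_L4;

	/* Only fragment (IP-header-only) hashing enabled: L3. */
	if (c->ipv4_fragment_en || c->ipv6_fragment_en)
		return PKT_HASH_TYPE_L3;

	return PKT_HASH_TYPE_NONE;
}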
PKT_HASH_TYPE_L4; -	else if (rss_tuple_sets->ipv4_fragment_en || -		 rss_tuple_sets->ipv6_fragment_en) -		nic->kinfo.rss_type = PKT_HASH_TYPE_L3; -	else -		nic->kinfo.rss_type = PKT_HASH_TYPE_NONE; -} -  int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,  			       const u8 hfunc, u8 *hash_algo)  { @@ -344,9 +327,6 @@ int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,  	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;  	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; -	if (is_pf) -		hclge_comm_get_rss_type(nic, &rss_cfg->rss_tuple_sets); -  	ret = hclge_comm_cmd_send(hw, &desc, 1);  	if (ret)  		dev_err(&hw->cmq.csq.pdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h index 946d166a452d..92af3d2980d3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h @@ -95,8 +95,6 @@ struct hclge_comm_rss_tc_mode_cmd {  };  u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle); -void hclge_comm_get_rss_type(struct hnae3_handle *nic, -			     struct hclge_comm_rss_tuple_cfg *rss_tuple_sets);  void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,  				   struct hclge_comm_rss_cfg *rss_cfg);  int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 4cb2421e71a7..028577943ec5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -105,26 +105,28 @@ static const struct pci_device_id hns3_pci_tbl[] = {  };  MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); -#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \ +#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t, h) \  	{	ptype, \  		l, \  		CHECKSUM_##s, \  		HNS3_L3_TYPE_##t, \ -		1 } +		1, \ +		h}  #define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \ -		{ ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 } +		{ ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0, \ +		  PKT_HASH_TYPE_NONE }  static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {  	HNS3_RX_PTYPE_UNUSED_ENTRY(0), -	HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP), -	HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP), -	HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP), -	HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL), -	HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL), -	HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL), -	HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM), -	HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL), +	HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL, PKT_HASH_TYPE_NONE),  	HNS3_RX_PTYPE_UNUSED_ENTRY(9),  	HNS3_RX_PTYPE_UNUSED_ENTRY(10),  	HNS3_RX_PTYPE_UNUSED_ENTRY(11), @@ -132,36 +134,36 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {  	HNS3_RX_PTYPE_UNUSED_ENTRY(13),  	HNS3_RX_PTYPE_UNUSED_ENTRY(14),  	HNS3_RX_PTYPE_UNUSED_ENTRY(15), -	HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL), -	
HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4), -	HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4), -	HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4), -	HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4), -	HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4), -	HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4), -	HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4), -	HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4), -	HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4), +	HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),  	HNS3_RX_PTYPE_UNUSED_ENTRY(26),  	HNS3_RX_PTYPE_UNUSED_ENTRY(27),  	HNS3_RX_PTYPE_UNUSED_ENTRY(28), -	HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL), -	HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL), -	HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4), -	HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4), -	HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4), -	HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4), -	HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4), -	HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4), -	HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4), +	HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),  	HNS3_RX_PTYPE_UNUSED_ENTRY(38), -	HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6), -	HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6), -	HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6), -	HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6), -	HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6), -	HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6), -	HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6), +	HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),  	HNS3_RX_PTYPE_UNUSED_ENTRY(46),  	HNS3_RX_PTYPE_UNUSED_ENTRY(47),  	HNS3_RX_PTYPE_UNUSED_ENTRY(48), @@ -227,35 +229,35 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {  	HNS3_RX_PTYPE_UNUSED_ENTRY(108),  	HNS3_RX_PTYPE_UNUSED_ENTRY(109),  	HNS3_RX_PTYPE_UNUSED_ENTRY(110), -	HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6), -	HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6), -	HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6), -	HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6), -	HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6), -	
HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6), -	HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6), -	HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6), -	HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6), +	HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),  	HNS3_RX_PTYPE_UNUSED_ENTRY(120),  	HNS3_RX_PTYPE_UNUSED_ENTRY(121),  	HNS3_RX_PTYPE_UNUSED_ENTRY(122), -	HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL), -	HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL), -	HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4), -	HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4), -	HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4), -	HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4), -	HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4), -	HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4), -	HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4), +	HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), +	HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),  	HNS3_RX_PTYPE_UNUSED_ENTRY(132), -	HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6), -	HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6), -	HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6), -	HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6), -	HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6), -	HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6), -	HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6), +	HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), +	HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), +	HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),  	HNS3_RX_PTYPE_UNUSED_ENTRY(140),  	HNS3_RX_PTYPE_UNUSED_ENTRY(141),  	HNS3_RX_PTYPE_UNUSED_ENTRY(142), @@ -3776,8 +3778,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,  		desc_cb->reuse_flag = 1;  	} else if (frag_size <= ring->rx_copybreak) {  		ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb); -		if (ret) -			goto out; +		if (!ret) +			return;  	}  out: @@ -4171,15 +4173,35 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,  }  static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, -				     struct sk_buff *skb, u32 rss_hash) +				     struct sk_buff *skb, u32 rss_hash, +				     u32 l234info, u32 ol_info)  { -	struct hnae3_handle *handle = ring->tqp->handle; -	enum pkt_hash_types rss_type; +	enum pkt_hash_types 
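/*
 * A sketch (hypothetical name, driver's own constants) of the
 * per-packet fallback classification used when the advanced RXD layout
 * is not available: derive the hash type from the descriptor's L3/L4
 * id fields rather than from a cached device-wide value.
 */
static enum pkt_hash_types rx_hash_type_sketch(u32 l3_type, u32 l4_type)
{
	if (l3_type == HNS3_L3_TYPE_IPV4 || l3_type == HNS3_L3_TYPE_IPV6) {
		/* A known L4 header means the hash covered the ports. */
		if (l4_type == HNS3_L4_TYPE_UDP ||
		    l4_type == HNS3_L4_TYPE_TCP ||
		    l4_type == HNS3_L4_TYPE_SCTP)
			return PKT_HASH_TYPE_L4;
		if (l4_type == HNS3_L4_TYPE_IGMP ||
		    l4_type == HNS3_L4_TYPE_ICMP)
			return PKT_HASH_TYPE_L3;
	}
	return PKT_HASH_TYPE_NONE;
}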
rss_type = PKT_HASH_TYPE_NONE; +	struct net_device *netdev = ring_to_netdev(ring); +	struct hns3_nic_priv *priv = netdev_priv(netdev); -	if (rss_hash) -		rss_type = handle->kinfo.rss_type; -	else -		rss_type = PKT_HASH_TYPE_NONE; +	if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { +		u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, +					    HNS3_RXD_PTYPE_S); + +		rss_type = hns3_rx_ptype_tbl[ptype].hash_type; +	} else { +		int l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, +					      HNS3_RXD_L3ID_S); +		int l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, +					      HNS3_RXD_L4ID_S); + +		if (l3_type == HNS3_L3_TYPE_IPV4 || +		    l3_type == HNS3_L3_TYPE_IPV6) { +			if (l4_type == HNS3_L4_TYPE_UDP || +			    l4_type == HNS3_L4_TYPE_TCP || +			    l4_type == HNS3_L4_TYPE_SCTP) +				rss_type = PKT_HASH_TYPE_L4; +			else if (l4_type == HNS3_L4_TYPE_IGMP || +				 l4_type == HNS3_L4_TYPE_ICMP) +				rss_type = PKT_HASH_TYPE_L3; +		} +	}  	skb_set_hash(skb, rss_hash, rss_type);  } @@ -4282,7 +4304,8 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)  	ring->tqp_vector->rx_group.total_bytes += len; -	hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); +	hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash), +				 l234info, ol_info);  	return 0;  } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 133a054af6b7..294a14b4fdef 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -404,6 +404,7 @@ struct hns3_rx_ptype {  	u32 ip_summed : 2;  	u32 l3_type : 4;  	u32 valid : 1; +	u32 hash_type: 3;  };  struct ring_stats { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 6962a9d69cf8..4e54f91f7a6c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3443,6 +3443,7 @@ static int hclge_update_tp_port_info(struct hclge_dev *hdev)  	hdev->hw.mac.autoneg = cmd.base.autoneg;  	hdev->hw.mac.speed = cmd.base.speed;  	hdev->hw.mac.duplex = cmd.base.duplex; +	linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);  	return 0;  } @@ -4859,7 +4860,6 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,  		return ret;  	} -	hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets);  	return 0;  } @@ -11587,9 +11587,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)  	if (ret)  		goto err_msi_irq_uninit; -	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER && -	    !hnae3_dev_phy_imp_supported(hdev)) { -		ret = hclge_mac_mdio_config(hdev); +	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { +		if (hnae3_dev_phy_imp_supported(hdev)) +			ret = hclge_update_tp_port_info(hdev); +		else +			ret = hclge_mac_mdio_config(hdev); +  		if (ret)  			goto err_msi_irq_uninit;  	} @@ -12984,14 +12987,16 @@ static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)  static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,  			       u8 *priority)  { +	struct hclge_vport *vport = hclge_get_vport(h); +  	if (dscp >= HNAE3_MAX_DSCP)  		return -EINVAL;  	if (tc_mode) -		*tc_mode = h->kinfo.tc_map_mode; +		*tc_mode = vport->nic.kinfo.tc_map_mode;  	if (priority) -		*priority = h->kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 
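/*
 * Minimal sketch (hypothetical name) of the DSCP lookup this hunk
 * reworks to go through the vport's own nic handle: unconfigured map
 * entries hold the HNAE3_PRIO_ID_INVALID sentinel and are reported as
 * priority 0.
 */
static int dscp_prio_lookup_sketch(struct hnae3_knic_private_info *kinfo,
				   u8 dscp, u8 *priority)
{
	if (dscp >= HNAE3_MAX_DSCP)
		return -EINVAL;

	*priority = kinfo->dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ?
		    0 : kinfo->dscp_prio[dscp];
	return 0;
}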
0 : -			    h->kinfo.dscp_prio[dscp]; +		*priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 : +			    vport->nic.kinfo.dscp_prio[dscp];  	return 0;  } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c index 19eb839177ec..061952c6c21a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c @@ -85,6 +85,7 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)  	struct tag_sml_funcfg_tbl *funcfg_table_elem;  	struct hinic_cmd_lt_rd *read_data;  	u16 out_size = sizeof(*read_data); +	int ret = ~0;  	int err;  	read_data = kzalloc(sizeof(*read_data), GFP_KERNEL); @@ -111,20 +112,25 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)  	switch (idx) {  	case VALID: -		return funcfg_table_elem->dw0.bs.valid; +		ret = funcfg_table_elem->dw0.bs.valid; +		break;  	case RX_MODE: -		return funcfg_table_elem->dw0.bs.nic_rx_mode; +		ret = funcfg_table_elem->dw0.bs.nic_rx_mode; +		break;  	case MTU: -		return funcfg_table_elem->dw1.bs.mtu; +		ret = funcfg_table_elem->dw1.bs.mtu; +		break;  	case RQ_DEPTH: -		return funcfg_table_elem->dw13.bs.cfg_rq_depth; +		ret = funcfg_table_elem->dw13.bs.cfg_rq_depth; +		break;  	case QUEUE_NUM: -		return funcfg_table_elem->dw13.bs.cfg_q_num; +		ret = funcfg_table_elem->dw13.bs.cfg_q_num; +		break;  	}  	kfree(read_data); -	return ~0; +	return ret;  }  static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index 78190e88cd75..d39eec9c62bf 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -924,7 +924,7 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,  err_set_cmdq_depth:  	hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ); - +	free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]);  err_cmdq_ctxt:  	hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,  			    HINIC_MAX_CMDQ_TYPES); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 94f470556295..27795288c586 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -877,7 +877,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,  	if (err)  		return -EINVAL; -	interrupt_info->lli_credit_cnt = temp_info.lli_timer_cnt; +	interrupt_info->lli_credit_cnt = temp_info.lli_credit_cnt;  	interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt;  	err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index e1f54a2f28b2..2d6906aba2a2 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -1474,8 +1474,15 @@ static struct pci_driver hinic_driver = {  static int __init hinic_module_init(void)  { +	int ret; +  	hinic_dbg_register_debugfs(HINIC_DRV_NAME); -	return pci_register_driver(&hinic_driver); + +	ret = pci_register_driver(&hinic_driver); +	if (ret) +		hinic_dbg_unregister_debugfs(); + +	return ret;  }  static void __exit hinic_module_exit(void) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index a5f08b969e3f..f7e05b41385b 
100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -1174,7 +1174,6 @@ int hinic_vf_func_init(struct hinic_hwdev *hwdev)  			dev_err(&hwdev->hwif->pdev->dev,  				"Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",  				err, register_info.status, out_size); -			hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);  			return -EIO;  		}  	} else { diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 294bdbbeacc3..b4aff59b3eb4 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2900,6 +2900,7 @@ static struct device *ehea_register_port(struct ehea_port *port,  	ret = of_device_register(&port->ofdev);  	if (ret) {  		pr_err("failed to register device. ret=%d\n", ret); +		put_device(&port->ofdev.dev);  		goto out;  	} diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 3b14dc93f59d..5b96cd94dcd2 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1757,7 +1757,8 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)  			kobject_uevent(kobj, KOBJ_ADD);  	} -	rc = netif_set_real_num_tx_queues(netdev, ibmveth_real_max_tx_queues()); +	rc = netif_set_real_num_tx_queues(netdev, min(num_online_cpus(), +						      IBMVETH_DEFAULT_QUEUES));  	if (rc) {  		netdev_dbg(netdev, "failed to set number of tx queues rc=%d\n",  			   rc); diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index daf6f615c03f..115d4c45aa77 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -100,6 +100,7 @@ static inline long h_illan_attributes(unsigned long unit_address,  #define IBMVETH_MAX_BUF_SIZE (1024 * 128)  #define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)  #define IBMVETH_MAX_QUEUES 16U +#define IBMVETH_DEFAULT_QUEUES 8U  static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };  static int pool_count[] = { 256, 512, 256, 256, 256 }; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 65dbfbec487a..9282381a438f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3007,19 +3007,19 @@ static void __ibmvnic_reset(struct work_struct *work)  		rwi = get_next_rwi(adapter);  		/* -		 * If there is another reset queued, free the previous rwi -		 * and process the new reset even if previous reset failed -		 * (the previous reset could have failed because of a fail -		 * over for instance, so process the fail over). -		 *  		 * If there are no resets queued and the previous reset failed,  		 * the adapter would be in an undefined state. So retry the  		 * previous reset as a hard reset. +		 * +		 * Else, free the previous rwi and, if there is another reset +		 * queued, process the new reset even if previous reset failed +		 * (the previous reset could have failed because of a fail +		 * over for instance, so process the fail over).  		 
*/ -		if (rwi) -			kfree(tmprwi); -		else if (rc) +		if (!rwi && rc)  			rwi = tmprwi; +		else +			kfree(tmprwi);  		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||  			    rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 560d1d442232..d3fdc290937f 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1741,11 +1741,8 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,  	dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len,  				  DMA_TO_DEVICE);  	/* If we can't map the skb, have the upper layer try later */ -	if (dma_mapping_error(&nic->pdev->dev, dma_addr)) { -		dev_kfree_skb_any(skb); -		skb = NULL; +	if (dma_mapping_error(&nic->pdev->dev, dma_addr))  		return -ENOMEM; -	}  	/*  	 * Use the last 4 bytes of the SKB payload packet as the CRC, used for diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 49e926959ad3..55cf2f62bb30 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5936,9 +5936,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,  		e1000_tx_queue(tx_ring, tx_flags, count);  		/* Make sure there is space in the ring for the next send. */  		e1000_maybe_stop_tx(tx_ring, -				    (MAX_SKB_FRAGS * +				    ((MAX_SKB_FRAGS + 1) *  				     DIV_ROUND_UP(PAGE_SIZE, -						  adapter->tx_fifo_limit) + 2)); +						  adapter->tx_fifo_limit) + 4));  		if (!netdev_xmit_more() ||  		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 4a6630586ec9..fc373472e4e1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -32,6 +32,8 @@ struct workqueue_struct *fm10k_workqueue;   **/  static int __init fm10k_init_module(void)  { +	int ret; +  	pr_info("%s\n", fm10k_driver_string);  	pr_info("%s\n", fm10k_copyright); @@ -43,7 +45,13 @@ static int __init fm10k_init_module(void)  	fm10k_dbg_init(); -	return fm10k_register_pci_driver(); +	ret = fm10k_register_pci_driver(); +	if (ret) { +		fm10k_dbg_exit(); +		destroy_workqueue(fm10k_workqueue); +	} + +	return ret;  }  module_init(fm10k_init_module); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 7e75706f76db..f6fa63e4253c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2183,9 +2183,6 @@ static int i40e_set_ringparam(struct net_device *netdev,  			err = i40e_setup_rx_descriptors(&rx_rings[i]);  			if (err)  				goto rx_unwind; -			err = i40e_alloc_rx_bi(&rx_rings[i]); -			if (err) -				goto rx_unwind;  			/* now allocate the Rx buffers to make sure the OS  			 * has enough memory, any failure here means abort @@ -3188,10 +3185,17 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)  		if (cmd->flow_type == TCP_V4_FLOW ||  		    cmd->flow_type == UDP_V4_FLOW) { -			if (i_set & I40E_L3_SRC_MASK) -				cmd->data |= RXH_IP_SRC; -			if (i_set & I40E_L3_DST_MASK) -				cmd->data |= RXH_IP_DST; +			if (hw->mac.type == I40E_MAC_X722) { +				if (i_set & I40E_X722_L3_SRC_MASK) +					cmd->data |= RXH_IP_SRC; +				if (i_set & I40E_X722_L3_DST_MASK) +					cmd->data |= RXH_IP_DST; +			} else { +				if (i_set & I40E_L3_SRC_MASK) +					cmd->data |= 
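/*
 * A sketch (hypothetical helper; the masks are this series' additions
 * to i40e_type.h) of why both the get and set paths must branch on the
 * MAC type: X722 places the L3 src/dst input-set bits at different
 * positions than the other i40e MACs.
 */
static void i40e_l3_masks_sketch(struct i40e_hw *hw, u64 *src_l3, u64 *dst_l3)
{
	if (hw->mac.type == I40E_MAC_X722) {
		*src_l3 = I40E_X722_L3_SRC_MASK;
		*dst_l3 = I40E_X722_L3_DST_MASK;
	} else {
		*src_l3 = I40E_L3_SRC_MASK;
		*dst_l3 = I40E_L3_DST_MASK;
	}
}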
RXH_IP_SRC; +				if (i_set & I40E_L3_DST_MASK) +					cmd->data |= RXH_IP_DST; +			}  		} else if (cmd->flow_type == TCP_V6_FLOW ||  			  cmd->flow_type == UDP_V6_FLOW) {  			if (i_set & I40E_L3_V6_SRC_MASK) @@ -3549,12 +3553,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,  /**   * i40e_get_rss_hash_bits - Read RSS Hash bits from register + * @hw: hw structure   * @nfc: pointer to user request   * @i_setc: bits currently set   *   * Returns value of bits to be set per user request   **/ -static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) +static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw, +				  struct ethtool_rxnfc *nfc, +				  u64 i_setc)  {  	u64 i_set = i_setc;  	u64 src_l3 = 0, dst_l3 = 0; @@ -3573,8 +3580,13 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)  		dst_l3 = I40E_L3_V6_DST_MASK;  	} else if (nfc->flow_type == TCP_V4_FLOW ||  		  nfc->flow_type == UDP_V4_FLOW) { -		src_l3 = I40E_L3_SRC_MASK; -		dst_l3 = I40E_L3_DST_MASK; +		if (hw->mac.type == I40E_MAC_X722) { +			src_l3 = I40E_X722_L3_SRC_MASK; +			dst_l3 = I40E_X722_L3_DST_MASK; +		} else { +			src_l3 = I40E_L3_SRC_MASK; +			dst_l3 = I40E_L3_DST_MASK; +		}  	} else {  		/* Any other flow type are not supported here */  		return i_set; @@ -3592,6 +3604,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)  	return i_set;  } +#define FLOW_PCTYPES_SIZE 64  /**   * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash   * @pf: pointer to the physical function struct @@ -3604,9 +3617,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)  	struct i40e_hw *hw = &pf->hw;  	u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |  		   ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); -	u8 flow_pctype = 0; +	DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE);  	u64 i_set, i_setc; +	bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE); +  	if (pf->flags & I40E_FLAG_MFP_ENABLED) {  		dev_err(&pf->pdev->dev,  			"Change of RSS hash input set is not supported when MFP mode is enabled\n"); @@ -3622,36 +3637,35 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)  	switch (nfc->flow_type) {  	case TCP_V4_FLOW: -		flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; +		set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);  		if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) -			hena |= -			  BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); +			set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK, +				flow_pctypes);  		break;  	case TCP_V6_FLOW: -		flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; -		if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) -			hena |= -			  BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); +		set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);  		if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) -			hena |= -			  BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); +			set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK, +				flow_pctypes);  		break;  	case UDP_V4_FLOW: -		flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; -		if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) -			hena |= -			  BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | -			  BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); - +		set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes); +		if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { +			set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP, +				
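/*
 * A generic sketch (illustrative values) of the bitmap pattern this
 * rewrite adopts: collect every affected packet-classifier type with
 * set_bit(), then program the hash input set exactly once per set bit
 * instead of special-casing a single flow_pctype.
 */
static void rss_pctypes_sketch(void)
{
	DECLARE_BITMAP(pctypes, 64);	/* one bit per PCTYPE id */
	u8 id;

	bitmap_zero(pctypes, 64);
	set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, pctypes);
	set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK, pctypes);

	for_each_set_bit(id, pctypes, 64)
		pr_debug("program GLQF_HASH_INSET for pctype %u\n", id);
}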
flow_pctypes); +			set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP, +				flow_pctypes); +		}  		hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);  		break;  	case UDP_V6_FLOW: -		flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; -		if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) -			hena |= -			  BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | -			  BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); - +		set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes); +		if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { +			set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP, +				flow_pctypes); +			set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP, +				flow_pctypes); +		}  		hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);  		break;  	case AH_ESP_V4_FLOW: @@ -3684,17 +3698,20 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)  		return -EINVAL;  	} -	if (flow_pctype) { -		i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, -					       flow_pctype)) | -			((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, -					       flow_pctype)) << 32); -		i_set = i40e_get_rss_hash_bits(nfc, i_setc); -		i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype), -				  (u32)i_set); -		i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype), -				  (u32)(i_set >> 32)); -		hena |= BIT_ULL(flow_pctype); +	if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) { +		u8 flow_id; + +		for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) { +			i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) | +				 ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32); +			i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc); + +			i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id), +					  (u32)i_set); +			i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id), +					  (u32)(i_set >> 32)); +			hena |= BIT_ULL(flow_id); +		}  	}  	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); @@ -4447,11 +4464,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,  			return -EOPNOTSUPP;  		/* First 4 bytes of L4 header */ -		if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF)) -			new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK; -		else if (!usr_ip4_spec->l4_4_bytes) -			new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK); -		else +		if (usr_ip4_spec->l4_4_bytes)  			return -EOPNOTSUPP;  		/* Filtering on Type of Service is not supported. */ @@ -4490,11 +4503,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,  		else  			return -EOPNOTSUPP; -		if (usr_ip6_spec->l4_4_bytes == htonl(0xFFFFFFFF)) -			new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK; -		else if (!usr_ip6_spec->l4_4_bytes) -			new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK); -		else +		if (usr_ip6_spec->l4_4_bytes)  			return -EOPNOTSUPP;  		/* Filtering on Traffic class is not supported. 
*/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2c07fa8ecfc8..6416322d7c18 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3566,12 +3566,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)  	if (ring->vsi->type == I40E_VSI_MAIN)  		xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); -	kfree(ring->rx_bi);  	ring->xsk_pool = i40e_xsk_pool(ring);  	if (ring->xsk_pool) { -		ret = i40e_alloc_rx_bi_zc(ring); -		if (ret) -			return ret;  		ring->rx_buf_len =  		  xsk_pool_get_rx_frame_size(ring->xsk_pool);  		/* For AF_XDP ZC, we disallow packets to span on @@ -3589,9 +3585,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)  			 ring->queue_index);  	} else { -		ret = i40e_alloc_rx_bi(ring); -		if (ret) -			return ret;  		ring->rx_buf_len = vsi->rx_buf_len;  		if (ring->vsi->type == I40E_VSI_MAIN) {  			ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, @@ -10662,6 +10655,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)  }  /** + * i40e_clean_xps_state - clean xps state for every tx_ring + * @vsi: ptr to the VSI + **/ +static void i40e_clean_xps_state(struct i40e_vsi *vsi) +{ +	int i; + +	if (vsi->tx_rings) +		for (i = 0; i < vsi->num_queue_pairs; i++) +			if (vsi->tx_rings[i]) +				clear_bit(__I40E_TX_XPS_INIT_DONE, +					  vsi->tx_rings[i]->state); +} + +/**   * i40e_prep_for_reset - prep for the core to reset   * @pf: board private structure   * @@ -10685,8 +10693,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)  	i40e_pf_quiesce_all_vsi(pf);  	for (v = 0; v < pf->num_alloc_vsi; v++) { -		if (pf->vsi[v]) +		if (pf->vsi[v]) { +			i40e_clean_xps_state(pf->vsi[v]);  			pf->vsi[v]->seid = 0; +		}  	}  	i40e_shutdown_adminq(&pf->hw); @@ -13296,6 +13306,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,  		i40e_reset_and_rebuild(pf, true, true);  	} +	if (!i40e_enabled_xdp_vsi(vsi) && prog) { +		if (i40e_realloc_rx_bi_zc(vsi, true)) +			return -ENOMEM; +	} else if (i40e_enabled_xdp_vsi(vsi) && !prog) { +		if (i40e_realloc_rx_bi_zc(vsi, false)) +			return -ENOMEM; +	} +  	for (i = 0; i < vsi->num_queue_pairs; i++)  		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); @@ -13528,6 +13546,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)  	i40e_queue_pair_disable_irq(vsi, queue_pair);  	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); +	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);  	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);  	i40e_queue_pair_clean_rings(vsi, queue_pair);  	i40e_queue_pair_reset_stats(vsi, queue_pair); @@ -16642,6 +16661,8 @@ static struct pci_driver i40e_driver = {   **/  static int __init i40e_init_module(void)  { +	int err; +  	pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);  	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); @@ -16659,7 +16680,14 @@ static int __init i40e_init_module(void)  	}  	i40e_dbg_init(); -	return pci_register_driver(&i40e_driver); +	err = pci_register_driver(&i40e_driver); +	if (err) { +		destroy_workqueue(i40e_wq); +		i40e_dbg_exit(); +		return err; +	} + +	return 0;  }  module_init(i40e_init_module); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 69e67eb6aea7..b97c95f89fa0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1457,14 +1457,6 @@ err:  	return -ENOMEM;  } 
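/*
 * Sketch (hypothetical name) of where the buffer-info allocation moves:
 * the standalone i40e_alloc_rx_bi() removed below is folded into
 * i40e_setup_rx_descriptors() as a plain kcalloc(), and later swapped
 * by the XSK realloc helper when an AF_XDP pool is attached.
 */
static int i40e_alloc_sw_ring_sketch(struct i40e_ring *rx_ring)
{
	rx_ring->rx_bi = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi),
				 GFP_KERNEL);
	return rx_ring->rx_bi ? 0 : -ENOMEM;
}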
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring) -{ -	unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count; - -	rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL); -	return rx_ring->rx_bi ? 0 : -ENOMEM; -} -  static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)  {  	memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); @@ -1593,6 +1585,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)  	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; +	rx_ring->rx_bi = +		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL); +	if (!rx_ring->rx_bi) +		return -ENOMEM; +  	return 0;  } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 41f86e9535a0..768290dc6f48 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -469,7 +469,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);  bool __i40e_chk_linearize(struct sk_buff *skb);  int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,  		  u32 flags); -int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);  /**   * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 7b3f30beb757..388c3d36d96a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1404,6 +1404,10 @@ struct i40e_lldp_variables {  #define I40E_PFQF_CTL_0_HASHLUTSIZE_512	0x00010000  /* INPUT SET MASK for RSS, flow director, and flexible payload */ +#define I40E_X722_L3_SRC_SHIFT		49 +#define I40E_X722_L3_SRC_MASK		(0x3ULL << I40E_X722_L3_SRC_SHIFT) +#define I40E_X722_L3_DST_SHIFT		41 +#define I40E_X722_L3_DST_MASK		(0x3ULL << I40E_X722_L3_DST_SHIFT)  #define I40E_L3_SRC_SHIFT		47  #define I40E_L3_SRC_MASK		(0x3ULL << I40E_L3_SRC_SHIFT)  #define I40E_L3_V6_SRC_SHIFT		43 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 7e9f6a69eb10..635f93d60318 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1536,10 +1536,12 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)  	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))  		return true; -	/* If the VFs have been disabled, this means something else is -	 * resetting the VF, so we shouldn't continue. -	 */ -	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) +	/* Bail out if VFs are disabled. */ +	if (test_bit(__I40E_VF_DISABLE, pf->state)) +		return true; + +	/* If VF is being reset already we don't need to continue. 
*/ +	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))  		return true;  	i40e_trigger_vf_reset(vf, flr); @@ -1576,7 +1578,8 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)  	i40e_cleanup_reset_vf(vf);  	i40e_flush(hw); -	clear_bit(__I40E_VF_DISABLE, pf->state); +	usleep_range(20000, 40000); +	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);  	return true;  } @@ -1609,8 +1612,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)  		return false;  	/* Begin reset on all VFs at once */ -	for (v = 0; v < pf->num_alloc_vfs; v++) -		i40e_trigger_vf_reset(&pf->vf[v], flr); +	for (v = 0; v < pf->num_alloc_vfs; v++) { +		vf = &pf->vf[v]; +		/* If VF is being reset no need to trigger reset again */ +		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) +			i40e_trigger_vf_reset(&pf->vf[v], flr); +	}  	/* HW requires some time to make sure it can flush the FIFO for a VF  	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in @@ -1626,9 +1633,11 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)  		 */  		while (v < pf->num_alloc_vfs) {  			vf = &pf->vf[v]; -			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); -			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) -				break; +			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) { +				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); +				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) +					break; +			}  			/* If the current VF has finished resetting, move on  			 * to the next VF in sequence. @@ -1656,6 +1665,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)  		if (pf->vf[v].lan_vsi_idx == 0)  			continue; +		/* If VF is reset in another thread just continue */ +		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) +			continue; +  		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);  	} @@ -1667,6 +1680,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)  		if (pf->vf[v].lan_vsi_idx == 0)  			continue; +		/* If VF is reset in another thread just continue */ +		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) +			continue; +  		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);  	} @@ -1676,10 +1693,16 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)  	mdelay(50);  	/* Finish the reset on each VF */ -	for (v = 0; v < pf->num_alloc_vfs; v++) +	for (v = 0; v < pf->num_alloc_vfs; v++) { +		/* If VF is reset in another thread just continue */ +		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) +			continue; +  		i40e_cleanup_reset_vf(&pf->vf[v]); +	}  	i40e_flush(hw); +	usleep_range(20000, 40000);  	clear_bit(__I40E_VF_DISABLE, pf->state);  	return true; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index a554d0a0b09b..358bbdb58795 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -39,6 +39,7 @@ enum i40e_vf_states {  	I40E_VF_STATE_MC_PROMISC,  	I40E_VF_STATE_UC_PROMISC,  	I40E_VF_STATE_PRE_ENABLE, +	I40E_VF_STATE_RESETTING  };  /* VF capabilities */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 6d4009e0cbd6..cd7b52fb6b46 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -10,14 +10,6 @@  #include "i40e_txrx_common.h"  #include "i40e_xsk.h" -int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring) -{ -	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count; - -	rx_ring->rx_bi_zc = kzalloc(sz, 
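/*
 * A sketch (illustrative, generic types) of the allocate-then-swap rule
 * the new i40e_realloc_rx_xdp_bi() below follows: allocate the
 * replacement SW ring first and free the old one only on success, so a
 * failed XSK transition leaves the ring usable.
 */
static int swap_sw_ring_sketch(void **old_ring, void **new_slot,
			       size_t count, size_t elem_size)
{
	void *sw_ring = kcalloc(count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;	/* existing ring left untouched */

	kfree(*old_ring);
	*old_ring = NULL;
	*new_slot = sw_ring;
	return 0;
}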
GFP_KERNEL); -	return rx_ring->rx_bi_zc ? 0 : -ENOMEM; -} -  void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)  {  	memset(rx_ring->rx_bi_zc, 0, @@ -30,6 +22,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)  }  /** + * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer + * @rx_ring: Current rx ring + * @pool_present: is pool for XSK present + * + * Try allocating memory and return ENOMEM, if failed to allocate. + * If allocation was successful, substitute buffer with allocated one. + * Returns 0 on success, negative on failure + */ +static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present) +{ +	size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) : +					  sizeof(*rx_ring->rx_bi); +	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL); + +	if (!sw_ring) +		return -ENOMEM; + +	if (pool_present) { +		kfree(rx_ring->rx_bi); +		rx_ring->rx_bi = NULL; +		rx_ring->rx_bi_zc = sw_ring; +	} else { +		kfree(rx_ring->rx_bi_zc); +		rx_ring->rx_bi_zc = NULL; +		rx_ring->rx_bi = sw_ring; +	} +	return 0; +} + +/** + * i40e_realloc_rx_bi_zc - reallocate rx SW rings + * @vsi: Current VSI + * @zc: is zero copy set + * + * Reallocate buffer for rx_rings that might be used by XSK. + * XDP requires more memory, than rx_buf provides. + * Returns 0 on success, negative on failure + */ +int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc) +{ +	struct i40e_ring *rx_ring; +	unsigned long q; + +	for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) { +		rx_ring = vsi->rx_rings[q]; +		if (i40e_realloc_rx_xdp_bi(rx_ring, zc)) +			return -ENOMEM; +	} +	return 0; +} + +/**   * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a   * certain ring/qid   * @vsi: Current VSI @@ -69,6 +113,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,  		if (err)  			return err; +		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true); +		if (err) +			return err; +  		err = i40e_queue_pair_enable(vsi, qid);  		if (err)  			return err; @@ -113,6 +161,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)  	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);  	if (if_running) { +		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false); +		if (err) +			return err;  		err = i40e_queue_pair_enable(vsi, qid);  		if (err)  			return err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index bb962987f300..821df248f8be 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -32,7 +32,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);  bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);  int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); -int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring); +int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc);  void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);  #endif /* _I40E_XSK_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 3f6187c16424..0d1bab4ac1b0 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -298,7 +298,6 @@ struct iavf_adapter {  #define IAVF_FLAG_QUEUES_DISABLED		BIT(17)  #define IAVF_FLAG_SETUP_NETDEV_FEATURES		BIT(18)  #define IAVF_FLAG_REINIT_MSIX_NEEDED		BIT(20) -#define IAVF_FLAG_INITIAL_MAC_SET		BIT(23)  /* duplicates for common code */  #define IAVF_FLAG_DCB_ENABLED			0  	/* 
flags for admin queue service task */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 3fc572341781..f71e132ede09 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1087,12 +1087,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)  	if (ret)  		return ret; -	/* If this is an initial set MAC during VF spawn do not wait */ -	if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) { -		adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET; -		return 0; -	} -  	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,  					       iavf_is_mac_set_handled(netdev, addr->sa_data),  					       msecs_to_jiffies(2500)); @@ -2605,8 +2599,6 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)  		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);  	} -	adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET; -  	adapter->tx_desc_count = IAVF_DEFAULT_TXD;  	adapter->rx_desc_count = IAVF_DEFAULT_RXD;  	err = iavf_init_interrupt_scheme(adapter); @@ -2921,7 +2913,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)  	iavf_free_queues(adapter);  	memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);  	iavf_shutdown_adminq(&adapter->hw); -	adapter->netdev->flags &= ~IFF_UP;  	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;  	iavf_change_state(adapter, __IAVF_DOWN);  	wake_up(&adapter->down_waitqueue); @@ -3021,6 +3012,11 @@ static void iavf_reset_task(struct work_struct *work)  		iavf_disable_vf(adapter);  		mutex_unlock(&adapter->client_lock);  		mutex_unlock(&adapter->crit_lock); +		if (netif_running(netdev)) { +			rtnl_lock(); +			dev_close(netdev); +			rtnl_unlock(); +		}  		return; /* Do not attempt to reinit. It's dead, Jim. */  	} @@ -3033,6 +3029,7 @@ continue_reset:  	if (running) {  		netif_carrier_off(netdev); +		netif_tx_stop_all_queues(netdev);  		adapter->link_up = false;  		iavf_napi_disable_all(adapter);  	} @@ -3172,6 +3169,16 @@ reset_err:  	mutex_unlock(&adapter->client_lock);  	mutex_unlock(&adapter->crit_lock); + +	if (netif_running(netdev)) { +		/* Close device to ensure that Tx queues will not be started +		 * during netif_device_attach() at the end of the reset task. +		 */ +		rtnl_lock(); +		dev_close(netdev); +		rtnl_unlock(); +	} +  	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");  reset_finish:  	rtnl_lock(); @@ -5035,23 +5042,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d)  static void iavf_remove(struct pci_dev *pdev)  {  	struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); -	struct net_device *netdev = adapter->netdev;  	struct iavf_fdir_fltr *fdir, *fdirtmp;  	struct iavf_vlan_filter *vlf, *vlftmp; +	struct iavf_cloud_filter *cf, *cftmp;  	struct iavf_adv_rss *rss, *rsstmp;  	struct iavf_mac_filter *f, *ftmp; -	struct iavf_cloud_filter *cf, *cftmp; -	struct iavf_hw *hw = &adapter->hw; +	struct net_device *netdev; +	struct iavf_hw *hw;  	int err; -	/* When reboot/shutdown is in progress no need to do anything -	 * as the adapter is already REMOVE state that was set during -	 * iavf_shutdown() callback. -	 */ -	if (adapter->state == __IAVF_REMOVE) +	netdev = adapter->netdev; +	hw = &adapter->hw; + +	if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))  		return; -	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);  	/* Wait until port initialization is complete.  	 * There are flows where register/unregister netdev may race.  	 
*/ @@ -5191,6 +5196,8 @@ static struct pci_driver iavf_driver = {   **/  static int __init iavf_init_module(void)  { +	int ret; +  	pr_info("iavf: %s\n", iavf_driver_string);  	pr_info("%s\n", iavf_copyright); @@ -5201,7 +5208,12 @@ static int __init iavf_init_module(void)  		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);  		return -ENOMEM;  	} -	return pci_register_driver(&iavf_driver); + +	ret = pci_register_driver(&iavf_driver); +	if (ret) +		destroy_workqueue(iavf_wq); + +	return ret;  }  module_init(iavf_init_module); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 5a9e6563923e..24a701fd140e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -2438,6 +2438,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,  		list_for_each_entry(f, &adapter->vlan_filter_list, list) {  			if (f->is_new_vlan) {  				f->is_new_vlan = false; +				if (!f->vlan.vid) +					continue;  				if (f->vlan.tpid == ETH_P_8021Q)  					set_bit(f->vlan.vid,  						adapter->vsi.active_cvlans); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 9e36f01dfa4f..e864634d66bc 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -958,7 +958,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,  	 * associated to the queue to schedule NAPI handler  	 */  	q_vector = ring->q_vector; -	if (q_vector) +	if (q_vector && !(vsi->vf && ice_is_vf_disabled(vsi->vf)))  		ice_trigger_sw_intr(hw, q_vector);  	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 938ba8c215cb..7276badfa19e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2240,6 +2240,31 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)  }  /** + * ice_vsi_is_rx_queue_active + * @vsi: the VSI being configured + * + * Return true if at least one queue is active. + */ +bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi) +{ +	struct ice_pf *pf = vsi->back; +	struct ice_hw *hw = &pf->hw; +	int i; + +	ice_for_each_rxq(vsi, i) { +		u32 rx_reg; +		int pf_q; + +		pf_q = vsi->rxq_map[i]; +		rx_reg = rd32(hw, QRX_CTRL(pf_q)); +		if (rx_reg & QRX_CTRL_QENA_STAT_M) +			return true; +	} + +	return false; +} + +/**   * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not   * @vsi: VSI to check whether or not VLAN pruning is enabled.   
* diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index ec4bf0c89857..dcdf69a693e9 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -129,4 +129,5 @@ u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi);  bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);  void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);  void ice_init_feature_support(struct ice_pf *pf); +bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);  #endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 0f6718719453..ca2898467dcb 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3145,15 +3145,15 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)   */  static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)  { -	irqreturn_t ret = IRQ_HANDLED;  	struct ice_pf *pf = data; -	bool irq_handled; -	irq_handled = ice_ptp_process_ts(pf); -	if (!irq_handled) -		ret = IRQ_WAKE_THREAD; +	if (ice_is_reset_in_progress(pf->state)) +		return IRQ_HANDLED; -	return ret; +	while (!ice_ptp_process_ts(pf)) +		usleep_range(50, 100); + +	return IRQ_HANDLED;  }  /** diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 011b727ab190..0f668468d141 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -614,11 +614,14 @@ static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)   * 2) extend the 40b timestamp value to get a 64bit timestamp   * 3) send that timestamp to the stack   * - * After looping, if we still have waiting SKBs, return true. This may cause us - * effectively poll even when not strictly necessary. We do this because it's - * possible a new timestamp was requested around the same time as the interrupt. - * In some cases hardware might not interrupt us again when the timestamp is - * captured. + * Returns true if all timestamps were handled, and false if any slots remain + * without a timestamp. + * + * After looping, if we still have waiting SKBs, return false. This may cause + * us effectively poll even when not strictly necessary. We do this because + * it's possible a new timestamp was requested around the same time as the + * interrupt. In some cases hardware might not interrupt us again when the + * timestamp is captured.   *   * Note that we only take the tracking lock when clearing the bit and when   * checking if we need to re-queue this task. 
The only place where bits can be @@ -641,7 +644,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)  	u8 idx;  	if (!tx->init) -		return false; +		return true;  	ptp_port = container_of(tx, struct ice_ptp_port, tx);  	pf = ptp_port_to_pf(ptp_port); @@ -2381,10 +2384,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)   */  bool ice_ptp_process_ts(struct ice_pf *pf)  { -	if (pf->ptp.port.tx.init) -		return ice_ptp_tx_tstamp(&pf->ptp.port.tx); - -	return false; +	return ice_ptp_tx_tstamp(&pf->ptp.port.tx);  }  static void ice_ptp_periodic_work(struct kthread_work *work) diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 0abeed092de1..1c51778db951 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -576,7 +576,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)  			return -EINVAL;  		}  		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); -		ice_vsi_stop_all_rx_rings(vsi); + +		if (ice_vsi_is_rx_queue_active(vsi)) +			ice_vsi_stop_all_rx_rings(vsi); +  		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",  			vf->vf_id);  		return 0; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index e5f3e7680dc6..ff911af16a4b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1413,6 +1413,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)  			*data = 1;  			return -1;  		} +		wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8); +		wr32(E1000_EIMS, BIT(0));  	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {  		shared_int = false;  		if (request_irq(irq, diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 99933e89717a..e338fa572793 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4869,6 +4869,8 @@ static struct pci_driver ixgbevf_driver = {   **/  static int __init ixgbevf_init_module(void)  { +	int err; +  	pr_info("%s\n", ixgbevf_driver_string);  	pr_info("%s\n", ixgbevf_copyright);  	ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name); @@ -4877,7 +4879,13 @@ static int __init ixgbevf_init_module(void)  		return -ENOMEM;  	} -	return pci_register_driver(&ixgbevf_driver); +	err = pci_register_driver(&ixgbevf_driver); +	if (err) { +		destroy_workqueue(ixgbevf_wq); +		return err; +	} + +	return 0;  }  module_init(ixgbevf_init_module); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 59aab4086dcc..f5961bdcc480 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -485,7 +485,6 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)  	len = skb->len < ETH_ZLEN ? 
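/*
 * Sketch (hypothetical name) of the rule behind the lantiq_etop fix
 * here: a driver returning NETDEV_TX_BUSY must not free the skb,
 * because the core still owns it and will resubmit the same pointer.
 */
static netdev_tx_t tx_ring_full_sketch(struct sk_buff *skb,
				       struct netdev_queue *txq)
{
	netif_tx_stop_queue(txq);
	/* skb is untouched; the stack requeues it. */
	return NETDEV_TX_BUSY;
}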
ETH_ZLEN : skb->len;  	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { -		dev_kfree_skb_any(skb);  		netdev_err(dev, "tx ring full\n");  		netif_tx_stop_queue(txq);  		return NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 707993b445d1..8941f69d93e9 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2481,6 +2481,7 @@ out_free:  	for (i = 0; i < mp->rxq_count; i++)  		rxq_deinit(mp->rxq + i);  out: +	napi_disable(&mp->napi);  	free_irq(dev->irq, dev);  	return err; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index ff3e361e06e7..5aefaaff0871 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4271,7 +4271,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)  	/* Use the cpu associated to the rxq when it is online, in all  	 * the other cases, use the cpu 0 which can't be offline.  	 */ -	if (cpu_online(pp->rxq_def)) +	if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))  		elected_cpu = pp->rxq_def;  	max_cpu = num_present_cpus(); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index eb0fb8128096..b399bdb1ca36 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -7350,6 +7350,7 @@ static int mvpp2_get_sram(struct platform_device *pdev,  			  struct mvpp2 *priv)  {  	struct resource *res; +	void __iomem *base;  	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);  	if (!res) { @@ -7360,9 +7361,12 @@ static int mvpp2_get_sram(struct platform_device *pdev,  		return 0;  	} -	priv->cm3_base = devm_ioremap_resource(&pdev->dev, res); +	base = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(base)) +		return PTR_ERR(base); -	return PTR_ERR_OR_ZERO(priv->cm3_base); +	priv->cm3_base = base; +	return 0;  }  static int mvpp2_probe(struct platform_device *pdev) diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 9089adcb75f9..b45dd7f04e21 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -521,14 +521,12 @@ static int octep_open(struct net_device *netdev)  	octep_oq_dbell_init(oct);  	ret = octep_get_link_status(oct); -	if (ret) +	if (ret > 0)  		octep_link_up(netdev);  	return 0;  set_queues_err: -	octep_napi_disable(oct); -	octep_napi_delete(oct);  	octep_clean_irqs(oct);  setup_irq_err:  	octep_free_oqs(oct); @@ -958,7 +956,7 @@ int octep_device_setup(struct octep_device *oct)  	ret = octep_ctrl_mbox_init(ctrl_mbox);  	if (ret) {  		dev_err(&pdev->dev, "Failed to initialize control mbox\n"); -		return -1; +		goto unsupported_dev;  	}  	oct->ctrl_mbox_ifstats_offset = OCTEP_CTRL_MBOX_SZ(ctrl_mbox->h2fq.elem_sz,  							   ctrl_mbox->h2fq.elem_cnt, @@ -968,6 +966,10 @@ int octep_device_setup(struct octep_device *oct)  	return 0;  unsupported_dev: +	for (i = 0; i < OCTEP_MMIO_REGIONS; i++) +		iounmap(oct->mmio[i].hw_addr); + +	kfree(oct->conf);  	return -1;  } @@ -1070,7 +1072,11 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	netdev->max_mtu = OCTEP_MAX_MTU;  	netdev->mtu = OCTEP_DEFAULT_MTU; -	octep_get_mac_addr(octep_dev, octep_dev->mac_addr); +	err = octep_get_mac_addr(octep_dev, octep_dev->mac_addr); +	if (err) { +		
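The lantiq_etop hunk above (dropping dev_kfree_skb_any() before returning NETDEV_TX_BUSY) reflects an ndo_start_xmit ownership rule: on NETDEV_TX_BUSY the core still owns the skb and will requeue it, so freeing it in the driver is a use-after-free. A hedged sketch of the correct shape, with invented my_* helpers:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

bool my_tx_ring_full(struct net_device *dev);	/* placeholders */
void my_queue_for_dma(struct net_device *dev, struct sk_buff *skb);

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (my_tx_ring_full(dev)) {
		netif_stop_queue(dev);
		/* do NOT free skb: the stack keeps ownership and retries */
		return NETDEV_TX_BUSY;
	}

	my_queue_for_dma(dev, skb);	/* the driver owns the skb from here on */
	return NETDEV_TX_OK;
}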
dev_err(&pdev->dev, "Failed to get mac address\n"); +		goto register_dev_err; +	}  	eth_hw_addr_set(netdev, octep_dev->mac_addr);  	err = register_netdev(netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index e1036b0eb6b1..993ac180a5db 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -36,6 +36,7 @@ config OCTEONTX2_PF  	select DIMLIB  	depends on PCI  	depends on PTP_1588_CLOCK_OPTIONAL +	depends on MACSEC || !MACSEC  	help  	  This driver supports Marvell's OcteonTX2 NIC physical function. diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c index 4a343f853b28..c0bedf402da9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c @@ -951,7 +951,7 @@ static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction d  		else  			event.intr_mask = (dir == MCS_RX) ?  					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT : -					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT; +					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;  		/* Notify the lmac_id info which ran into BBE fatal error */  		event.lmac_id = i & 0x3ULL; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index a1970ebedf95..f66dde2b0f92 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -880,6 +880,8 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)  		sprintf(lmac, "LMAC%d", lmac_id);  		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",  			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac); + +		pci_dev_put(pdev);  	}  	return 0;  } @@ -2566,6 +2568,7 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)  		}  	} +	pci_dev_put(pdev);  	return 0;  } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 7646bb2ec89b..a62c1b322012 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -4985,6 +4985,8 @@ static int nix_setup_ipolicers(struct rvu *rvu,  		ipolicer->ref_count = devm_kcalloc(rvu->dev,  						   ipolicer->band_prof.max,  						   sizeof(u16), GFP_KERNEL); +		if (!ipolicer->ref_count) +			return -ENOMEM;  	}  	/* Set policer timeunit to 2us ie  (19 + 1) * 100 nsec = 2us */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c index b04fb226f708..ae50d56258ec 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c @@ -62,15 +62,18 @@ int rvu_sdp_init(struct rvu *rvu)  		pfvf->sdp_info = devm_kzalloc(rvu->dev,  					      sizeof(struct sdp_node_info),  					      GFP_KERNEL); -		if (!pfvf->sdp_info) +		if (!pfvf->sdp_info) { +			pci_dev_put(pdev);  			return -ENOMEM; +		}  		dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]); -		put_device(&pdev->dev);  		i++;  	} +	pci_dev_put(pdev); +  	return 0;  } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index 9809f551fc2e..9ec5f38d38a8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -815,6 +815,7 @@ free_flowid: 
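The rvu_debugfs.c and rvu_sdp.c hunks above all apply one rule: a struct pci_dev returned by a pci_get_*() lookup carries a reference that the caller must drop with pci_dev_put() on every exit path, success or error. Illustrative shape (my_use_peer() is made up):

#include <linux/pci.h>

int my_use_peer(struct pci_dev *pdev);	/* placeholder */

static int my_probe_peer(int domain, unsigned int bus, unsigned int devfn)
{
	struct pci_dev *pdev;
	int err;

	pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
	if (!pdev)
		return -ENODEV;

	err = my_use_peer(pdev);
	pci_dev_put(pdev);	/* drop the lookup reference unconditionally */
	return err;
}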
 	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,  			    txsc->hw_flow_id, false);  fail: +	kfree(txsc);  	return ERR_PTR(ret);  } @@ -870,6 +871,7 @@ free_flowid:  	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,  			    rxsc->hw_flow_id, false);  fail: +	kfree(rxsc);  	return ERR_PTR(ret);  } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 9ac9e6615ae7..9e10e7471b88 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -898,6 +898,7 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)  	}  	sq->head = 0; +	sq->cons_head = 0;  	sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;  	sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;  	/* Set SQE threshold to 10% of total SQEs */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 282db6fe3b08..67aa02bb2b85 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -884,7 +884,7 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,  static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)  {  #ifdef CONFIG_DCB -	if (pfvf->pfc_alloc_status[qidx]) +	if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])  		return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];  #endif diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 892ca88e0cf4..303930499a4c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -15,6 +15,7 @@  #include <net/ip.h>  #include <linux/bpf.h>  #include <linux/bpf_trace.h> +#include <linux/bitfield.h>  #include "otx2_reg.h"  #include "otx2_common.h" @@ -1171,6 +1172,59 @@ int otx2_set_real_num_queues(struct net_device *netdev,  }  EXPORT_SYMBOL(otx2_set_real_num_queues); +static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = { +	"NIX_SQOPERR_OOR", +	"NIX_SQOPERR_CTX_FAULT", +	"NIX_SQOPERR_CTX_POISON", +	"NIX_SQOPERR_DISABLED", +	"NIX_SQOPERR_SIZE_ERR", +	"NIX_SQOPERR_OFLOW", +	"NIX_SQOPERR_SQB_NULL", +	"NIX_SQOPERR_SQB_FAULT", +	"NIX_SQOPERR_SQE_SZ_ZERO", +}; + +static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = { +	"NIX_MNQERR_SQ_CTX_FAULT", +	"NIX_MNQERR_SQ_CTX_POISON", +	"NIX_MNQERR_SQB_FAULT", +	"NIX_MNQERR_SQB_POISON", +	"NIX_MNQERR_TOTAL_ERR", +	"NIX_MNQERR_LSO_ERR", +	"NIX_MNQERR_CQ_QUERY_ERR", +	"NIX_MNQERR_MAX_SQE_SIZE_ERR", +	"NIX_MNQERR_MAXLEN_ERR", +	"NIX_MNQERR_SQE_SIZEM1_ZERO", +}; + +static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] =  { +	"NIX_SND_STATUS_GOOD", +	"NIX_SND_STATUS_SQ_CTX_FAULT", +	"NIX_SND_STATUS_SQ_CTX_POISON", +	"NIX_SND_STATUS_SQB_FAULT", +	"NIX_SND_STATUS_SQB_POISON", +	"NIX_SND_STATUS_HDR_ERR", +	"NIX_SND_STATUS_EXT_ERR", +	"NIX_SND_STATUS_JUMP_FAULT", +	"NIX_SND_STATUS_JUMP_POISON", +	"NIX_SND_STATUS_CRC_ERR", +	"NIX_SND_STATUS_IMM_ERR", +	"NIX_SND_STATUS_SG_ERR", +	"NIX_SND_STATUS_MEM_ERR", +	"NIX_SND_STATUS_INVALID_SUBDC", +	"NIX_SND_STATUS_SUBDC_ORDER_ERR", +	"NIX_SND_STATUS_DATA_FAULT", +	"NIX_SND_STATUS_DATA_POISON", +	"NIX_SND_STATUS_NPC_DROP_ACTION", +	"NIX_SND_STATUS_LOCK_VIOL", +	"NIX_SND_STATUS_NPC_UCAST_CHAN_ERR", +	"NIX_SND_STATUS_NPC_MCAST_CHAN_ERR", +	"NIX_SND_STATUS_NPC_MCAST_ABORT", +	"NIX_SND_STATUS_NPC_VTAG_PTR_ERR", +	
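The kfree(txsc) and kfree(rxsc) additions in cn10k_macsec.c above are the standard constructor-unwind fix: every allocation made before a failure must be released before the error is returned, or the object leaks on each failed attempt. Generic sketch, names invented:

#include <linux/slab.h>
#include <linux/err.h>

struct my_sc { int hw_id; };
int my_alloc_hw_rsrc(struct my_sc *sc);	/* placeholder */

static struct my_sc *my_sc_create(void)
{
	struct my_sc *sc;
	int ret;

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return ERR_PTR(-ENOMEM);

	ret = my_alloc_hw_rsrc(sc);
	if (ret)
		goto err_free;

	return sc;

err_free:
	kfree(sc);	/* this line was missing before the fix above */
	return ERR_PTR(ret);
}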
"NIX_SND_STATUS_NPC_VTAG_SIZE_ERR", +	"NIX_SND_STATUS_SEND_STATS_ERR", +}; +  static irqreturn_t otx2_q_intr_handler(int irq, void *data)  {  	struct otx2_nic *pf = data; @@ -1204,46 +1258,67 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)  	/* SQ */  	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) { +		u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg; +		u8 sq_op_err_code, mnq_err_code, snd_err_code; + +		/* Below debug registers captures first errors corresponding to +		 * those registers. We don't have to check against SQ qid as +		 * these are fatal errors. +		 */ +  		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);  		val = otx2_atomic64_add((qidx << 44), ptr);  		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |  			     (val & NIX_SQINT_BITS)); -		if (!(val & (NIX_SQINT_BITS | BIT_ULL(42)))) -			continue; -  		if (val & BIT_ULL(42)) {  			netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",  				   qidx, otx2_read64(pf, NIX_LF_ERR_INT)); -		} else { -			if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) { -				netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx", -					   qidx, -					   otx2_read64(pf, -						       NIX_LF_SQ_OP_ERR_DBG)); -				otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, -					     BIT_ULL(44)); -			} -			if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) { -				netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n", -					   qidx, -					   otx2_read64(pf, NIX_LF_MNQ_ERR_DBG)); -				otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, -					     BIT_ULL(44)); -			} -			if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) { -				netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx", -					   qidx, -					   otx2_read64(pf, -						       NIX_LF_SEND_ERR_DBG)); -				otx2_write64(pf, NIX_LF_SEND_ERR_DBG, -					     BIT_ULL(44)); -			} -			if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) -				netdev_err(pf->netdev, "SQ%lld: SQB allocation failed", -					   qidx); +			goto done;  		} +		sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG); +		if (!(sq_op_err_dbg & BIT(44))) +			goto chk_mnq_err_dbg; + +		sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg); +		netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx)  err=%s\n", +			   qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]); + +		otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44)); + +		if (sq_op_err_code == NIX_SQOPERR_SQB_NULL) +			goto chk_mnq_err_dbg; + +		/* Err is not NIX_SQOPERR_SQB_NULL, call aq function to read SQ structure. +		 * TODO: But we are in irq context. 
How to call mbox functions which does sleep +		 */ + +chk_mnq_err_dbg: +		mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG); +		if (!(mnq_err_dbg & BIT(44))) +			goto chk_snd_err_dbg; + +		mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg); +		netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx)  err=%s\n", +			   qidx, mnq_err_dbg,  nix_mnqerr_e_str[mnq_err_code]); +		otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44)); + +chk_snd_err_dbg: +		snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG); +		if (snd_err_dbg & BIT(44)) { +			snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg); +			netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n", +				   qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]); +			otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44)); +		} + +done: +		/* Print values and reset */ +		if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) +			netdev_err(pf->netdev, "SQ%lld: SQB allocation failed", +				   qidx); +  		schedule_work(&pf->reset_task);  	} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h index aa205a0d158f..fa37b9f312ca 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h @@ -281,4 +281,61 @@ enum nix_sqint_e {  			BIT_ULL(NIX_SQINT_SEND_ERR) | \  			BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) +enum nix_sqoperr_e { +	NIX_SQOPERR_OOR = 0, +	NIX_SQOPERR_CTX_FAULT = 1, +	NIX_SQOPERR_CTX_POISON = 2, +	NIX_SQOPERR_DISABLED = 3, +	NIX_SQOPERR_SIZE_ERR = 4, +	NIX_SQOPERR_OFLOW = 5, +	NIX_SQOPERR_SQB_NULL = 6, +	NIX_SQOPERR_SQB_FAULT = 7, +	NIX_SQOPERR_SQE_SZ_ZERO = 8, +	NIX_SQOPERR_MAX, +}; + +enum nix_mnqerr_e { +	NIX_MNQERR_SQ_CTX_FAULT = 0, +	NIX_MNQERR_SQ_CTX_POISON = 1, +	NIX_MNQERR_SQB_FAULT = 2, +	NIX_MNQERR_SQB_POISON = 3, +	NIX_MNQERR_TOTAL_ERR = 4, +	NIX_MNQERR_LSO_ERR = 5, +	NIX_MNQERR_CQ_QUERY_ERR = 6, +	NIX_MNQERR_MAX_SQE_SIZE_ERR = 7, +	NIX_MNQERR_MAXLEN_ERR = 8, +	NIX_MNQERR_SQE_SIZEM1_ZERO = 9, +	NIX_MNQERR_MAX, +}; + +enum nix_snd_status_e { +	NIX_SND_STATUS_GOOD = 0x0, +	NIX_SND_STATUS_SQ_CTX_FAULT = 0x1, +	NIX_SND_STATUS_SQ_CTX_POISON = 0x2, +	NIX_SND_STATUS_SQB_FAULT = 0x3, +	NIX_SND_STATUS_SQB_POISON = 0x4, +	NIX_SND_STATUS_HDR_ERR = 0x5, +	NIX_SND_STATUS_EXT_ERR = 0x6, +	NIX_SND_STATUS_JUMP_FAULT = 0x7, +	NIX_SND_STATUS_JUMP_POISON = 0x8, +	NIX_SND_STATUS_CRC_ERR = 0x9, +	NIX_SND_STATUS_IMM_ERR = 0x10, +	NIX_SND_STATUS_SG_ERR = 0x11, +	NIX_SND_STATUS_MEM_ERR = 0x12, +	NIX_SND_STATUS_INVALID_SUBDC = 0x13, +	NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14, +	NIX_SND_STATUS_DATA_FAULT = 0x15, +	NIX_SND_STATUS_DATA_POISON = 0x16, +	NIX_SND_STATUS_NPC_DROP_ACTION = 0x17, +	NIX_SND_STATUS_LOCK_VIOL = 0x18, +	NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19, +	NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20, +	NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21, +	NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22, +	NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23, +	NIX_SND_STATUS_SEND_MEM_FAULT = 0x24, +	NIX_SND_STATUS_SEND_STATS_ERR = 0x25, +	NIX_SND_STATUS_MAX, +}; +  #endif /* OTX2_STRUCT_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index e64318c110fd..6a01ab1a6e6f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -1134,7 +1134,12 @@ int otx2_init_tc(struct otx2_nic *nic)  		return err;  	tc->flow_ht_params = tc_flow_ht_params; -	return rhashtable_init(&tc->flow_table, 
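The rewritten otx2_q_intr_handler() above pulls an 8-bit error code out of each debug register with FIELD_GET(GENMASK(7, 0), reg) and uses it to index a string table. The same idiom in miniature, against a hypothetical register layout (valid bit 44, code in bits 7:0); the bounds check is cheap insurance whenever a string table and a hardware enum can drift apart:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/printk.h>

#define MY_ERR_VALID	BIT_ULL(44)		/* write 1 to clear */
#define MY_ERR_CODE	GENMASK_ULL(7, 0)

static const char * const my_err_str[] = {
	"MY_ERR_NONE", "MY_ERR_FAULT", "MY_ERR_POISON",
};

static void my_decode(u64 reg)
{
	u8 code;

	if (!(reg & MY_ERR_VALID))
		return;

	code = FIELD_GET(MY_ERR_CODE, reg);
	if (code < ARRAY_SIZE(my_err_str))
		pr_err("fatal queue error: %s\n", my_err_str[code]);
	else
		pr_err("fatal queue error: unknown code %u\n", code);
}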
&tc->flow_ht_params); +	err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params); +	if (err) { +		kfree(tc->tc_entries_bitmap); +		tc->tc_entries_bitmap = NULL; +	} +	return err;  }  EXPORT_SYMBOL(otx2_init_tc); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 5ec11d71bf60..ef10aef3cda0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -441,6 +441,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,  				struct otx2_cq_queue *cq, int budget)  {  	int tx_pkts = 0, tx_bytes = 0, qidx; +	struct otx2_snd_queue *sq;  	struct nix_cqe_tx_s *cqe;  	int processed_cqe = 0; @@ -451,6 +452,9 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,  		return 0;  process_cqe: +	qidx = cq->cq_idx - pfvf->hw.rx_queues; +	sq = &pfvf->qset.sq[qidx]; +  	while (likely(processed_cqe < budget) && cq->pend_cqe) {  		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);  		if (unlikely(!cqe)) { @@ -458,18 +462,20 @@ process_cqe:  				return 0;  			break;  		} +  		if (cq->cq_type == CQ_XDP) { -			qidx = cq->cq_idx - pfvf->hw.rx_queues; -			otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx], -						 cqe); +			otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);  		} else { -			otx2_snd_pkt_handler(pfvf, cq, -					     &pfvf->qset.sq[cq->cint_idx], -					     cqe, budget, &tx_pkts, &tx_bytes); +			otx2_snd_pkt_handler(pfvf, cq, sq, cqe, budget, +					     &tx_pkts, &tx_bytes);  		} +  		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;  		processed_cqe++;  		cq->pend_cqe--; + +		sq->cons_head++; +		sq->cons_head &= (sq->sqe_cnt - 1);  	}  	/* Free CQEs to HW */ @@ -1072,17 +1078,17 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,  {  	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);  	struct otx2_nic *pfvf = netdev_priv(netdev); -	int offset, num_segs, free_sqe; +	int offset, num_segs, free_desc;  	struct nix_sqe_hdr_s *sqe_hdr; -	/* Check if there is room for new SQE. -	 * 'Num of SQBs freed to SQ's pool - SQ's Aura count' -	 * will give free SQE count. +	/* Check if there is enough room between producer +	 * and consumer index.  	 
*/ -	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb; +	free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1); +	if (free_desc < sq->sqe_thresh) +		return false; -	if (free_sqe < sq->sqe_thresh || -	    free_sqe < otx2_get_sqe_count(pfvf, skb)) +	if (free_desc < otx2_get_sqe_count(pfvf, skb))  		return false;  	num_segs = skb_shinfo(skb)->nr_frags + 1; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index fbe62bbfb789..93cac2c2664c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -79,6 +79,7 @@ struct sg_list {  struct otx2_snd_queue {  	u8			aura_id;  	u16			head; +	u16			cons_head;  	u16			sqe_size;  	u32			sqe_cnt;  	u16			num_sqbs; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 24f9d6024745..47796e4d900c 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -746,6 +746,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)  	return 0;  err_sfp_bind: +	unregister_netdev(dev);  err_register_netdev:  	prestera_port_list_del(port);  err_port_init: diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c index 4046be0e86ff..a9a1028cb17b 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -457,7 +457,7 @@ prestera_kern_neigh_cache_find(struct prestera_switch *sw,  	n_cache =  	 rhashtable_lookup_fast(&sw->router->kern_neigh_cache_ht, key,  				__prestera_kern_neigh_cache_ht_params); -	return IS_ERR(n_cache) ? NULL : n_cache; +	return n_cache;  }  static void diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c index aa080dc57ff0..02faaea2aefa 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c @@ -330,7 +330,7 @@ prestera_nh_neigh_find(struct prestera_switch *sw,  	nh_neigh = rhashtable_lookup_fast(&sw->router->nh_neigh_ht,  					  key, __prestera_nh_neigh_ht_params); -	return IS_ERR(nh_neigh) ? NULL : nh_neigh; +	return nh_neigh;  }  struct prestera_nh_neigh * @@ -484,7 +484,7 @@ __prestera_nexthop_group_find(struct prestera_switch *sw,  	nh_grp = rhashtable_lookup_fast(&sw->router->nexthop_group_ht,  					key, __prestera_nexthop_group_ht_params); -	return IS_ERR(nh_grp) ? 
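The new room check in otx2_sq_append_skb() above is the classic power-of-two ring identity: with a producer index head, a consumer index cons_head, and one slot sacrificed so that full and empty are distinguishable, the free count falls out of masked subtraction (field names as in the driver, ring size illustrative):

static inline unsigned int my_ring_free(unsigned int head,
					unsigned int cons_head,
					unsigned int sqe_cnt)
{
	/* sqe_cnt must be a power of two for the mask to work */
	return (cons_head - head - 1 + sqe_cnt) & (sqe_cnt - 1);
}

/* e.g. sqe_cnt = 8: head == cons_head (empty)       -> 7 slots free;
 *      head = 5, cons_head = 6 (full by convention) -> 0 slots free.
 */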
NULL : nh_grp; +	return nh_grp;  }  static struct prestera_nexthop_group * diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c index 42ee963e9f75..9277a8fd1339 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c @@ -776,6 +776,7 @@ tx_done:  int prestera_rxtx_switch_init(struct prestera_switch *sw)  {  	struct prestera_rxtx *rxtx; +	int err;  	rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);  	if (!rxtx) @@ -783,7 +784,11 @@ int prestera_rxtx_switch_init(struct prestera_switch *sw)  	sw->rxtx = rxtx; -	return prestera_sdma_switch_init(sw); +	err = prestera_sdma_switch_init(sw); +	if (err) +		kfree(rxtx); + +	return err;  }  void prestera_rxtx_switch_fini(struct prestera_switch *sw) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 4fba7cb0144b..1d36619c5ec9 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2378,8 +2378,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)  				data + NET_SKB_PAD + eth->ip_align,  				ring->buf_size, DMA_FROM_DEVICE);  			if (unlikely(dma_mapping_error(eth->dma_dev, -						       dma_addr))) +						       dma_addr))) { +				skb_free_frag(data);  				return -ENOMEM; +			}  		}  		rxd->rxd1 = (unsigned int)dma_addr;  		ring->data[i] = data; @@ -2996,8 +2998,10 @@ static int mtk_open(struct net_device *dev)  		int i;  		err = mtk_start_dma(eth); -		if (err) +		if (err) { +			phylink_disconnect_phy(mac->phylink);  			return err; +		}  		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)  			mtk_ppe_start(eth->ppe[i]); @@ -4060,19 +4064,23 @@ static int mtk_probe(struct platform_device *pdev)  			eth->irq[i] = platform_get_irq(pdev, i);  		if (eth->irq[i] < 0) {  			dev_err(&pdev->dev, "no IRQ%d resource found\n", i); -			return -ENXIO; +			err = -ENXIO; +			goto err_wed_exit;  		}  	}  	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {  		eth->clks[i] = devm_clk_get(eth->dev,  					    mtk_clks_source_name[i]);  		if (IS_ERR(eth->clks[i])) { -			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) -				return -EPROBE_DEFER; +			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { +				err = -EPROBE_DEFER; +				goto err_wed_exit; +			}  			if (eth->soc->required_clks & BIT(i)) {  				dev_err(&pdev->dev, "clock %s not found\n",  					mtk_clks_source_name[i]); -				return -EINVAL; +				err = -EINVAL; +				goto err_wed_exit;  			}  			eth->clks[i] = NULL;  		} @@ -4083,7 +4091,7 @@ static int mtk_probe(struct platform_device *pdev)  	err = mtk_hw_init(eth);  	if (err) -		return err; +		goto err_wed_exit;  	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); @@ -4139,13 +4147,13 @@ static int mtk_probe(struct platform_device *pdev)  						   eth->soc->offload_version, i);  			if (!eth->ppe[i]) {  				err = -ENOMEM; -				goto err_free_dev; +				goto err_deinit_ppe;  			}  		}  		err = mtk_eth_offload_init(eth);  		if (err) -			goto err_free_dev; +			goto err_deinit_ppe;  	}  	for (i = 0; i < MTK_MAX_DEVS; i++) { @@ -4155,7 +4163,7 @@ static int mtk_probe(struct platform_device *pdev)  		err = register_netdev(eth->netdev[i]);  		if (err) {  			dev_err(eth->dev, "error bringing up device\n"); -			goto err_deinit_mdio; +			goto err_deinit_ppe;  		} else  			netif_info(eth, probe, eth->netdev[i],  				   "mediatek frame engine at 0x%08lx, irq %d\n", @@ -4173,12 +4181,15 @@ static int mtk_probe(struct platform_device 
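The three prestera deletions above remove dead code rather than change behavior: rhashtable_lookup_fast() returns either the object or NULL, never an ERR_PTR(), so "IS_ERR(x) ? NULL : x" could never take its NULL branch and only obscured the contract. Each lookup reduces to (types illustrative):

#include <linux/rhashtable.h>

struct my_neigh { struct rhash_head node; u32 key; };

static const struct rhashtable_params my_params = {
	.key_len = sizeof(u32),
	.key_offset = offsetof(struct my_neigh, key),
	.head_offset = offsetof(struct my_neigh, node),
};

static struct my_neigh *my_find(struct rhashtable *ht, const u32 *key)
{
	/* NULL on miss, object on hit - no IS_ERR() filtering needed */
	return rhashtable_lookup_fast(ht, key, my_params);
}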
*pdev)  	return 0; -err_deinit_mdio: +err_deinit_ppe: +	mtk_ppe_deinit(eth); +	mtk_mdio_cleanup(eth);  err_free_dev:  	mtk_free_dev(eth);  err_deinit_hw:  	mtk_hw_deinit(eth); +err_wed_exit: +	mtk_wed_exit();  	return err;  } @@ -4198,6 +4209,7 @@ static int mtk_remove(struct platform_device *pdev)  		phylink_disconnect_phy(mac->phylink);  	} +	mtk_wed_exit();  	mtk_hw_deinit(eth);  	netif_napi_del(&eth->tx_napi); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index ae00e572390d..784ecb2dc9fb 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -397,12 +397,6 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,  	return 0;  } -static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) -{ -	return !(entry->ib1 & MTK_FOE_IB1_STATIC) && -	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; -} -  static bool  mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,  		     struct mtk_foe_entry *data) @@ -743,7 +737,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,  				  MTK_PPE_ENTRIES * soc->foe_entry_size,  				  &ppe->foe_phys, GFP_KERNEL);  	if (!foe) -		return NULL; +		goto err_free_l2_flows;  	ppe->foe_table = foe; @@ -751,11 +745,26 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,  			sizeof(*ppe->foe_flow);  	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);  	if (!ppe->foe_flow) -		return NULL; +		goto err_free_l2_flows;  	mtk_ppe_debugfs_init(ppe, index);  	return ppe; + +err_free_l2_flows: +	rhashtable_destroy(&ppe->l2_flows); +	return NULL; +} + +void mtk_ppe_deinit(struct mtk_eth *eth) +{ +	int i; + +	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) { +		if (!eth->ppe[i]) +			return; +		rhashtable_destroy(&eth->ppe[i]->l2_flows); +	}  }  static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index 0b7a67a958e4..a09c32539bcc 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -304,6 +304,7 @@ struct mtk_ppe {  struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,  			     int version, int index); +void mtk_ppe_deinit(struct mtk_eth *eth);  void mtk_ppe_start(struct mtk_ppe *ppe);  int mtk_ppe_stop(struct mtk_ppe *ppe); diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 7e890f81148e..7050351250b7 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1026,6 +1026,8 @@ static int mtk_star_enable(struct net_device *ndev)  	return 0;  err_free_irq: +	napi_disable(&priv->rx_napi); +	napi_disable(&priv->tx_napi);  	free_irq(ndev->irq, ndev);  err_free_skbs:  	mtk_star_free_rx_skbs(priv); diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 099b6e0df619..65e01bf4b4d2 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -1072,16 +1072,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,  	pdev = of_find_device_by_node(np);  	if (!pdev) -		return; +		goto err_of_node_put;  	get_device(&pdev->dev);  	irq = platform_get_irq(pdev, 0);  	if (irq < 0) -		return; +		goto err_put_device;  	regs = syscon_regmap_lookup_by_phandle(np, NULL);  	if (IS_ERR(regs)) -		return; +		goto err_put_device;
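The mtk_wed_add_hw() error labels above pair each acquired reference with a put: of_find_device_by_node() returns a platform device with an elevated refcount, and the function also consumes the caller's reference on the device_node, so both must be dropped on every failure path. Skeleton (my_setup() stands in for the real probing):

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/device.h>

int my_setup(struct platform_device *pdev);	/* placeholder */

static void my_add_hw(struct device_node *np)	/* consumes the np reference */
{
	struct platform_device *pdev;

	pdev = of_find_device_by_node(np);	/* takes a device reference */
	if (!pdev)
		goto err_of_node_put;

	if (my_setup(pdev))
		goto err_put_device;

	return;			/* success: references kept until teardown */

err_put_device:
	put_device(&pdev->dev);
err_of_node_put:
	of_node_put(np);
}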
 	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); @@ -1124,8 +1124,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,  	hw_list[index] = hw; +	mutex_unlock(&hw_lock); + +	return; +  unlock:  	mutex_unlock(&hw_lock); +err_put_device: +	put_device(&pdev->dev); +err_of_node_put: +	of_node_put(np);  }  void mtk_wed_exit(void) @@ -1146,6 +1154,7 @@ void mtk_wed_exit(void)  		hw_list[i] = NULL;  		debugfs_remove(hw->debugfs_dir);  		put_device(hw->dev); +		of_node_put(hw->node);  		kfree(hw);  	}  } diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index b149e601f673..48cfaa7eaf50 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -697,7 +697,8 @@ static int mlx4_create_zones(struct mlx4_dev *dev,  			err = mlx4_bitmap_init(*bitmap + k, 1,  					       MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,  					       0); -			mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0); +			if (!err) +				mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);  		}  		if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 0377392848d9..e7a894ba5c3e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -45,6 +45,8 @@  #include "mlx5_core.h"  #include "lib/eq.h"  #include "lib/tout.h" +#define CREATE_TRACE_POINTS +#include "diag/cmd_tracepoint.h"  enum {  	CMD_IF_REV = 5, @@ -785,27 +787,14 @@ EXPORT_SYMBOL(mlx5_cmd_out_err);  static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)  {  	u16 opcode, op_mod; -	u32 syndrome; -	u8  status;  	u16 uid; -	int err; - -	syndrome = MLX5_GET(mbox_out, out, syndrome); -	status = MLX5_GET(mbox_out, out, status);  	opcode = MLX5_GET(mbox_in, in, opcode);  	op_mod = MLX5_GET(mbox_in, in, op_mod);  	uid    = MLX5_GET(mbox_in, in, uid); -	err = cmd_status_to_err(status); -  	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)  		mlx5_cmd_out_err(dev, opcode, op_mod, out); -	else -		mlx5_core_dbg(dev, -			"%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n", -			mlx5_command_str(opcode), opcode, op_mod, uid, -			cmd_status_str(status), status, syndrome, err);  }  int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out) @@ -1016,6 +1005,7 @@ static void cmd_work_handler(struct work_struct *work)  		cmd_ent_get(ent);  	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); +	cmd_ent_get(ent); /* for the _real_ FW event on completion */  	/* Skip sending command to fw if internal error */  	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {  		ent->ret = -ENXIO; @@ -1023,7 +1013,6 @@ static void cmd_work_handler(struct work_struct *work)  		return;  	} -	cmd_ent_get(ent); /* for the _real_ FW event on completion */  	/* ring doorbell after the descriptor is valid */  	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);  	wmb(); @@ -1508,8 +1497,8 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,  		return -EFAULT;  	err = sscanf(outlen_str, "%d", &outlen); -	if (err < 0) -		return err; +	if (err != 1) +		return -EINVAL;  	ptr = kzalloc(outlen, GFP_KERNEL);  	if (!ptr) @@ -1672,8 +1661,8 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force  				cmd_ent_put(ent); /* timeout work was canceled */  			if (!forced || /* Real FW completion */ -			    pci_channel_offline(dev->pdev) || /* FW is inaccessible */ -			    
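The outlen_write() fix above corrects a common sscanf() misunderstanding: it returns the number of conversions performed (0 for unparsable input), never a negative errno, so the old "if (err < 0)" check could not fail. The robust form:

#include <linux/kernel.h>
#include <linux/errno.h>

static int my_parse_outlen(const char *buf, int *outlen)
{
	/* demand exactly one converted field */
	if (sscanf(buf, "%d", outlen) != 1)
		return -EINVAL;

	return 0;
}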
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) +			     mlx5_cmd_is_down(dev) || /* No real FW completion is expected */ +			     !opcode_allowed(cmd, ent->op))  				cmd_ent_put(ent);  			ent->ts2 = ktime_get_ns(); @@ -1770,12 +1759,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)  	struct mlx5_cmd *cmd = &dev->cmd;  	int i; -	for (i = 0; i < cmd->max_reg_cmds; i++) -		while (down_trylock(&cmd->sem)) +	for (i = 0; i < cmd->max_reg_cmds; i++) { +		while (down_trylock(&cmd->sem)) {  			mlx5_cmd_trigger_completions(dev); +			cond_resched(); +		} +	} -	while (down_trylock(&cmd->pages_sem)) +	while (down_trylock(&cmd->pages_sem)) {  		mlx5_cmd_trigger_completions(dev); +		cond_resched(); +	}  	/* Unlock cmdif */  	up(&cmd->pages_sem); @@ -1887,6 +1881,16 @@ out_in:  	return err;  } +static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out) +{ +	u32 syndrome = MLX5_GET(mbox_out, out, syndrome); +	u8 status = MLX5_GET(mbox_out, out, status); + +	trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod, +		       cmd_status_str(status), status, syndrome, +		       cmd_status_to_err(status)); +} +  static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,  			   u32 syndrome, int err)  { @@ -1909,7 +1913,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,  }  /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */ -static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out) +static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out)  {  	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);  	u8 status = MLX5_GET(mbox_out, out, status); @@ -1917,8 +1921,10 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *  	if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */  		err = -EIO; -	if (!err && status != MLX5_CMD_STAT_OK) +	if (!err && status != MLX5_CMD_STAT_OK) {  		err = -EREMOTEIO; +		mlx5_cmd_err_trace(dev, opcode, op_mod, out); +	}  	cmd_status_log(dev, opcode, status, syndrome, err);  	return err; @@ -1946,9 +1952,9 @@ int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int  {  	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);  	u16 opcode = MLX5_GET(mbox_in, in, opcode); +	u16 op_mod = MLX5_GET(mbox_in, in, op_mod); -	err = cmd_status_err(dev, err, opcode, out); -	return err; +	return cmd_status_err(dev, err, opcode, op_mod, out);  }  EXPORT_SYMBOL(mlx5_cmd_do); @@ -1992,8 +1998,9 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,  {  	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);  	u16 opcode = MLX5_GET(mbox_in, in, opcode); +	u16 op_mod = MLX5_GET(mbox_in, in, op_mod); -	err = cmd_status_err(dev, err, opcode, out); +	err = cmd_status_err(dev, err, opcode, op_mod, out);  	return mlx5_cmd_check(dev, err, in, out);  }  EXPORT_SYMBOL(mlx5_cmd_exec_polling); @@ -2004,7 +2011,7 @@ void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,  	ctx->dev = dev;  	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */  	atomic_set(&ctx->num_inflight, 1); -	init_waitqueue_head(&ctx->wait); +	init_completion(&ctx->inflight_done);  }  EXPORT_SYMBOL(mlx5_cmd_init_async_ctx); @@ -2018,8 +2025,8 @@ EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);   */  void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)  { -	atomic_dec(&ctx->num_inflight); -	wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0); +	if 
(!atomic_dec_and_test(&ctx->num_inflight)) +		wait_for_completion(&ctx->inflight_done);  }  EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx); @@ -2029,10 +2036,10 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)  	struct mlx5_async_ctx *ctx;  	ctx = work->ctx; -	status = cmd_status_err(ctx->dev, status, work->opcode, work->out); +	status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);  	work->user_callback(status, work);  	if (atomic_dec_and_test(&ctx->num_inflight)) -		wake_up(&ctx->wait); +		complete(&ctx->inflight_done);  }  int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, @@ -2044,13 +2051,14 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,  	work->ctx = ctx;  	work->user_callback = callback;  	work->opcode = MLX5_GET(mbox_in, in, opcode); +	work->op_mod = MLX5_GET(mbox_in, in, op_mod);  	work->out = out;  	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))  		return -EIO;  	ret = cmd_exec(ctx->dev, in, in_size, out, out_size,  		       mlx5_cmd_exec_cb_handler, work, false);  	if (ret && atomic_dec_and_test(&ctx->num_inflight)) -		wake_up(&ctx->wait); +		complete(&ctx->inflight_done);  	return ret;  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h new file mode 100644 index 000000000000..406ebe17405f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mlx5 + +#if !defined(_MLX5_CMD_TP_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _MLX5_CMD_TP_H_ + +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> + +TRACE_EVENT(mlx5_cmd, +	    TP_PROTO(const char *command_str, u16 opcode, u16 op_mod, +		     const char *status_str, u8 status, u32 syndrome, int err), +	    TP_ARGS(command_str, opcode, op_mod, status_str, status, syndrome, err), +	    TP_STRUCT__entry(__string(command_str, command_str) +			     __field(u16, opcode) +			     __field(u16, op_mod) +			    __string(status_str, status_str) +			    __field(u8, status) +			    __field(u32, syndrome) +			    __field(int, err) +			    ), +	    TP_fast_assign(__assign_str(command_str, command_str); +			__entry->opcode = opcode; +			__entry->op_mod = op_mod; +			__assign_str(status_str, status_str); +			__entry->status = status; +			__entry->syndrome = syndrome; +			__entry->err = err; +	    ), +	    TP_printk("%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)", +		      __get_str(command_str), __entry->opcode, __entry->op_mod, +		      __get_str(status_str), __entry->status, __entry->syndrome, +		      __entry->err) +); + +#endif /* _MLX5_CMD_TP_H_ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ./diag +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cmd_tracepoint +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 978a2bb8e122..21831386b26e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -638,7 +638,7 @@ static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer,  			trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |  					  (str_frmt->timestamp & MASK_6_0);  		
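The mlx5 async-context conversion above (wait queue to completion) is a reusable shutdown idiom: num_inflight starts at 1 so the cleanup path holds the final reference, and whichever context drops the count to zero signals completion. Sketched with invented names:

#include <linux/atomic.h>
#include <linux/completion.h>

struct my_ctx {
	atomic_t num_inflight;
	struct completion inflight_done;
};

static void my_ctx_init(struct my_ctx *ctx)
{
	atomic_set(&ctx->num_inflight, 1);	/* cleanup's own reference */
	init_completion(&ctx->inflight_done);
}

static void my_work_done(struct my_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->num_inflight))
		complete(&ctx->inflight_done);	/* last one out signals */
}

static void my_ctx_cleanup(struct my_ctx *ctx)
{
	/* drop the initial reference; wait only if work is still in flight */
	if (!atomic_dec_and_test(&ctx->num_inflight))
		wait_for_completion(&ctx->inflight_done);
}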
else -			trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) | +			trace_timestamp = ((timestamp_event.timestamp - 1) & MASK_52_7) |  					  (str_frmt->timestamp & MASK_6_0);  		mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h index 5bce554e131a..cc7efde88ac3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h @@ -6,6 +6,7 @@  #include "en.h"  #include "en_stats.h" +#include "en/txrx.h"  #include <linux/ptp_classify.h>  #define MLX5E_PTP_CHANNEL_IX 0 @@ -68,6 +69,14 @@ static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)  		fk.ports.dst == htons(PTP_EV_PORT));  } +static inline bool mlx5e_ptpsq_fifo_has_room(struct mlx5e_txqsq *sq) +{ +	if (!sq->ptpsq) +		return true; + +	return mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo); +} +  int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,  		   u8 lag_port, struct mlx5e_ptp **cp);  void mlx5e_ptp_close(struct mlx5e_ptp *c); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index 39ef2a2561a3..8099a21e674c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -164,6 +164,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr  	return err;  } +static int +mlx5_esw_bridge_changeupper_validate_netdev(void *ptr) +{ +	struct net_device *dev = netdev_notifier_info_to_dev(ptr); +	struct netdev_notifier_changeupper_info *info = ptr; +	struct net_device *upper = info->upper_dev; +	struct net_device *lower; +	struct list_head *iter; + +	if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev)) +		return 0; + +	netdev_for_each_lower_dev(dev, lower, iter) { +		struct mlx5_core_dev *mdev; +		struct mlx5e_priv *priv; + +		if (!mlx5e_eswitch_rep(lower)) +			continue; + +		priv = netdev_priv(lower); +		mdev = priv->mdev; +		if (!mlx5_lag_is_active(mdev)) +			return -EAGAIN; +		if (!mlx5_lag_is_shared_fdb(mdev)) +			return -EOPNOTSUPP; +	} + +	return 0; +} +  static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,  						unsigned long event, void *ptr)  { @@ -171,6 +201,7 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,  	switch (event) {  	case NETDEV_PRECHANGEUPPER: +		err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);  		break;  	case NETDEV_CHANGEUPPER: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c index 305fde62a78d..3337241cfd84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c @@ -6,70 +6,42 @@  #include "en/tc_priv.h"  #include "mlx5_core.h" -/* Must be aligned with enum flow_action_id. 
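The fw_tracer one-liner above is pure operator ordering: MASK_52_7 keeps bits 52..7, and bits 6..0 must stay clear because they are filled in from the event's own low bits afterwards. Subtracting after masking borrows into exactly those bits; subtracting before masking cannot. With concrete numbers (the mask value is assumed from its name):

#include <linux/bits.h>
#include <linux/types.h>

static void my_demo(void)
{
	u64 t = 0x100;				/* bits 6..0 clear */
	u64 mask = GENMASK_ULL(52, 7);

	/* buggy: (t & mask) - 1 == 0x0ff -> bits 6..0 are now 0x7f */
	/* fixed: (t - 1) & mask == 0x080 -> bits 6..0 remain clear */
}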
*/  static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = { -	&mlx5e_tc_act_accept, -	&mlx5e_tc_act_drop, -	&mlx5e_tc_act_trap, -	&mlx5e_tc_act_goto, -	&mlx5e_tc_act_mirred, -	&mlx5e_tc_act_mirred, -	&mlx5e_tc_act_redirect_ingress, -	NULL, /* FLOW_ACTION_MIRRED_INGRESS, */ -	&mlx5e_tc_act_vlan, -	&mlx5e_tc_act_vlan, -	&mlx5e_tc_act_vlan_mangle, -	&mlx5e_tc_act_tun_encap, -	&mlx5e_tc_act_tun_decap, -	&mlx5e_tc_act_pedit, -	&mlx5e_tc_act_pedit, -	&mlx5e_tc_act_csum, -	NULL, /* FLOW_ACTION_MARK, */ -	&mlx5e_tc_act_ptype, -	NULL, /* FLOW_ACTION_PRIORITY, */ -	NULL, /* FLOW_ACTION_WAKE, */ -	NULL, /* FLOW_ACTION_QUEUE, */ -	&mlx5e_tc_act_sample, -	&mlx5e_tc_act_police, -	&mlx5e_tc_act_ct, -	NULL, /* FLOW_ACTION_CT_METADATA, */ -	&mlx5e_tc_act_mpls_push, -	&mlx5e_tc_act_mpls_pop, -	NULL, /* FLOW_ACTION_MPLS_MANGLE, */ -	NULL, /* FLOW_ACTION_GATE, */ -	NULL, /* FLOW_ACTION_PPPOE_PUSH, */ -	NULL, /* FLOW_ACTION_JUMP, */ -	NULL, /* FLOW_ACTION_PIPE, */ -	&mlx5e_tc_act_vlan, -	&mlx5e_tc_act_vlan, +	[FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept, +	[FLOW_ACTION_DROP] = &mlx5e_tc_act_drop, +	[FLOW_ACTION_TRAP] = &mlx5e_tc_act_trap, +	[FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto, +	[FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred, +	[FLOW_ACTION_MIRRED] = &mlx5e_tc_act_mirred, +	[FLOW_ACTION_REDIRECT_INGRESS] = &mlx5e_tc_act_redirect_ingress, +	[FLOW_ACTION_VLAN_PUSH] = &mlx5e_tc_act_vlan, +	[FLOW_ACTION_VLAN_POP] = &mlx5e_tc_act_vlan, +	[FLOW_ACTION_VLAN_MANGLE] = &mlx5e_tc_act_vlan_mangle, +	[FLOW_ACTION_TUNNEL_ENCAP] = &mlx5e_tc_act_tun_encap, +	[FLOW_ACTION_TUNNEL_DECAP] = &mlx5e_tc_act_tun_decap, +	[FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit, +	[FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit, +	[FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum, +	[FLOW_ACTION_PTYPE] = &mlx5e_tc_act_ptype, +	[FLOW_ACTION_SAMPLE] = &mlx5e_tc_act_sample, +	[FLOW_ACTION_POLICE] = &mlx5e_tc_act_police, +	[FLOW_ACTION_CT] = &mlx5e_tc_act_ct, +	[FLOW_ACTION_MPLS_PUSH] = &mlx5e_tc_act_mpls_push, +	[FLOW_ACTION_MPLS_POP] = &mlx5e_tc_act_mpls_pop, +	[FLOW_ACTION_VLAN_PUSH_ETH] = &mlx5e_tc_act_vlan, +	[FLOW_ACTION_VLAN_POP_ETH] = &mlx5e_tc_act_vlan,  }; -/* Must be aligned with enum flow_action_id. 
*/  static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = { -	&mlx5e_tc_act_accept, -	&mlx5e_tc_act_drop, -	NULL, /* FLOW_ACTION_TRAP, */ -	&mlx5e_tc_act_goto, -	&mlx5e_tc_act_mirred_nic, -	NULL, /* FLOW_ACTION_MIRRED, */ -	NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */ -	NULL, /* FLOW_ACTION_MIRRED_INGRESS, */ -	NULL, /* FLOW_ACTION_VLAN_PUSH, */ -	NULL, /* FLOW_ACTION_VLAN_POP, */ -	NULL, /* FLOW_ACTION_VLAN_MANGLE, */ -	NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */ -	NULL, /* FLOW_ACTION_TUNNEL_DECAP, */ -	&mlx5e_tc_act_pedit, -	&mlx5e_tc_act_pedit, -	&mlx5e_tc_act_csum, -	&mlx5e_tc_act_mark, -	NULL, /* FLOW_ACTION_PTYPE, */ -	NULL, /* FLOW_ACTION_PRIORITY, */ -	NULL, /* FLOW_ACTION_WAKE, */ -	NULL, /* FLOW_ACTION_QUEUE, */ -	NULL, /* FLOW_ACTION_SAMPLE, */ -	NULL, /* FLOW_ACTION_POLICE, */ -	&mlx5e_tc_act_ct, +	[FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept, +	[FLOW_ACTION_DROP] = &mlx5e_tc_act_drop, +	[FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto, +	[FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred_nic, +	[FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit, +	[FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit, +	[FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum, +	[FLOW_ACTION_MARK] = &mlx5e_tc_act_mark, +	[FLOW_ACTION_CT] = &mlx5e_tc_act_ct,  };  /** diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index 10c9a8a79d00..2e42d7c5451e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -96,6 +96,7 @@ struct mlx5e_tc_flow {  	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];  	struct mlx5e_tc_flow *peer_flow;  	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */ +	struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */  	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */  	struct list_head hairpin; /* flows sharing the same hairpin */  	struct list_head peer;    /* flows with peer flow */ @@ -111,6 +112,7 @@ struct mlx5e_tc_flow {  	struct completion del_hw_done;  	struct mlx5_flow_attr *attr;  	struct list_head attrs; +	u32 chain_mapping;  };  struct mlx5_flow_handle * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 5aff97914367..ff73d25bc6eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -224,15 +224,16 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,  	list_for_each_entry(flow, flow_list, tmp_list) {  		if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))  			continue; -		spec = &flow->attr->parse_attr->spec; - -		/* update from encap rule to slow path rule */ -		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);  		attr = mlx5e_tc_get_encap_attr(flow);  		esw_attr = attr->esw_attr;  		/* mark the flow's encap dest as non-valid */  		esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID; +		esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL; + +		/* update from encap rule to slow path rule */ +		spec = &flow->attr->parse_attr->spec; +		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);  		if (IS_ERR(rule)) {  			err = PTR_ERR(rule); @@ -251,6 +252,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,  	/* we know that the encap is valid */  	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;  	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); +	e->pkt_reformat = NULL;  }  static void 
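The tc_acts_fdb/tc_acts_nic rewrite above swaps position-dependent tables - where every unsupported action needed a NULL placeholder plus a comment, and a new enum value silently shifted every entry after it - for designated initializers keyed on the enum itself. The idiom in miniature:

enum my_action {
	MY_ACT_ACCEPT,
	MY_ACT_DROP,
	MY_ACT_GOTO,
	NUM_MY_ACTIONS,
};

struct my_handler { int (*run)(void); };

static struct my_handler my_accept, my_drop;

/* unlisted entries are implicitly NULL; enum reordering stays safe */
static struct my_handler *my_handlers[NUM_MY_ACTIONS] = {
	[MY_ACT_ACCEPT] = &my_accept,
	[MY_ACT_DROP]	= &my_drop,
};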
mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow, @@ -762,8 +764,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,  		       struct net_device *mirred_dev,  		       int out_index,  		       struct netlink_ext_ack *extack, -		       struct net_device **encap_dev, -		       bool *encap_valid) +		       struct net_device **encap_dev)  {  	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;  	struct mlx5e_tc_flow_parse_attr *parse_attr; @@ -878,9 +879,8 @@ attach_flow:  	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {  		attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;  		attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; -		*encap_valid = true;  	} else { -		*encap_valid = false; +		flow_flag_set(flow, SLOW);  	}  	mutex_unlock(&esw->offloads.encap_tbl_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h index d542b8476491..8ad273dde40e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h @@ -17,8 +17,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,  		       struct net_device *mirred_dev,  		       int out_index,  		       struct netlink_ext_ack *extack, -		       struct net_device **encap_dev, -		       bool *encap_valid); +		       struct net_device **encap_dev);  int mlx5e_attach_decap(struct mlx5e_priv *priv,  		       struct mlx5e_tc_flow *flow, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 4456ad5cedf1..853f312cd757 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -11,6 +11,27 @@  #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) +/* IPSEC inline data includes: + * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for + *    next header. + * 2. ESP authentication data: 16 bytes for ICV. + */ +#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \ +					   255 + 1 + 1 + 16, MLX5_SEND_WQE_DS) + +/* 366 should be big enough to cover all L2, L3 and L4 headers with possible + * encapsulations. + */ +#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \ +					    MLX5_SEND_WQE_DS) + +/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */ +#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \ +					 MLX5E_MAX_TX_INLINE_DS + \ +					 MLX5E_MAX_TX_IPSEC_DS + \ +					 MAX_SKB_FRAGS + 1, \ +					 MLX5_SEND_WQEBB_NUM_DS) +  #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)  static inline @@ -58,6 +79,12 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);  void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);  static inline bool +mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo) +{ +	return (*fifo->pc - *fifo->cc) < fifo->mask; +} + +static inline bool  mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)  {  	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); @@ -418,6 +445,8 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,  static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)  { +	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev)); +  	/* A WQE must not cross the page boundary, hence two conditions:  	 * 1. Its size must not exceed the page size.  	 * 2. 
If the WQE size is X, and the space remaining in a page is less @@ -430,7 +459,6 @@ static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_si  		  "wqe_size %u is greater than max SQ WQEBBs %u",  		  wqe_size, mlx5e_get_max_sq_wqebbs(mdev)); -  	return MLX5E_STOP_ROOM(wqe_size);  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 4685c652c97e..20507ef2f956 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -117,7 +117,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,  	xdpi.page.rq = rq;  	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf); -	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE); +	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_BIDIRECTIONAL);  	if (unlikely(xdp_frame_has_frags(xdpf))) {  		sinfo = xdp_get_shared_info_from_frame(xdpf); @@ -131,7 +131,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,  				skb_frag_off(frag);  			len = skb_frag_size(frag);  			dma_sync_single_for_device(sq->pdev, addr, len, -						   DMA_TO_DEVICE); +						   DMA_BIDIRECTIONAL);  		}  	} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c index 285d32d2fd08..d7c020f72401 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c @@ -365,7 +365,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)  	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)  		accel_fs_tcp_destroy_table(fs, i); -	kfree(accel_tcp); +	kvfree(accel_tcp);  	mlx5e_fs_set_accel_tcp(fs, NULL);  } @@ -397,7 +397,7 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)  err_destroy_tables:  	while (--i >= 0)  		accel_fs_tcp_destroy_table(fs, i); -	kfree(accel_tcp); +	kvfree(accel_tcp);  	mlx5e_fs_set_accel_tcp(fs, NULL);  	return err;  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 2a8fd7020622..a715601865d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -101,7 +101,6 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)  	struct xfrm_replay_state_esn *replay_esn;  	u32 seq_bottom = 0;  	u8 overlap; -	u32 *esn;  	if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {  		sa_entry->esn_state.trigger = 0; @@ -116,11 +115,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)  	sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,  						    htonl(seq_bottom)); -	esn = &sa_entry->esn_state.esn;  	sa_entry->esn_state.trigger = 1;  	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) { -		++(*esn);  		sa_entry->esn_state.overlap = 0;  		return true;  	} else if (unlikely(!overlap && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c index 41970067917b..f900709639f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -368,15 +368,15 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,  	obj_attrs.aso_pdn = macsec->aso.pdn;  	obj_attrs.epn_state = sa->epn_state; -	if (is_tx) { -		
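The fs_tcp.c kfree-to-kvfree changes above pair the free with the allocator actually used: kvzalloc()/kvcalloc() may return vmalloc-backed memory for large sizes, and only kvfree() handles both backings; kfree() on a vmalloc pointer corrupts memory. A sketch of the pairing:

#include <linux/slab.h>

struct my_table { int dummy; };

static struct my_table *my_tables_alloc(size_t n)
{
	/* may be slab- or vmalloc-backed, depending on n ... */
	return kvcalloc(n, sizeof(struct my_table), GFP_KERNEL);
}

static void my_tables_free(struct my_table *t)
{
	kvfree(t);	/* ... so the free must be kvfree(), not kfree() */
}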
obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci); -		key = &ctx->sa.tx_sa->key; -	} else { -		obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci); -		key = &ctx->sa.rx_sa->key; +	key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key; + +	if (sa->epn_state.epn_enabled) { +		obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) : +					   cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci); + +		memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));  	} -	memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));  	obj_attrs.replay_window = ctx->secy->replay_window;  	obj_attrs.replay_protect = ctx->secy->replay_protect; @@ -427,15 +427,15 @@ mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)  	return NULL;  } -static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec, -				     struct mlx5e_macsec_sa *rx_sa, -				     bool active) +static int macsec_rx_sa_active_update(struct macsec_context *ctx, +				      struct mlx5e_macsec_sa *rx_sa, +				      bool active)  { -	struct mlx5_core_dev *mdev = macsec->mdev; -	struct mlx5_macsec_obj_attrs attrs; +	struct mlx5e_priv *priv = netdev_priv(ctx->netdev); +	struct mlx5e_macsec *macsec = priv->macsec;  	int err = 0; -	if (rx_sa->active != active) +	if (rx_sa->active == active)  		return 0;  	rx_sa->active = active; @@ -444,13 +444,11 @@ static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,  		return 0;  	} -	attrs.sci = rx_sa->sci; -	attrs.enc_key_id = rx_sa->enc_key_id; -	err = mlx5e_macsec_create_object(mdev, &attrs, false, &rx_sa->macsec_obj_id); +	err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);  	if (err) -		return err; +		rx_sa->active = false; -	return 0; +	return err;  }  static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx) @@ -476,6 +474,11 @@ static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)  		return false;  	} +	if (!ctx->secy->tx_sc.encrypt) { +		netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n"); +		return false; +	} +  	return true;  } @@ -620,6 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)  	if (tx_sa->active == ctx_tx_sa->active)  		goto out; +	tx_sa->active = ctx_tx_sa->active;  	if (tx_sa->assoc_num != tx_sc->encoding_sa)  		goto out; @@ -635,8 +639,6 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)  		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);  	} - -	tx_sa->active = ctx_tx_sa->active;  out:  	mutex_unlock(&macsec->lock); @@ -736,9 +738,14 @@ static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)  	sc_xarray_element->rx_sc = rx_sc;  	err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element, -		       XA_LIMIT(1, USHRT_MAX), GFP_KERNEL); -	if (err) +		       XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL); +	if (err) { +		if (err == -EBUSY) +			netdev_err(ctx->netdev, +				   "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n", +				   MLX5_MACEC_RX_FS_ID_MAX);  		goto destroy_sc_xarray_elemenet; +	}  	rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);  	if (!rx_sc->md_dst) { @@ -798,16 +805,16 @@ static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)  		goto out;  	} -	rx_sc->active = ctx_rx_sc->active;  	if (rx_sc->active == ctx_rx_sc->active)  		goto out; +	rx_sc->active = ctx_rx_sc->active;  	for (i = 0; i < MACSEC_NUM_AN; ++i) {  		rx_sa = rx_sc->rx_sa[i];  		if (!rx_sa)  			continue; -		err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, rx_sa->active 
&& ctx_rx_sc->active); +		err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active);  		if (err)  			goto out;  	} @@ -818,16 +825,43 @@ out:  	return err;  } +static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc) +{ +	struct mlx5e_macsec_sa *rx_sa; +	int i; + +	for (i = 0; i < MACSEC_NUM_AN; ++i) { +		rx_sa = rx_sc->rx_sa[i]; +		if (!rx_sa) +			continue; + +		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); +		mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id); + +		kfree(rx_sa); +		rx_sc->rx_sa[i] = NULL; +	} + +	/* At this point the relevant MACsec offload Rx rule already removed at +	 * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current +	 * Rx related data propagating using xa_erase which uses rcu to sync, +	 * once fs_id is erased then this rx_sc is hidden from datapath. +	 */ +	list_del_rcu(&rx_sc->rx_sc_list_element); +	xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id); +	metadata_dst_free(rx_sc->md_dst); +	kfree(rx_sc->sc_xarray_element); +	kfree_rcu(rx_sc); +} +  static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)  {  	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);  	struct mlx5e_macsec_device *macsec_device;  	struct mlx5e_macsec_rx_sc *rx_sc; -	struct mlx5e_macsec_sa *rx_sa;  	struct mlx5e_macsec *macsec;  	struct list_head *list;  	int err = 0; -	int i;  	mutex_lock(&priv->macsec->lock); @@ -849,31 +883,7 @@ static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)  		goto out;  	} -	for (i = 0; i < MACSEC_NUM_AN; ++i) { -		rx_sa = rx_sc->rx_sa[i]; -		if (!rx_sa) -			continue; - -		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); -		mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id); - -		kfree(rx_sa); -		rx_sc->rx_sa[i] = NULL; -	} - -/* - * At this point the relevant MACsec offload Rx rule already removed at - * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current - * Rx related data propagating using xa_erase which uses rcu to sync, - * once fs_id is erased then this rx_sc is hidden from datapath. 
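 * (This is the pre-refactor copy of the ordering note; the same text
 *  moved into macsec_del_rxsc_ctx() above. The retire sequence it
 *  documents is: list_del_rcu() to unlink the SC, xa_erase() so the
 *  fs_id lookup can no longer find it, then kfree_rcu() so any reader
 *  still inside an RCU read-side section stays safe until a grace
 *  period elapses.)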
- */ -	list_del_rcu(&rx_sc->rx_sc_list_element); -	xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id); -	metadata_dst_free(rx_sc->md_dst); -	kfree(rx_sc->sc_xarray_element); - -	kfree_rcu(rx_sc); - +	macsec_del_rxsc_ctx(macsec, rx_sc);  out:  	mutex_unlock(&macsec->lock); @@ -999,11 +1009,11 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)  	}  	rx_sa = rx_sc->rx_sa[assoc_num]; -	if (rx_sa) { +	if (!rx_sa) {  		netdev_err(ctx->netdev, -			   "MACsec offload rx_sc sci %lld rx_sa %d already exist\n", +			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",  			   sci, assoc_num); -		err = -EEXIST; +		err = -EINVAL;  		goto out;  	} @@ -1015,7 +1025,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)  		goto out;  	} -	err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, ctx_rx_sa->active); +	err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active);  out:  	mutex_unlock(&macsec->lock); @@ -1055,11 +1065,11 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)  	}  	rx_sa = rx_sc->rx_sa[assoc_num]; -	if (rx_sa) { +	if (!rx_sa) {  		netdev_err(ctx->netdev, -			   "MACsec offload rx_sc sci %lld rx_sa %d already exist\n", +			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",  			   sci, assoc_num); -		err = -EEXIST; +		err = -EINVAL;  		goto out;  	} @@ -1155,7 +1165,7 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,  				continue;  			if (rx_sa->active) { -				err = mlx5e_macsec_init_sa(ctx, rx_sa, false, false); +				err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);  				if (err)  					goto out;  			} @@ -1234,7 +1244,6 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx)  	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);  	struct mlx5e_macsec_device *macsec_device;  	struct mlx5e_macsec_rx_sc *rx_sc, *tmp; -	struct mlx5e_macsec_sa *rx_sa;  	struct mlx5e_macsec_sa *tx_sa;  	struct mlx5e_macsec *macsec;  	struct list_head *list; @@ -1263,28 +1272,15 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx)  	}  	list = &macsec_device->macsec_rx_sc_list_head; -	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) { -		for (i = 0; i < MACSEC_NUM_AN; ++i) { -			rx_sa = rx_sc->rx_sa[i]; -			if (!rx_sa) -				continue; - -			mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); -			mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id); -			kfree(rx_sa); -			rx_sc->rx_sa[i] = NULL; -		} - -		list_del_rcu(&rx_sc->rx_sc_list_element); - -		kfree_rcu(rx_sc); -	} +	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) +		macsec_del_rxsc_ctx(macsec, rx_sc);  	kfree(macsec_device->dev_addr);  	macsec_device->dev_addr = NULL;  	list_del_rcu(&macsec_device->macsec_device_list_element);  	--macsec->num_of_devices; +	kfree(macsec_device);  out:  	mutex_unlock(&macsec->lock); @@ -1536,6 +1532,8 @@ static void macsec_async_event(struct work_struct *work)  	async_work = container_of(work, struct mlx5e_macsec_async_work, work);  	macsec = async_work->macsec; +	mutex_lock(&macsec->lock); +  	mdev = async_work->mdev;  	obj_id = async_work->obj_id;  	macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id); @@ -1557,6 +1555,7 @@ static void macsec_async_event(struct work_struct *work)  out_async_work:  	kfree(async_work); +	mutex_unlock(&macsec->lock);  }  static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data) @@ -1745,7 +1744,7 @@ void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,  	if (!macsec)  		return; -	fs_id = 
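/* Aside — macsec_async_event() above now runs its whole body under
 * macsec->lock. A minimal standalone sketch of that pattern, with
 * hypothetical names: a deferred worker must take the same mutex as
 * the control path before dereferencing objects the control path can
 * free concurrently.
 */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {
	struct mutex lock;	/* serializes control path vs. workers */
};

struct my_async_work {
	struct work_struct work;
	struct my_dev *dev;
	u32 obj_id;
};

static void my_event_work(struct work_struct *work)
{
	struct my_async_work *w = container_of(work, struct my_async_work,
					       work);

	mutex_lock(&w->dev->lock);
	/* Looking up w->obj_id is safe here: teardown takes the same
	 * mutex, so the object cannot disappear mid-handler.
	 */
	mutex_unlock(&w->dev->lock);
	kfree(w);
}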
MLX5_MACSEC_METADATA_HANDLE(macsec_meta_data); +	fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);  	rcu_read_lock();  	sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id); @@ -1846,25 +1845,16 @@ err_hash:  void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)  {  	struct mlx5e_macsec *macsec = priv->macsec; -	struct mlx5_core_dev *mdev = macsec->mdev; +	struct mlx5_core_dev *mdev = priv->mdev;  	if (!macsec)  		return;  	mlx5_notifier_unregister(mdev, &macsec->nb); -  	mlx5e_macsec_fs_cleanup(macsec->macsec_fs); - -	/* Cleanup workqueue */  	destroy_workqueue(macsec->wq); -  	mlx5e_macsec_aso_cleanup(&macsec->aso, mdev); - -	priv->macsec = NULL; -  	rhashtable_destroy(&macsec->sci_hash); -  	mutex_destroy(&macsec->lock); -  	kfree(macsec);  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h index d580b4a91253..347380a2cd9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h @@ -10,9 +10,11 @@  #include <net/macsec.h>  #include <net/dst_metadata.h> -/* Bit31 - 30: MACsec marker, Bit3-0: MACsec id */ +/* Bit31 - 30: MACsec marker, Bit15-0: MACsec id */ +#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */ +#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX  #define MLX5_MACSEC_METADATA_MARKER(metadata)  ((((metadata) >> 30) & 0x3)  == 0x1) -#define MLX5_MACSEC_METADATA_HANDLE(metadata)  ((metadata) & GENMASK(3, 0)) +#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata)  ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)  struct mlx5e_priv;  struct mlx5e_macsec; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c index 13dc628b988a..5b658a5588c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c @@ -250,7 +250,7 @@ static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)  	struct mlx5_flow_handle *rule;  	struct mlx5_flow_spec *spec;  	u32 *flow_group_in; -	int err = 0; +	int err;  	ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);  	if (!ns) @@ -261,8 +261,10 @@ static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)  		return -ENOMEM;  	flow_group_in = kvzalloc(inlen, GFP_KERNEL); -	if (!flow_group_in) +	if (!flow_group_in) { +		err = -ENOMEM;  		goto out_spec; +	}  	tx_tables = &tx_fs->tables;  	ft_crypto = &tx_tables->ft_crypto; @@ -898,7 +900,7 @@ static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)  	struct mlx5_flow_handle *rule;  	struct mlx5_flow_spec *spec;  	u32 *flow_group_in; -	int err = 0; +	int err;  	ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);  	if (!ns) @@ -909,8 +911,10 @@ static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)  		return -ENOMEM;  	flow_group_in = kvzalloc(inlen, GFP_KERNEL); -	if (!flow_group_in) +	if (!flow_group_in) { +		err = -ENOMEM;  		goto free_spec; +	}  	rx_tables = &rx_fs->tables;  	ft_crypto = &rx_tables->ft_crypto; @@ -1142,10 +1146,10 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,  	ft_crypto = &rx_tables->ft_crypto;  	/* Set bit[31 - 30] macsec marker - 0x01 */ -	/* Set bit[3-0] fs id */ +	/* Set bit[15-0] fs id */  	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);  	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); -	
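/* Aside — a sketch of the REG_B metadata layout assumed above, now
 * that the fs_id field is 16 bits wide instead of 4: bits 31:30 carry
 * the MACsec marker 0x1, bits 15:0 the fs_id. Helper names here are
 * hypothetical.
 */
#include <linux/bits.h>
#include <linux/types.h>

static inline bool md_is_macsec(u32 metadata)
{
	return ((metadata >> 30) & 0x3) == 0x1;
}

static inline u16 md_fs_id(u32 metadata)
{
	return metadata & GENMASK(15, 0);
}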
MLX5_SET(set_action_in, action, data, fs_id | BIT(30)); +	MLX5_SET(set_action_in, action, data, MLX5_MACSEC_RX_METADAT_HANDLE(fs_id) | BIT(30));  	MLX5_SET(set_action_in, action, offset, 0);  	MLX5_SET(set_action_in, action, length, 32); @@ -1180,7 +1184,7 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,  	rx_rule->rule[0] = rule;  	/* Rx crypto table without SCI rule */ -	if (cpu_to_be64((__force u64)attrs->sci) & ntohs(MACSEC_PORT_ES)) { +	if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) {  		memset(spec, 0, sizeof(struct mlx5_flow_spec));  		memset(&dest, 0, sizeof(struct mlx5_flow_destination));  		memset(&flow_act, 0, sizeof(flow_act)); @@ -1205,6 +1209,7 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,  		rx_rule->rule[1] = rule;  	} +	kvfree(spec);  	return macsec_rule;  err: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 24aa25da482b..1728e197558d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -35,7 +35,6 @@  #include "en.h"  #include "en/port.h"  #include "en/params.h" -#include "en/xsk/pool.h"  #include "en/ptp.h"  #include "lib/clock.h"  #include "en/fs_ethtool.h" @@ -412,15 +411,8 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,  				struct ethtool_channels *ch)  {  	mutex_lock(&priv->state_lock); -  	ch->max_combined   = priv->max_nch;  	ch->combined_count = priv->channels.params.num_channels; -	if (priv->xsk.refcnt) { -		/* The upper half are XSK queues. */ -		ch->max_combined *= 2; -		ch->combined_count *= 2; -	} -  	mutex_unlock(&priv->state_lock);  } @@ -454,16 +446,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,  	mutex_lock(&priv->state_lock); -	/* Don't allow changing the number of channels if there is an active -	 * XSK, because the numeration of the XSK and regular RQs will change. -	 */ -	if (priv->xsk.refcnt) { -		err = -EINVAL; -		netdev_err(priv->netdev, "%s: AF_XDP is active, cannot change the number of channels\n", -			   __func__); -		goto out; -	} -  	/* Don't allow changing the number of channels if HTB offload is active,  	 * because the numeration of the QoS SQs will change, while per-queue  	 * qdiscs are attached. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 364f04309149..5e41dfdf79c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -206,10 +206,11 @@ static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)  static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)  {  	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode); +	u32 sz; -	WARN_ON_ONCE(entries * umr_entry_size % MLX5_OCTWORD); +	sz = ALIGN(entries * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT); -	return entries * umr_entry_size / MLX5_OCTWORD; +	return sz / MLX5_OCTWORD;  }  static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, @@ -5694,6 +5695,13 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)  		mlx5e_fs_set_state_destroy(priv->fs,  					   !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); +	/* Validate the max_wqe_size_sq capability. 
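 * A single TX WQE must not exceed what the device reports in
 * max_wqe_size_sq: MLX5E_MAX_TX_WQEBBS is the driver's compile-time
 * worst case for WQE basic blocks per packet, so refusing to attach
 * when the firmware cap is smaller prevents ever posting an oversized
 * WQE on the hot path.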
*/ +	if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) { +		mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %lu\n", +			       mlx5e_get_max_sq_wqebbs(priv->mdev), MLX5E_MAX_TX_WQEBBS); +		return -EIO; +	} +  	/* max number of channels may have changed */  	max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);  	if (priv->channels.params.num_channels > max_nch) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 58084650151f..a61a43fc8d5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -266,7 +266,7 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_uni  	addr = page_pool_get_dma_addr(au->page);  	/* Non-XSK always uses PAGE_SIZE. */ -	dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, DMA_FROM_DEVICE); +	dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, rq->buff.map_dir);  	return true;  } @@ -282,8 +282,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_u  		return -ENOMEM;  	/* Non-XSK always uses PAGE_SIZE. */ -	addr = dma_map_page_attrs(rq->pdev, au->page, 0, PAGE_SIZE, -				  rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC); +	addr = dma_map_page(rq->pdev, au->page, 0, PAGE_SIZE, rq->buff.map_dir);  	if (unlikely(dma_mapping_error(rq->pdev, addr))) {  		page_pool_recycle_direct(rq->page_pool, au->page);  		au->page = NULL; @@ -427,14 +426,15 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,  {  	dma_addr_t addr = page_pool_get_dma_addr(au->page); -	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, DMA_FROM_DEVICE); +	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, +				rq->buff.map_dir);  	page_ref_inc(au->page);  	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,  			au->page, frag_offset, len, truesize);  }  static inline void -mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb, +mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,  		      struct page *page, dma_addr_t addr,  		      int offset_from, int dma_offset, u32 headlen)  { @@ -442,7 +442,8 @@ mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,  	/* Aligning len to sizeof(long) optimizes memcpy performance */  	unsigned int len = ALIGN(headlen, sizeof(long)); -	dma_sync_single_for_cpu(pdev, addr + dma_offset, len, DMA_FROM_DEVICE); +	dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len, +				rq->buff.map_dir);  	skb_copy_to_linear_data(skb, from, len);  } @@ -1538,7 +1539,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,  	addr = page_pool_get_dma_addr(au->page);  	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset, -				      frag_size, DMA_FROM_DEVICE); +				      frag_size, rq->buff.map_dir);  	net_prefetch(data);  	prog = rcu_dereference(rq->xdp_prog); @@ -1587,7 +1588,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi  	addr = page_pool_get_dma_addr(au->page);  	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset, -				      rq->buff.frame0_sz, DMA_FROM_DEVICE); +				      rq->buff.frame0_sz, rq->buff.map_dir);  	net_prefetchw(va); /* xdp_frame data area */  	net_prefetch(va + rx_headroom); @@ -1608,7 +1609,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi  		addr = page_pool_get_dma_addr(au->page);  		dma_sync_single_for_cpu(rq->pdev, addr + wi->offset, -					
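/* Aside — the en_rx.c hunks here replace hardcoded DMA_FROM_DEVICE
 * syncs with rq->buff.map_dir, i.e. the direction the page was
 * actually mapped with. A minimal sketch of why (hypothetical helper):
 * with XDP attached the CPU may also write into the buffer, so the
 * mapping and every subsequent sync must be bidirectional.
 */
#include <linux/dma-mapping.h>

static enum dma_data_direction rx_map_dir(bool xdp_attached)
{
	return xdp_attached ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
}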
frag_consumed_bytes, DMA_FROM_DEVICE); +					frag_consumed_bytes, rq->buff.map_dir);  		if (!xdp_buff_has_frags(&xdp)) {  			/* Init on the first fragment to avoid cold cache access @@ -1905,7 +1906,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w  	mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);  	/* copy header */  	addr = page_pool_get_dma_addr(head_au->page); -	mlx5e_copy_skb_header(rq->pdev, skb, head_au->page, addr, +	mlx5e_copy_skb_header(rq, skb, head_au->page, addr,  			      head_offset, head_offset, headlen);  	/* skb linear part was allocated with headlen and aligned to long */  	skb->tail += headlen; @@ -1939,7 +1940,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,  	addr = page_pool_get_dma_addr(au->page);  	dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset, -				      frag_size, DMA_FROM_DEVICE); +				      frag_size, rq->buff.map_dir);  	net_prefetch(data);  	prog = rcu_dereference(rq->xdp_prog); @@ -1987,7 +1988,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,  	if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {  		/* build SKB around header */ -		dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE); +		dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);  		prefetchw(hdr);  		prefetch(data);  		skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0); @@ -2009,7 +2010,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,  		}  		prefetchw(skb->data); -		mlx5e_copy_skb_header(rq->pdev, skb, head->page, head->addr, +		mlx5e_copy_skb_header(rq, skb, head->page, head->addr,  				      head_offset + rx_headroom,  				      rx_headroom, head_size);  		/* skb linear part was allocated with headlen and aligned to long */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 70a7a61f9708..bd9936af4582 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1405,8 +1405,13 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,  			      struct mlx5e_tc_flow *flow,  			      struct mlx5_flow_spec *spec)  { +	struct mlx5e_tc_mod_hdr_acts mod_acts = {}; +	struct mlx5e_mod_hdr_handle *mh = NULL;  	struct mlx5_flow_attr *slow_attr;  	struct mlx5_flow_handle *rule; +	bool fwd_and_modify_cap; +	u32 chain_mapping = 0; +	int err;  	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);  	if (!slow_attr) @@ -1417,13 +1422,56 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,  	slow_attr->esw_attr->split_count = 0;  	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH; +	fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table); +	if (!fwd_and_modify_cap) +		goto skip_restore; + +	err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping); +	if (err) +		goto err_get_chain; + +	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB, +					CHAIN_TO_REG, chain_mapping); +	if (err) +		goto err_reg_set; + +	mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow), +				  MLX5_FLOW_NAMESPACE_FDB, &mod_acts); +	if (IS_ERR(mh)) { +		err = PTR_ERR(mh); +		goto err_attach; +	} + +	slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; +	slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh); + +skip_restore:  	rule = 
mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); -	if (!IS_ERR(rule)) -		flow_flag_set(flow, SLOW); +	if (IS_ERR(rule)) { +		err = PTR_ERR(rule); +		goto err_offload; +	} +	flow->slow_mh = mh; +	flow->chain_mapping = chain_mapping; +	flow_flag_set(flow, SLOW); + +	mlx5e_mod_hdr_dealloc(&mod_acts);  	kfree(slow_attr);  	return rule; + +err_offload: +	if (fwd_and_modify_cap) +		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh); +err_attach: +err_reg_set: +	if (fwd_and_modify_cap) +		mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping); +err_get_chain: +	mlx5e_mod_hdr_dealloc(&mod_acts); +	kfree(slow_attr); +	return ERR_PTR(err);  }  void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, @@ -1441,7 +1489,17 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,  	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;  	slow_attr->esw_attr->split_count = 0;  	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH; +	if (flow->slow_mh) { +		slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; +		slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh); +	}  	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); +	if (flow->slow_mh) { +		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh); +		mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping); +		flow->chain_mapping = 0; +		flow->slow_mh = NULL; +	}  	flow_flag_clear(flow, SLOW);  	kfree(slow_attr);  } @@ -1576,7 +1634,6 @@ set_encap_dests(struct mlx5e_priv *priv,  		struct mlx5e_tc_flow *flow,  		struct mlx5_flow_attr *attr,  		struct netlink_ext_ack *extack, -		bool *encap_valid,  		bool *vf_tun)  {  	struct mlx5e_tc_flow_parse_attr *parse_attr; @@ -1593,7 +1650,6 @@ set_encap_dests(struct mlx5e_priv *priv,  	parse_attr = attr->parse_attr;  	esw_attr = attr->esw_attr;  	*vf_tun = false; -	*encap_valid = true;  	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {  		struct net_device *out_dev; @@ -1610,7 +1666,7 @@ set_encap_dests(struct mlx5e_priv *priv,  			goto out;  		}  		err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index, -					 extack, &encap_dev, encap_valid); +					 extack, &encap_dev);  		dev_put(out_dev);  		if (err)  			goto out; @@ -1674,8 +1730,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,  	struct mlx5e_tc_flow_parse_attr *parse_attr;  	struct mlx5_flow_attr *attr = flow->attr;  	struct mlx5_esw_flow_attr *esw_attr; -	bool vf_tun, encap_valid;  	u32 max_prio, max_chain; +	bool vf_tun;  	int err = 0;  	parse_attr = attr->parse_attr; @@ -1765,7 +1821,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,  		esw_attr->int_port = int_port;  	} -	err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun); +	err = set_encap_dests(priv, flow, attr, extack, &vf_tun);  	if (err)  		goto err_out; @@ -1795,7 +1851,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,  	 * (1) there's no error  	 * (2) there's an encap action and we don't have valid neigh  	 */ -	if (!encap_valid || flow_flag_test(flow, SLOW)) +	if (flow_flag_test(flow, SLOW))  		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);  	else  		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr); @@ -3575,6 +3631,14 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,  	attr2->action = 0;  	attr2->flags = 0;  	attr2->parse_attr = parse_attr; +	attr2->dest_chain = 0; +	attr2->dest_ft = NULL; + +	if (ns_type == MLX5_FLOW_NAMESPACE_FDB) { +		attr2->esw_attr->out_count = 0; +		
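/* The cloned attr inherits fields from the original, so forwarding
 * state that earlier actions already consumed (dest_chain, dest_ft,
 * and for FDB rules the out/split destination counts) is cleared
 * here; otherwise the post-action rule would offload stale
 * destinations a second time.
 */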
attr2->esw_attr->split_count = 0; +	} +  	return attr2;  } @@ -3693,7 +3757,7 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)  	struct mlx5e_post_act *post_act = get_post_action(flow->priv);  	struct mlx5_flow_attr *attr, *next_attr = NULL;  	struct mlx5e_post_act_handle *handle; -	bool vf_tun, encap_valid = true; +	bool vf_tun;  	int err;  	/* This is going in reverse order as needed. @@ -3715,13 +3779,10 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)  		if (list_is_last(&attr->list, &flow->attrs))  			break; -		err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun); +		err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);  		if (err)  			goto out_free; -		if (!encap_valid) -			flow_flag_set(flow, SLOW); -  		err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);  		if (err)  			goto out_free; @@ -4008,6 +4069,7 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,  	struct mlx5e_tc_flow_parse_attr *parse_attr;  	struct mlx5_flow_attr *attr = flow->attr;  	struct mlx5_esw_flow_attr *esw_attr; +	struct net_device *filter_dev;  	int err;  	err = flow_action_supported(flow_action, extack); @@ -4016,6 +4078,7 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,  	esw_attr = attr->esw_attr;  	parse_attr = attr->parse_attr; +	filter_dev = parse_attr->filter_dev;  	parse_state = &parse_attr->parse_state;  	mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);  	parse_state->ct_priv = get_ct_priv(priv); @@ -4025,13 +4088,21 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,  		return err;  	/* Forward to/from internal port can only have 1 dest */ -	if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) && +	if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) &&  	    esw_attr->out_count > 1) {  		NL_SET_ERR_MSG_MOD(extack,  				   "Rules with internal port can have only one destination");  		return -EOPNOTSUPP;  	} +	/* Forward from tunnel/internal port to internal port is not supported */ +	if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) && +	    esw_attr->dest_int_port) { +		NL_SET_ERR_MSG_MOD(extack, +				   "Forwarding from tunnel/internal port to internal port is not supported"); +		return -EOPNOTSUPP; +	} +  	err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);  	if (err)  		return err; @@ -4686,12 +4757,6 @@ int mlx5e_policer_validate(const struct flow_action *action,  		return -EOPNOTSUPP;  	} -	if (act->police.rate_pkt_ps) { -		NL_SET_ERR_MSG_MOD(extack, -				   "QoS offload not support packets per second"); -		return -EOPNOTSUPP; -	} -  	return 0;  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index bf2232a2a836..f7897ddb29c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -305,6 +305,8 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at  	u16 ds_cnt_inl = 0;  	u16 ds_cnt_ids = 0; +	/* Sync the calculation with MLX5E_MAX_TX_WQEBBS. 
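 * Every DS-count contribution computed below (the inline-segment ids,
 * the eseg inline headers, the per-frag data segments) feeds the
 * WQEBB total; if a new contribution is added here,
 * MLX5E_MAX_TX_WQEBBS must be re-derived to match.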
*/ +  	if (attr->insz)  		ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,  					  MLX5_SEND_WQE_DS); @@ -317,6 +319,9 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at  			inl += VLAN_HLEN;  		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS); +		if (WARN_ON_ONCE(ds_cnt_inl > MLX5E_MAX_TX_INLINE_DS)) +			netdev_warn(skb->dev, "ds_cnt_inl = %u > max %u\n", ds_cnt_inl, +				    (u16)MLX5E_MAX_TX_INLINE_DS);  		ds_cnt += ds_cnt_inl;  	} @@ -392,6 +397,11 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,  	if (unlikely(sq->ptpsq)) {  		mlx5e_skb_cb_hwtstamp_init(skb);  		mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb); +		if (!netif_tx_queue_stopped(sq->txq) && +		    !mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo)) { +			netif_tx_stop_queue(sq->txq); +			sq->stats->stopped++; +		}  		skb_get(skb);  	} @@ -868,6 +878,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)  	if (netif_tx_queue_stopped(sq->txq) &&  	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) && +	    mlx5e_ptpsq_fifo_has_room(sq) &&  	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {  		netif_tx_wake_queue(sq->txq);  		stats->wake++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index c59107fa9e6d..374e3fbdc2cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1362,6 +1362,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)  		devl_rate_nodes_destroy(devlink);  	} +	/* Destroy legacy fdb when disabling sriov in legacy mode. */ +	if (esw->mode == MLX5_ESWITCH_LEGACY) +		mlx5_eswitch_disable_locked(esw);  	esw->esw_funcs.num_vfs = 0; @@ -1387,12 +1390,14 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)  		 esw->mode == MLX5_ESWITCH_LEGACY ? 
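/* Aside — the en_tx.c hunks above pair a new stop condition with a
 * matching wake condition for the PTP SQ's skb FIFO. A standalone
 * sketch of the invariant, with hypothetical helpers: a queue that
 * can stop for lack of FIFO room may only be woken once descriptor
 * room *and* FIFO room both exist, or it would stall or overflow.
 */
#include <linux/netdevice.h>

static void tx_maybe_stop(struct net_device *dev, bool fifo_has_room)
{
	if (!fifo_has_room)
		netif_stop_queue(dev);
}

static void tx_maybe_wake(struct net_device *dev, bool wq_has_room,
			  bool fifo_has_room)
{
	if (netif_queue_stopped(dev) && wq_has_room && fifo_has_room)
		netif_wake_queue(dev);
}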
"LEGACY" : "OFFLOADS",  		 esw->esw_funcs.num_vfs, esw->enabled_vports); -	esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED; -	if (esw->mode == MLX5_ESWITCH_OFFLOADS) -		esw_offloads_disable(esw); -	else if (esw->mode == MLX5_ESWITCH_LEGACY) -		esw_legacy_disable(esw); -	mlx5_esw_acls_ns_cleanup(esw); +	if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) { +		esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED; +		if (esw->mode == MLX5_ESWITCH_OFFLOADS) +			esw_offloads_disable(esw); +		else if (esw->mode == MLX5_ESWITCH_LEGACY) +			esw_legacy_disable(esw); +		mlx5_esw_acls_ns_cleanup(esw); +	}  	if (esw->mode == MLX5_ESWITCH_OFFLOADS)  		devl_rate_nodes_destroy(devlink); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index f68dc2d0dbe6..3029bc1c0dd0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -736,6 +736,14 @@ void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,  					      struct mlx5_eswitch *slave_esw);  int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw); +static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw) +{ +	if (mlx5_esw_allowed(esw)) +		return esw->esw_funcs.num_vfs; + +	return 0; +} +  #else  /* CONFIG_MLX5_ESWITCH */  /* eswitch API stubs */  static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 4e50df3139c6..8c6c9bcb3dc3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -433,7 +433,7 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f  		    mlx5_lag_mpesw_is_activated(esw->dev))  			dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;  	} -	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) { +	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {  		if (pkt_reformat) {  			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;  			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat; @@ -2310,7 +2310,7 @@ out_free:  static int esw_offloads_start(struct mlx5_eswitch *esw,  			      struct netlink_ext_ack *extack)  { -	int err, err1; +	int err;  	esw->mode = MLX5_ESWITCH_OFFLOADS;  	err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs); @@ -2318,11 +2318,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,  		NL_SET_ERR_MSG_MOD(extack,  				   "Failed setting eswitch to offloads");  		esw->mode = MLX5_ESWITCH_LEGACY; -		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); -		if (err1) { -			NL_SET_ERR_MSG_MOD(extack, -					   "Failed setting eswitch back to legacy"); -		}  		mlx5_rescan_drivers(esw->dev);  	}  	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { @@ -3389,19 +3384,19 @@ err_metadata:  static int esw_offloads_stop(struct mlx5_eswitch *esw,  			     struct netlink_ext_ack *extack)  { -	int err, err1; +	int err;  	esw->mode = MLX5_ESWITCH_LEGACY; + +	/* If changing from switchdev to legacy mode without sriov enabled, +	 * no need to create legacy fdb. 
+	 */ +	if (!mlx5_sriov_is_enabled(esw->dev)) +		return 0; +  	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); -	if (err) { +	if (err)  		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); -		esw->mode = MLX5_ESWITCH_OFFLOADS; -		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); -		if (err1) { -			NL_SET_ERR_MSG_MOD(extack, -					   "Failed setting eswitch back to offloads"); -		} -	}  	return err;  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c index ee568bf34ae2..edd910258314 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c @@ -30,9 +30,9 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,  		     sizeof(dest->vport.num), hash);  	hash = jhash((const void *)&dest->vport.vhca_id,  		     sizeof(dest->vport.num), hash); -	if (dest->vport.pkt_reformat) -		hash = jhash(dest->vport.pkt_reformat, -			     sizeof(*dest->vport.pkt_reformat), +	if (flow_act->pkt_reformat) +		hash = jhash(flow_act->pkt_reformat, +			     sizeof(*flow_act->pkt_reformat),  			     hash);  	return hash;  } @@ -53,9 +53,11 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,  	if (ret)  		return ret; -	return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ? -	       memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat, -		      sizeof(*dest1->vport.pkt_reformat)) : 0; +	if (flow_act1->pkt_reformat && flow_act2->pkt_reformat) +		return memcmp(flow_act1->pkt_reformat, flow_act2->pkt_reformat, +			      sizeof(*flow_act1->pkt_reformat)); + +	return !(flow_act1->pkt_reformat == flow_act2->pkt_reformat);  }  static int @@ -310,6 +312,8 @@ revert_changes:  	for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {  		struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl; +		attr->dests[curr_dest].termtbl = NULL; +  		/* search for the destination associated with the  		 * current term table  		 */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index e8896f368362..1e46f9afa40e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -9,7 +9,8 @@ enum {  	MLX5_FW_RESET_FLAGS_RESET_REQUESTED,  	MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,  	MLX5_FW_RESET_FLAGS_PENDING_COMP, -	MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS +	MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, +	MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED  };  struct mlx5_fw_reset { @@ -152,7 +153,8 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)  		mlx5_unload_one(dev);  		if (mlx5_health_wait_pci_up(dev))  			mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n"); -		mlx5_load_one(dev, false); +		else +			mlx5_load_one(dev, false);  		devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,  							BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |  							BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE)); @@ -358,6 +360,23 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)  		err = -ETIMEDOUT;  	} +	do { +		err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, ®16); +		if (err) +			return err; +		if (reg16 == dev_id) +			break; +		msleep(20); +	} while (!time_after(jiffies, timeout)); + +	if (reg16 == dev_id) { +		mlx5_core_info(dev, "Firmware responds to PCI config cycles again\n"); +	} else { +		
mlx5_core_err(dev, "Firmware is not responsive (0x%04x) after %llu ms\n", +			      reg16, mlx5_tout_ms(dev, PCI_TOGGLE)); +		err = -ETIMEDOUT; +	} +  restore:  	list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {  		pci_cfg_access_unlock(sdev); @@ -388,7 +407,7 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)  	err = mlx5_pci_link_toggle(dev);  	if (err) {  		mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err); -		goto done; +		set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);  	}  	mlx5_enter_error_state(dev, true); @@ -464,6 +483,10 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)  		goto out;  	}  	err = fw_reset->ret; +	if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) { +		mlx5_unload_one_devl_locked(dev); +		mlx5_load_one_devl_locked(dev, false); +	}  out:  	clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);  	return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index a9f4ede4a9bf..32c3e0a649a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -228,9 +228,8 @@ static void mlx5_ldev_free(struct kref *ref)  	if (ldev->nb.notifier_call)  		unregister_netdevice_notifier_net(&init_net, &ldev->nb);  	mlx5_lag_mp_cleanup(ldev); -	mlx5_lag_mpesw_cleanup(ldev); -	cancel_work_sync(&ldev->mpesw_work);  	destroy_workqueue(ldev->wq); +	mlx5_lag_mpesw_cleanup(ldev);  	mutex_destroy(&ldev->lock);  	kfree(ldev);  } @@ -701,10 +700,13 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)  			return false;  #ifdef CONFIG_MLX5_ESWITCH -	dev = ldev->pf[MLX5_LAG_P1].dev; -	if ((mlx5_sriov_is_enabled(dev)) && !is_mdev_switchdev_mode(dev)) -		return false; +	for (i = 0; i < ldev->ports; i++) { +		dev = ldev->pf[i].dev; +		if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev)) +			return false; +	} +	dev = ldev->pf[MLX5_LAG_P1].dev;  	mode = mlx5_eswitch_mode(dev);  	for (i = 0; i < ldev->ports; i++)  		if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index ce2ce8ccbd70..f30ac2de639f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -50,6 +50,19 @@ struct lag_tracker {  	enum netdev_lag_hash hash_type;  }; +enum mpesw_op { +	MLX5_MPESW_OP_ENABLE, +	MLX5_MPESW_OP_DISABLE, +}; + +struct mlx5_mpesw_work_st { +	struct work_struct work; +	struct mlx5_lag    *lag; +	enum mpesw_op	   op; +	struct completion  comp; +	int result; +}; +  /* LAG data of a ConnectX card.   * It serves both its phys functions.   
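 * One instance covers every LAG flavor: netdev event tracking via
 * tracker and nb, the LAG workqueue plus delayed bond_work,
 * multipath state in lag_mp, and hardware port selection in
 * port_sel.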
*/ @@ -66,7 +79,6 @@ struct mlx5_lag {  	struct lag_tracker        tracker;  	struct workqueue_struct   *wq;  	struct delayed_work       bond_work; -	struct work_struct	  mpesw_work;  	struct notifier_block     nb;  	struct lag_mp             lag_mp;  	struct mlx5_lag_port_sel  port_sel; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c index f643202b29c6..c17e8f1ec914 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -7,63 +7,95 @@  #include "eswitch.h"  #include "lib/mlx5.h" -void mlx5_mpesw_work(struct work_struct *work) +static int add_mpesw_rule(struct mlx5_lag *ldev)  { -	struct mlx5_lag *ldev = container_of(work, struct mlx5_lag, mpesw_work); +	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; +	int err; -	mutex_lock(&ldev->lock); -	mlx5_disable_lag(ldev); -	mutex_unlock(&ldev->lock); -} +	if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1) +		return 0; -static void mlx5_lag_disable_mpesw(struct mlx5_core_dev *dev) -{ -	struct mlx5_lag *ldev = dev->priv.lag; +	if (ldev->mode != MLX5_LAG_MODE_NONE) { +		err = -EINVAL; +		goto out_err; +	} -	if (!queue_work(ldev->wq, &ldev->mpesw_work)) -		mlx5_core_warn(dev, "failed to queue work\n"); +	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false); +	if (err) { +		mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err); +		goto out_err; +	} + +	return 0; + +out_err: +	atomic_dec(&ldev->lag_mpesw.mpesw_rule_count); +	return err;  } -void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev) +static void del_mpesw_rule(struct mlx5_lag *ldev)  { -	struct mlx5_lag *ldev = dev->priv.lag; +	if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) && +	    ldev->mode == MLX5_LAG_MODE_MPESW) +		mlx5_disable_lag(ldev); +} -	if (!ldev) -		return; +static void mlx5_mpesw_work(struct work_struct *work) +{ +	struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work); +	struct mlx5_lag *ldev = mpesww->lag;  	mutex_lock(&ldev->lock); -	if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) && -	    ldev->mode == MLX5_LAG_MODE_MPESW) -		mlx5_lag_disable_mpesw(dev); +	if (mpesww->op == MLX5_MPESW_OP_ENABLE) +		mpesww->result = add_mpesw_rule(ldev); +	else if (mpesww->op == MLX5_MPESW_OP_DISABLE) +		del_mpesw_rule(ldev);  	mutex_unlock(&ldev->lock); + +	complete(&mpesww->comp);  } -int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev) +static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev, +				     enum mpesw_op op)  {  	struct mlx5_lag *ldev = dev->priv.lag; +	struct mlx5_mpesw_work_st *work;  	int err = 0;  	if (!ldev)  		return 0; -	mutex_lock(&ldev->lock); -	if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1) -		goto out; +	work = kzalloc(sizeof(*work), GFP_KERNEL); +	if (!work) +		return -ENOMEM; -	if (ldev->mode != MLX5_LAG_MODE_NONE) { +	INIT_WORK(&work->work, mlx5_mpesw_work); +	init_completion(&work->comp); +	work->op = op; +	work->lag = ldev; + +	if (!queue_work(ldev->wq, &work->work)) { +		mlx5_core_warn(dev, "failed to queue mpesw work\n");  		err = -EINVAL;  		goto out;  	} - -	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false); -	if (err) -		mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err); - +	wait_for_completion(&work->comp); +	err = work->result;  out: -	mutex_unlock(&ldev->lock); +	kfree(work);  	return err;  } +void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev) +{ 
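	/* Queues the disable op on the LAG workqueue and blocks on a
	 * completion until the worker has run, so the caller observes
	 * the rule-count decrement (and any resulting LAG disable)
	 * before returning.
	 */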
+	mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE); +} + +int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev) +{ +	return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE); +} +  int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)  {  	struct mlx5_lag *ldev = mdev->priv.lag; @@ -71,12 +103,9 @@ int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)  	if (!netif_is_bond_master(out_dev) || !ldev)  		return 0; -	mutex_lock(&ldev->lock); -	if (ldev->mode == MLX5_LAG_MODE_MPESW) { -		mutex_unlock(&ldev->lock); +	if (ldev->mode == MLX5_LAG_MODE_MPESW)  		return -EOPNOTSUPP; -	} -	mutex_unlock(&ldev->lock); +  	return 0;  } @@ -90,11 +119,10 @@ bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)  void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)  { -	INIT_WORK(&ldev->mpesw_work, mlx5_mpesw_work);  	atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);  }  void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)  { -	cancel_delayed_work_sync(&ldev->bond_work); +	WARN_ON(atomic_read(&ldev->lag_mpesw.mpesw_rule_count));  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h index be4abcb8fcd5..88e8daffcf92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h @@ -12,7 +12,6 @@ struct lag_mpesw {  	atomic_t mpesw_rule_count;  }; -void mlx5_mpesw_work(struct work_struct *work);  int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev);  bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev);  #if IS_ENABLED(CONFIG_MLX5_ESWITCH) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c index baa8092f335e..c971ff04dd04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c @@ -3,6 +3,7 @@  #include <linux/mlx5/device.h>  #include <linux/mlx5/transobj.h> +#include "clock.h"  #include "aso.h"  #include "wq.h" @@ -179,6 +180,7 @@ static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,  {  	void *in, *sqc, *wq;  	int inlen, err; +	u8 ts_format;  	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +  		sizeof(u64) * sq->wq_ctrl.buf.npages; @@ -195,6 +197,11 @@ static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,  	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);  	MLX5_SET(sqc,  sqc, flush_in_error_en, 1); +	ts_format = mlx5_is_real_time_sq(mdev) ? 
+			MLX5_TIMESTAMP_FORMAT_REAL_TIME : +			MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; +	MLX5_SET(sqc, sqc, ts_format, ts_format); +  	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);  	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.hw_objs.bfreg.index);  	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift - diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 839a01da110f..8ff16318e32d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -122,7 +122,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)  {  	struct mlx5_mpfs *mpfs = dev->priv.mpfs; -	if (!MLX5_ESWITCH_MANAGER(dev)) +	if (!mpfs)  		return;  	WARN_ON(!hlist_empty(mpfs->hash)); @@ -137,7 +137,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)  	int err = 0;  	u32 index; -	if (!MLX5_ESWITCH_MANAGER(dev)) +	if (!mpfs)  		return 0;  	mutex_lock(&mpfs->lock); @@ -185,7 +185,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)  	int err = 0;  	u32 index; -	if (!MLX5_ESWITCH_MANAGER(dev)) +	if (!mpfs)  		return 0;  	mutex_lock(&mpfs->lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 0b459d841c3a..e58775a7d955 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1798,7 +1798,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,  	res = state == pci_channel_io_perm_failure ?  		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; -	mlx5_pci_trace(dev, "Exit, result = %d, %s\n",  res, result2str(res)); +	mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, result = %d, %s\n", +		       __func__, dev->state, dev->pci_status, res, result2str(res));  	return res;  } @@ -1837,7 +1838,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)  	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);  	int err; -	mlx5_pci_trace(dev, "Enter\n"); +	mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Enter\n", +		       __func__, dev->state, dev->pci_status);  	err = mlx5_pci_enable_device(dev);  	if (err) { @@ -1859,7 +1861,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)  	res = PCI_ERS_RESULT_RECOVERED;  out: -	mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res)); +	mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, err = %d, result = %d, %s\n", +		       __func__, dev->state, dev->pci_status, err, res, result2str(res));  	return res;  } @@ -1872,6 +1875,10 @@ static void mlx5_pci_resume(struct pci_dev *pdev)  	err = mlx5_load_one(dev, false); +	if (!err) +		devlink_health_reporter_state_update(dev->priv.health.fw_fatal_reporter, +						     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); +  	mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err,  		       !err ? 
"recovered" : "Failed");  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index 7da012ff0d41..8e2abbab05f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -18,6 +18,10 @@ struct mlx5_sf_dev_table {  	phys_addr_t base_address;  	u64 sf_bar_length;  	struct notifier_block nb; +	struct mutex table_lock; /* Serializes sf life cycle and vhca state change handler */ +	struct workqueue_struct *active_wq; +	struct work_struct work; +	u8 stop_active_wq:1;  	struct mlx5_core_dev *dev;  }; @@ -168,6 +172,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_  		return 0;  	sf_index = event->function_id - base_id; +	mutex_lock(&table->table_lock);  	sf_dev = xa_load(&table->devices, sf_index);  	switch (event->new_vhca_state) {  	case MLX5_VHCA_STATE_INVALID: @@ -191,6 +196,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_  	default:  		break;  	} +	mutex_unlock(&table->table_lock);  	return 0;  } @@ -215,6 +221,78 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)  	return 0;  } +static void mlx5_sf_dev_add_active_work(struct work_struct *work) +{ +	struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work); +	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; +	struct mlx5_core_dev *dev = table->dev; +	u16 max_functions; +	u16 function_id; +	u16 sw_func_id; +	int err = 0; +	u8 state; +	int i; + +	max_functions = mlx5_sf_max_functions(dev); +	function_id = MLX5_CAP_GEN(dev, sf_base_id); +	for (i = 0; i < max_functions; i++, function_id++) { +		if (table->stop_active_wq) +			return; +		err = mlx5_cmd_query_vhca_state(dev, function_id, out, sizeof(out)); +		if (err) +			/* A failure of specific vhca doesn't mean others will +			 * fail as well. +			 */ +			continue; +		state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); +		if (state != MLX5_VHCA_STATE_ACTIVE) +			continue; + +		sw_func_id = MLX5_GET(query_vhca_state_out, out, vhca_state_context.sw_function_id); +		mutex_lock(&table->table_lock); +		/* Don't probe device which is already probe */ +		if (!xa_load(&table->devices, i)) +			mlx5_sf_dev_add(dev, i, function_id, sw_func_id); +		/* There is a race where SF got inactive after the query +		 * above. e.g.: the query returns that the state of the +		 * SF is active, and after that the eswitch manager set it to +		 * inactive. +		 * This case cannot be managed in SW, since the probing of the +		 * SF is on one system, and the inactivation is on a different +		 * system. +		 * If the inactive is done after the SF perform init_hca(), +		 * the SF will fully probe and then removed. If it was +		 * done before init_hca(), the SF probe will fail. +		 */ +		mutex_unlock(&table->table_lock); +	} +} + +/* In case SFs are generated externally, probe active SFs */ +static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table) +{ +	if (MLX5_CAP_GEN(table->dev, eswitch_manager)) +		return 0; /* the table is local */ + +	/* Use a workqueue to probe active SFs, which are in large +	 * quantity and may take up to minutes to probe. 
+	 */ +	table->active_wq = create_singlethread_workqueue("mlx5_active_sf"); +	if (!table->active_wq) +		return -ENOMEM; +	INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work); +	queue_work(table->active_wq, &table->work); +	return 0; +} + +static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table) +{ +	if (table->active_wq) { +		table->stop_active_wq = true; +		destroy_workqueue(table->active_wq); +	} +} +  void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)  {  	struct mlx5_sf_dev_table *table; @@ -240,11 +318,17 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)  	table->base_address = pci_resource_start(dev->pdev, 2);  	table->max_sfs = max_sfs;  	xa_init(&table->devices); +	mutex_init(&table->table_lock);  	dev->priv.sf_dev_table = table;  	err = mlx5_vhca_event_notifier_register(dev, &table->nb);  	if (err)  		goto vhca_err; + +	err = mlx5_sf_dev_queue_active_work(table); +	if (err) +		goto add_active_err; +  	err = mlx5_sf_dev_vhca_arm_all(table);  	if (err)  		goto arm_err; @@ -252,6 +336,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)  	return;  arm_err: +	mlx5_sf_dev_destroy_active_work(table); +add_active_err:  	mlx5_vhca_event_notifier_unregister(dev, &table->nb);  vhca_err:  	table->max_sfs = 0; @@ -279,7 +365,9 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)  	if (!table)  		return; +	mlx5_sf_dev_destroy_active_work(table);  	mlx5_vhca_event_notifier_unregister(dev, &table->nb); +	mutex_destroy(&table->table_lock);  	/* Now that event handler is not running, it is safe to destroy  	 * the sf device without race. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index ddfaf7891188..91ff19f67695 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -1200,7 +1200,8 @@ free_rule:  	}  remove_from_nic_tbl: -	mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher); +	if (!nic_matcher->rules) +		mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);  free_hw_ste:  	mlx5dr_domain_nic_unlock(nic_dmn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c index 31d443dd8386..f68461b13391 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c @@ -46,7 +46,7 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,  int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,  				 struct mlx5dr_action *action)  { -	int ret; +	int ret = -EOPNOTSUPP;  	if (action && action->action_type != DR_ACTION_TYP_FT)  		return -EOPNOTSUPP; @@ -67,6 +67,9 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,  			goto out;  	} +	if (ret) +		goto out; +  	/* Release old action */  	if (tbl->miss_action)  		refcount_dec(&tbl->miss_action->refcount); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 4efccd942fb8..1290b2d3eae6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -3470,6 +3470,8 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,  	u16 vid;  	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info; +	if (!vxlan_fdb_info->offloaded) +		return;  	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);  	if 
(!bridge_device) diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 468520079c65..e6acd1e7b263 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -6851,7 +6851,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)  	char banner[sizeof(version)];  	struct ksz_switch *sw = NULL; -	result = pci_enable_device(pdev); +	result = pcim_enable_device(pdev);  	if (result)  		return result; diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c index 81a8ccca7e5e..5693784eec5b 100644 --- a/drivers/net/ethernet/microchip/encx24j600-regmap.c +++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c @@ -359,7 +359,7 @@ static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,  		goto err_out;  	usleep_range(26, 100); -	while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) && +	while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&  	       (mistat & BUSY))  		cpu_relax(); @@ -397,7 +397,7 @@ static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,  		goto err_out;  	usleep_range(26, 100); -	while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) && +	while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&  	       (mistat & BUSY))  		cpu_relax(); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c index e58a27fd8b50..06811c60d598 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c @@ -656,7 +656,15 @@ void lan966x_stats_get(struct net_device *dev,  	stats->rx_dropped = dev->stats.rx_dropped +  		lan966x->stats[idx + SYS_COUNT_RX_LONG] +  		lan966x->stats[idx + SYS_COUNT_DR_LOCAL] + -		lan966x->stats[idx + SYS_COUNT_DR_TAIL]; +		lan966x->stats[idx + SYS_COUNT_DR_TAIL] + +		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_0] + +		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_1] + +		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_2] + +		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_3] + +		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_4] + +		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_5] + +		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_6] + +		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_7];  	for (i = 0; i < LAN966X_NUM_TC; i++) {  		stats->rx_dropped += @@ -708,6 +716,9 @@ int lan966x_stats_init(struct lan966x *lan966x)  	snprintf(queue_name, sizeof(queue_name), "%s-stats",  		 dev_name(lan966x->dev));  	lan966x->stats_queue = create_singlethread_workqueue(queue_name); +	if (!lan966x->stats_queue) +		return -ENOMEM; +  	INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work);  	queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,  			   LAN966X_STATS_CHECK_DELAY); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c index 7e4061c854f0..e6948939ccc2 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c @@ -309,6 +309,7 @@ static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)  		lan966x, FDMA_CH_DB_DISCARD);  	tx->activated = false; +	tx->last_in_use = -1;  }  static void lan966x_fdma_tx_reload(struct lan966x_tx *tx) @@ -413,13 +414,15 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)  	/* Get the received frame and unmap it */  	db 
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index e58a27fd8b50..06811c60d598 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -656,7 +656,15 @@ void lan966x_stats_get(struct net_device *dev,
 	stats->rx_dropped = dev->stats.rx_dropped +
 		lan966x->stats[idx + SYS_COUNT_RX_LONG] +
 		lan966x->stats[idx + SYS_COUNT_DR_LOCAL] +
-		lan966x->stats[idx + SYS_COUNT_DR_TAIL];
+		lan966x->stats[idx + SYS_COUNT_DR_TAIL] +
+		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_0] +
+		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_1] +
+		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_2] +
+		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_3] +
+		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_4] +
+		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_5] +
+		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_6] +
+		lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_7];

 	for (i = 0; i < LAN966X_NUM_TC; i++) {
 		stats->rx_dropped +=
@@ -708,6 +716,9 @@ int lan966x_stats_init(struct lan966x *lan966x)
 	snprintf(queue_name, sizeof(queue_name), "%s-stats",
 		 dev_name(lan966x->dev));
 	lan966x->stats_queue = create_singlethread_workqueue(queue_name);
+	if (!lan966x->stats_queue)
+		return -ENOMEM;
+
 	INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work);
 	queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
 			   LAN966X_STATS_CHECK_DELAY);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 7e4061c854f0..e6948939ccc2 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -309,6 +309,7 @@ static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
 		lan966x, FDMA_CH_DB_DISCARD);

 	tx->activated = false;
+	tx->last_in_use = -1;
 }

 static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
@@ -413,13 +414,15 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)

 	/* Get the received frame and unmap it */
 	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
 	page = rx->page[rx->dcb_index][rx->db_index];
+
+	dma_sync_single_for_cpu(lan966x->dev, (dma_addr_t)db->dataptr,
+				FDMA_DCB_STATUS_BLOCKL(db->status),
+				DMA_FROM_DEVICE);
+
 	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
 	if (unlikely(!skb))
 		goto unmap_page;

-	dma_unmap_single(lan966x->dev, (dma_addr_t)db->dataptr,
-			 FDMA_DCB_STATUS_BLOCKL(db->status),
-			 DMA_FROM_DEVICE);
 	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

 	lan966x_ifh_get_src_port(skb->data, &src_port);
@@ -428,6 +431,10 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
 	if (WARN_ON(src_port >= lan966x->num_phys_ports))
 		goto free_skb;

+	dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
+			       PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
+			       DMA_ATTR_SKIP_CPU_SYNC);
+
 	skb->dev = lan966x->ports[src_port]->dev;
 	skb_pull(skb, IFH_LEN * sizeof(u32));

@@ -453,9 +460,9 @@
 free_skb:
 	kfree_skb(skb);
 unmap_page:
-	dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
-		       FDMA_DCB_STATUS_BLOCKL(db->status),
-		       DMA_FROM_DEVICE);
+	dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
+			       PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
+			       DMA_ATTR_SKIP_CPU_SYNC);
 	__free_pages(page, rx->page_order);

 	return NULL;
@@ -667,12 +674,14 @@ static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
 	int i;

 	for (i = 0; i < lan966x->num_phys_ports; ++i) {
+		struct lan966x_port *port;
 		int mtu;

-		if (!lan966x->ports[i])
+		port = lan966x->ports[i];
+		if (!port)
 			continue;

-		mtu = lan966x->ports[i]->dev->mtu;
+		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
 		if (mtu > max_mtu)
 			max_mtu = mtu;
 	}
@@ -687,17 +696,14 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x)

 static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
 {
-	void *rx_dcbs, *tx_dcbs, *tx_dcbs_buf;
-	dma_addr_t rx_dma, tx_dma;
+	dma_addr_t rx_dma;
+	void *rx_dcbs;
 	u32 size;
 	int err;

 	/* Store these for later to free them */
 	rx_dma = lan966x->rx.dma;
-	tx_dma = lan966x->tx.dma;
 	rx_dcbs = lan966x->rx.dcbs;
-	tx_dcbs = lan966x->tx.dcbs;
-	tx_dcbs_buf = lan966x->tx.dcbs_buf;

 	napi_synchronize(&lan966x->napi);
 	napi_disable(&lan966x->napi);
@@ -715,17 +721,6 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
 	size = ALIGN(size, PAGE_SIZE);
 	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

-	lan966x_fdma_tx_disable(&lan966x->tx);
-	err = lan966x_fdma_tx_alloc(&lan966x->tx);
-	if (err)
-		goto restore_tx;
-
-	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
-	size = ALIGN(size, PAGE_SIZE);
-	dma_free_coherent(lan966x->dev, size, tx_dcbs, tx_dma);
-
-	kfree(tx_dcbs_buf);
-
 	lan966x_fdma_wakeup_netdev(lan966x);
 	napi_enable(&lan966x->napi);

@@ -735,11 +730,6 @@ restore:
 	lan966x->rx.dcbs = rx_dcbs;
 	lan966x_fdma_rx_start(&lan966x->rx);

-restore_tx:
-	lan966x->tx.dma = tx_dma;
-	lan966x->tx.dcbs = tx_dcbs;
-	lan966x->tx.dcbs_buf = tx_dcbs_buf;
-
 	return err;
 }

@@ -751,6 +741,8 @@ int lan966x_fdma_change_mtu(struct lan966x *lan966x)

 	max_mtu = lan966x_fdma_get_max_mtu(lan966x);
 	max_mtu += IFH_LEN * sizeof(u32);
+	max_mtu += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	max_mtu += VLAN_HLEN * 2;

 	if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 ==
 	    lan966x->rx.page_order)
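Note on the lan966x_fdma RX path above: instead of unmapping the whole buffer before build_skb(), the driver now syncs only the bytes the hardware wrote for CPU access, and later drops the mapping with DMA_ATTR_SKIP_CPU_SYNC so the cache maintenance is not repeated over the full page. A minimal sketch of that pattern, assuming a page backing buffer mapped with dma_map_single(); names are illustrative, not the driver's:

	/* Sketch only: page-based RX completion with explicit DMA sync.
	 * dev, dma, page, len and order describe a buffer previously
	 * mapped for DMA_FROM_DEVICE.
	 */
	static struct sk_buff *rx_build_skb(struct device *dev, dma_addr_t dma,
					    struct page *page, u32 len, u8 order)
	{
		struct sk_buff *skb;

		/* Make only the bytes the hardware wrote visible to the CPU. */
		dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

		skb = build_skb(page_address(page), PAGE_SIZE << order);
		if (unlikely(!skb))
			return NULL;
		skb_put(skb, len);

		/* Release the mapping without a second CPU sync; the sync
		 * above already covered everything we will touch.
		 */
		dma_unmap_single_attrs(dev, dma, PAGE_SIZE << order,
				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		return skb;
	}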
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index be2fd030cccb..20ee5b28f70a 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -386,7 +386,7 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
 	int old_mtu = dev->mtu;
 	int err;

-	lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu),
+	lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)),
 	       lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
 	dev->mtu = new_mtu;

@@ -395,7 +395,7 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)

 	err = lan966x_fdma_change_mtu(lan966x);
 	if (err) {
-		lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(old_mtu),
+		lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(old_mtu)),
 		       lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
 		dev->mtu = old_mtu;
 	}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 9656071b8289..4ec33999e4df 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -26,6 +26,8 @@
 #define LAN966X_BUFFER_MEMORY		(160 * 1024)
 #define LAN966X_BUFFER_MIN_SZ		60

+#define LAN966X_HW_MTU(mtu)		((mtu) + ETH_HLEN + ETH_FCS_LEN)
+
 #define PGID_AGGR			64
 #define PGID_SRC			80
 #define PGID_ENTRIES			89
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
index 1d90b93dd417..fb5087fef22e 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -585,6 +585,21 @@ enum lan966x_target {
 #define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
 	FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)

+/*      DEV:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV_MAC_TAGS_CFG(t)       __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 12, 0, 1, 4)
+
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA        BIT(1)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(x)\
+	FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_GET(x)\
+	FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x)
+
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA            BIT(0)
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+	FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+	FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
 /*      DEV:MAC_CFG_STATUS:MAC_IFG_CFG */
 #define DEV_MAC_IFG_CFG(t)        __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
index 8d7260cd7da9..3c44660128da 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -169,6 +169,12 @@ void lan966x_vlan_port_apply(struct lan966x_port *port)
 		ANA_VLAN_CFG_VLAN_POP_CNT,
 		lan966x, ANA_VLAN_CFG(port->chip_port));

+	lan_rmw(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(port->vlan_aware) |
+		DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(port->vlan_aware),
+		DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+		DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA,
+		lan966x, DEV_MAC_TAGS_CFG(port->chip_port));
+
 	/* Drop frames with multicast source address */
 	val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1);
 	if (port->vlan_aware && !pvid)
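Note on LAN966X_HW_MTU() above: the MAC length register checks the full frame on the wire, so the L3 MTU has to be padded with the Ethernet header and FCS before it is programmed. A small standalone illustration of the arithmetic (constant values as in <linux/if_ether.h>):

	#include <stdio.h>

	#define ETH_HLEN	14	/* dst MAC + src MAC + ethertype */
	#define ETH_FCS_LEN	4	/* frame check sequence */
	#define HW_MTU(mtu)	((mtu) + ETH_HLEN + ETH_FCS_LEN)

	int main(void)
	{
		/* A 1500-byte MTU needs the MAC to accept 1518-byte frames. */
		printf("mtu=1500 -> max frame len=%d\n", HW_MTU(1500));
		return 0;
	}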
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 6b0febcb7fa9..01f3a3a41cdb 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1253,6 +1253,9 @@ int sparx_stats_init(struct sparx5 *sparx5)
 	snprintf(queue_name, sizeof(queue_name), "%s-stats",
 		 dev_name(sparx5->dev));
 	sparx5->stats_queue = create_singlethread_workqueue(queue_name);
+	if (!sparx5->stats_queue)
+		return -ENOMEM;
+
 	INIT_DELAYED_WORK(&sparx5->stats_work, sparx5_check_stats_work);
 	queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work,
 			   SPX5_STATS_CHECK_DELAY);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 66360c8c5a38..141897dfe388 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -317,7 +317,7 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
 	next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry);
 	db_hw = &next_dcb_hw->db[0];
 	if (!(db_hw->status & FDMA_DCB_STATUS_DONE))
-		tx->dropped++;
+		return -EINVAL;
 	db = list_first_entry(&tx->db_list, struct sparx5_db, list);
 	list_move_tail(&db->list, &tx->db_list);
 	next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 62a325e96345..b6bbb3c9bd7a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -659,6 +659,9 @@ static int sparx5_start(struct sparx5 *sparx5)
 	snprintf(queue_name, sizeof(queue_name), "%s-mact",
 		 dev_name(sparx5->dev));
 	sparx5->mact_queue = create_singlethread_workqueue(queue_name);
+	if (!sparx5->mact_queue)
+		return -ENOMEM;
+
 	INIT_DELAYED_WORK(&sparx5->mact_work, sparx5_mact_pull_work);
 	queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
 			   SPX5_MACT_PULL_DELAY);
@@ -884,6 +887,8 @@ static int mchp_sparx5_probe(struct platform_device *pdev)

 cleanup_ports:
 	sparx5_cleanup_ports(sparx5);
+	if (sparx5->mact_queue)
+		destroy_workqueue(sparx5->mact_queue);
 cleanup_config:
 	kfree(configs);
 cleanup_pnode:
@@ -908,6 +913,7 @@ static int mchp_sparx5_remove(struct platform_device *pdev)
 	sparx5_cleanup_ports(sparx5);
 	/* Unregister netdevs */
 	sparx5_unregister_notifier_blocks(sparx5);
+	destroy_workqueue(sparx5->mact_queue);

 	return 0;
 }
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index 19516ccad533..d078156581d5 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -104,7 +104,7 @@ static int sparx5_port_open(struct net_device *ndev)
 	err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
 	if (err) {
 		netdev_err(ndev, "Could not attach to PHY\n");
-		return err;
+		goto err_connect;
 	}

 	phylink_start(port->phylink);
@@ -116,10 +116,20 @@ static int sparx5_port_open(struct net_device *ndev)
 			err = sparx5_serdes_set(port->sparx5, port, &port->conf);
 		else
 			err = phy_power_on(port->serdes);
-		if (err)
+		if (err) {
 			netdev_err(ndev, "%s failed\n", __func__);
+			goto out_power;
+		}
 	}

+	return 0;
+
+out_power:
+	phylink_stop(port->phylink);
+	phylink_disconnect_phy(port->phylink);
+err_connect:
+	sparx5_port_enable(port, false);
+
 	return err;
 }
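Note on the workqueue hunks above (lan966x_stats_init, sparx_stats_init, sparx5_start): create_singlethread_workqueue() returns NULL on allocation failure, so the result must be checked, and teardown paths have to tolerate a queue that was never created, as the sparx5 probe unwind now does. A minimal sketch of the pattern; struct and helper names are illustrative:

	struct my_priv {
		struct workqueue_struct *wq;
		struct delayed_work work;
	};

	static void my_stats_work(struct work_struct *work)
	{
		/* poll hardware counters here, then re-queue as needed */
	}

	static int stats_start(struct my_priv *priv)
	{
		priv->wq = create_singlethread_workqueue("my-stats");
		if (!priv->wq)
			return -ENOMEM;	/* the allocation really can fail */

		INIT_DELAYED_WORK(&priv->work, my_stats_work);
		queue_delayed_work(priv->wq, &priv->work, HZ);
		return 0;
	}

	static void stats_stop(struct my_priv *priv)
	{
		if (priv->wq) {		/* tolerate partially-initialized state */
			cancel_delayed_work_sync(&priv->work);
			destroy_workqueue(priv->wq);
		}
	}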
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 83c16ca5b30f..6db6ac6a3bbc 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -234,9 +234,8 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
 	sparx5_set_port_ifh(ifh, port->portno);

 	if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-		ret = sparx5_ptp_txtstamp_request(port, skb);
-		if (ret)
-			return ret;
+		if (sparx5_ptp_txtstamp_request(port, skb) < 0)
+			return NETDEV_TX_BUSY;

 		sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
 		sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
@@ -250,23 +249,31 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
 	else
 		ret = sparx5_inject(sparx5, ifh, skb, dev);

-	if (ret == NETDEV_TX_OK) {
-		stats->tx_bytes += skb->len;
-		stats->tx_packets++;
+	if (ret == -EBUSY)
+		goto busy;
+	if (ret < 0)
+		goto drop;

-		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-		    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
-			return ret;
+	stats->tx_bytes += skb->len;
+	stats->tx_packets++;
+	sparx5->tx.packets++;

-		dev_kfree_skb_any(skb);
-	} else {
-		stats->tx_dropped++;
+	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+	    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+		return NETDEV_TX_OK;

-		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-		    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
-			sparx5_ptp_txtstamp_release(port, skb);
-	}
-	return ret;
+	dev_consume_skb_any(skb);
+	return NETDEV_TX_OK;
+drop:
+	stats->tx_dropped++;
+	sparx5->tx.dropped++;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+busy:
+	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+	    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+		sparx5_ptp_txtstamp_release(port, skb);
+	return NETDEV_TX_BUSY;
 }

 static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
index e05429c751ee..dc2c3756e3a2 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
@@ -90,13 +90,10 @@ static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev,
 			}
 		}

-		sparx5_tc_ets_add(port, params);
-		break;
+		return sparx5_tc_ets_add(port, params);
 	case TC_ETS_DESTROY:
-		sparx5_tc_ets_del(port);
-
-		break;
+		return sparx5_tc_ets_del(port);
 	case TC_ETS_GRAFT:
 		return -EOPNOTSUPP;
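Note on the sparx5_port_xmit_impl rework above: it brings the function in line with the .ndo_start_xmit contract. Whenever NETDEV_TX_OK is returned the driver owns the skb and must consume or free it; NETDEV_TX_BUSY may only be returned with the skb left untouched so the core can requeue it. A hedged skeleton of that contract; hw_queue_frame() is a hypothetical helper, not a sparx5 function:

	/* Hypothetical helper: 0 on success, -EBUSY if the ring is
	 * temporarily full, another negative errno on hard failure.
	 */
	static int hw_queue_frame(struct net_device *dev, struct sk_buff *skb);

	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		int ret = hw_queue_frame(dev, skb);

		if (ret == -EBUSY)
			return NETDEV_TX_BUSY;	/* skb untouched, core retries */

		if (ret < 0) {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);	/* error: drop, still report OK */
			return NETDEV_TX_OK;
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);	/* success path: not an error drop */
		return NETDEV_TX_OK;
	}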
diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
index 4a6efe6ada08..65c24ee49efd 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma.h
+++ b/drivers/net/ethernet/microsoft/mana/gdma.h
@@ -498,7 +498,14 @@ enum {

 #define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

-#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT
+/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
+ * so the driver is able to reliably support features like busy_poll.
+ */
+#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
+
+#define GDMA_DRV_CAP_FLAGS1 \
+	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
+	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)

 #define GDMA_DRV_CAP_FLAGS2 0
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 9259a74eca40..27a0f3af8aab 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1303,10 +1303,11 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
 		xdp_do_flush();
 }

-static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
+static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
 {
 	struct mana_cq *cq = context;
 	u8 arm_bit;
+	int w;

 	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);

@@ -1315,26 +1316,31 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
 	else
 		mana_poll_tx_cq(cq);

-	if (cq->work_done < cq->budget &&
-	    napi_complete_done(&cq->napi, cq->work_done)) {
+	w = cq->work_done;
+
+	if (w < cq->budget &&
+	    napi_complete_done(&cq->napi, w)) {
 		arm_bit = SET_ARM_BIT;
 	} else {
 		arm_bit = 0;
 	}

 	mana_gd_ring_cq(gdma_queue, arm_bit);
+
+	return w;
 }

 static int mana_poll(struct napi_struct *napi, int budget)
 {
 	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
+	int w;

 	cq->work_done = 0;
 	cq->budget = budget;

-	mana_cq_handler(cq, cq->gdma_cq);
+	w = mana_cq_handler(cq, cq->gdma_cq);

-	return min(cq->work_done, budget);
+	return min(w, budget);
 }

 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
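Note on the mana hunks above: once napi_complete_done() succeeds, the completion queue may be re-armed and the same NAPI context re-scheduled on another CPU, which can overwrite cq->work_done before the poll function reads it back. The fix snapshots the counter into a local before completing. A sketch of the idiom with illustrative names:

	/* Sketch: snapshot work_done before napi_complete_done() so a
	 * re-scheduled poll on another CPU cannot change what we report.
	 */
	static int my_cq_handler(struct my_cq *cq)
	{
		int w;

		my_poll_completions(cq);	/* updates cq->work_done */

		w = cq->work_done;		/* snapshot first */
		if (w < cq->budget && napi_complete_done(&cq->napi, w))
			my_arm_interrupt(cq);	/* safe: we no longer touch cq fields */

		return w;
	}

	static int my_napi_poll(struct napi_struct *napi, int budget)
	{
		struct my_cq *cq = container_of(napi, struct my_cq, napi);

		cq->work_done = 0;
		cq->budget = budget;
		return min(my_cq_handler(cq), budget);	/* local value, race-free */
	}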
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index dcf8212119f9..1d3c4474b7cb 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -7128,9 +7128,8 @@ static int s2io_card_up(struct s2io_nic *sp)
 		if (ret) {
 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
 				  dev->name);
-			s2io_reset(sp);
-			free_rx_buffers(sp);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto err_fill_buff;
 		}
 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
 			  ring->rx_bufs_left);
@@ -7168,18 +7167,16 @@ static int s2io_card_up(struct s2io_nic *sp)
 	/* Enable Rx Traffic and interrupts on the NIC */
 	if (start_nic(sp)) {
 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
-		s2io_reset(sp);
-		free_rx_buffers(sp);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_out;
 	}

 	/* Add interrupt service routine */
 	if (s2io_add_isr(sp) != 0) {
 		if (sp->config.intr_type == MSI_X)
 			s2io_rem_isr(sp);
-		s2io_reset(sp);
-		free_rx_buffers(sp);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_out;
 	}

 	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
@@ -7199,6 +7196,20 @@ static int s2io_card_up(struct s2io_nic *sp)
 	}

 	return 0;
+
+err_out:
+	if (config->napi) {
+		if (config->intr_type == MSI_X) {
+			for (i = 0; i < sp->config.rx_ring_num; i++)
+				napi_disable(&sp->mac_control.rings[i].napi);
+		} else {
+			napi_disable(&sp->napi);
+		}
+	}
+err_fill_buff:
+	s2io_reset(sp);
+	free_rx_buffers(sp);
+	return ret;
 }

 /**
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index 2b427d8ccb2f..ccacb6ab6c39 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -282,7 +282,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
 	dma_len = skb_headlen(skb);
 	if (skb_is_gso(skb))
 		type = NFDK_DESC_TX_TYPE_TSO;
-	else if (!nr_frags && dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+	else if (!nr_frags && dma_len <= NFDK_TX_MAX_DATA_PER_HEAD)
 		type = NFDK_DESC_TX_TYPE_SIMPLE;
 	else
 		type = NFDK_DESC_TX_TYPE_GATHER;
@@ -927,7 +927,7 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
 	dma_len = pkt_len;
 	dma_addr = rxbuf->dma_addr + dma_off;

-	if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+	if (dma_len <= NFDK_TX_MAX_DATA_PER_HEAD)
 		type = NFDK_DESC_TX_TYPE_SIMPLE;
 	else
 		type = NFDK_DESC_TX_TYPE_GATHER;
@@ -1325,7 +1325,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 	txbuf = &tx_ring->ktxbufs[wr_idx];

 	dma_len = skb_headlen(skb);
-	if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+	if (dma_len <= NFDK_TX_MAX_DATA_PER_HEAD)
 		type = NFDK_DESC_TX_TYPE_SIMPLE;
 	else
 		type = NFDK_DESC_TX_TYPE_GATHER;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 405786c00334..cb08d7bf9524 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -341,7 +341,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
 		return ret;

 	attrs.split = eth_port.is_split;
-	attrs.splittable = !attrs.split;
+	attrs.splittable = eth_port.port_lanes > 1 && !attrs.split;
 	attrs.lanes = eth_port.port_lanes;
 	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
 	attrs.phys.port_number = eth_port.label_port;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index e66e548919d4..71301dbd8fb5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -716,16 +716,26 @@ static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf)
 	return val;
 }

-static int nfp_pf_cfg_hwinfo(struct nfp_pf *pf, bool sp_indiff)
+static void nfp_pf_cfg_hwinfo(struct nfp_pf *pf)
 {
 	struct nfp_nsp *nsp;
 	char hwinfo[32];
+	bool sp_indiff;
 	int err;

 	nsp = nfp_nsp_open(pf->cpp);
 	if (IS_ERR(nsp))
-		return PTR_ERR(nsp);
+		return;
+
+	if (!nfp_nsp_has_hwinfo_set(nsp))
+		goto end;
+
+	sp_indiff = (nfp_net_pf_get_app_id(pf) == NFP_APP_FLOWER_NIC) ||
+		    (nfp_net_pf_get_app_cap(pf) & NFP_NET_APP_CAP_SP_INDIFF);
+
+	/* No need to clean `sp_indiff` in driver, management firmware
+	 * will do it when application firmware is unloaded.
+	 */
 	snprintf(hwinfo, sizeof(hwinfo), "sp_indiff=%d", sp_indiff);
 	err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
 	/* Not a fatal error, no need to return error to stop driver from loading */
@@ -739,21 +749,8 @@ static void nfp_pf_cfg_hwinfo(struct nfp_pf *pf)
 		pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
 	}

+end:
 	nfp_nsp_close(nsp);
-	return 0;
-}
-
-static int nfp_pf_nsp_cfg(struct nfp_pf *pf)
-{
-	bool sp_indiff = (nfp_net_pf_get_app_id(pf) == NFP_APP_FLOWER_NIC) ||
-			 (nfp_net_pf_get_app_cap(pf) & NFP_NET_APP_CAP_SP_INDIFF);
-
-	return nfp_pf_cfg_hwinfo(pf, sp_indiff);
-}
-
-static void nfp_pf_nsp_clean(struct nfp_pf *pf)
-{
-	nfp_pf_cfg_hwinfo(pf, false);
 }

 static int nfp_pci_probe(struct pci_dev *pdev,
@@ -856,13 +853,11 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 		goto err_fw_unload;
 	}

-	err = nfp_pf_nsp_cfg(pf);
-	if (err)
-		goto err_fw_unload;
+	nfp_pf_cfg_hwinfo(pf);

 	err = nfp_net_pci_probe(pf);
 	if (err)
-		goto err_nsp_clean;
+		goto err_fw_unload;

 	err = nfp_hwmon_register(pf);
 	if (err) {
@@ -874,8 +869,6 @@ static int nfp_pci_probe(struct pci_dev *pdev,

 err_net_remove:
 	nfp_net_pci_remove(pf);
-err_nsp_clean:
-	nfp_pf_nsp_clean(pf);
 err_fw_unload:
 	kfree(pf->rtbl);
 	nfp_mip_close(pf->mip);
@@ -915,7 +908,6 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)

 	nfp_net_pci_remove(pf);

-	nfp_pf_nsp_clean(pf);
 	vfree(pf->dumpspec);
 	kfree(pf->rtbl);
 	nfp_mip_close(pf->mip);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 22a5d2419084..991059d6cb32 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1432,6 +1432,9 @@ nfp_port_get_module_info(struct net_device *netdev,
 	u8 data;

 	port = nfp_port_from_netdev(netdev);
+	if (!port)
+		return -EOPNOTSUPP;
+
 	/* update port state to get latest interface */
 	set_bit(NFP_PORT_CHANGED, &port->flags);
 	eth_port = nfp_port_get_eth_port(port);
@@ -1477,15 +1480,15 @@ nfp_port_get_module_info(struct net_device *netdev,
 		if (data < 0x3) {
 			modinfo->type = ETH_MODULE_SFF_8436;
-			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
 		} else {
 			modinfo->type = ETH_MODULE_SFF_8636;
-			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
 		}
 		break;
 	case NFP_INTERFACE_QSFP28:
 		modinfo->type = ETH_MODULE_SFF_8636;
-		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
 		break;
 	default:
 		netdev_err(netdev, "Unsupported module 0x%x detected\n",
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 3db4a2431741..62320be4de5a 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -249,25 +249,26 @@ static void nixge_hw_dma_bd_release(struct net_device *ndev)
 	struct sk_buff *skb;
 	int i;

-	for (i = 0; i < RX_BD_NUM; i++) {
-		phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
-						     phys);
-
-		dma_unmap_single(ndev->dev.parent, phys_addr,
-				 NIXGE_MAX_JUMBO_FRAME_SIZE,
-				 DMA_FROM_DEVICE);
-
-		skb = (struct sk_buff *)(uintptr_t)
-			nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
-						 sw_id_offset);
-		dev_kfree_skb(skb);
-	}
+	if (priv->rx_bd_v) {
+		for (i = 0; i < RX_BD_NUM; i++) {
+			phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
+							     phys);
+
+			dma_unmap_single(ndev->dev.parent, phys_addr,
+					 NIXGE_MAX_JUMBO_FRAME_SIZE,
+					 DMA_FROM_DEVICE);
+
+			skb = (struct sk_buff *)(uintptr_t)
+				nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
+							 sw_id_offset);
+			dev_kfree_skb(skb);
+		}

-	if (priv->rx_bd_v)
 		dma_free_coherent(ndev->dev.parent,
 				  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
 				  priv->rx_bd_v,
 				  priv->rx_bd_p);
+	}

 	if (priv->tx_skb)
 		devm_kfree(ndev->dev.parent, priv->tx_skb);
@@ -900,6 +901,7 @@ static int nixge_open(struct net_device *ndev)
 err_rx_irq:
 	free_irq(priv->tx_irq, ndev);
 err_tx_irq:
+	napi_disable(&priv->napi);
 	phy_stop(phy);
 	phy_disconnect(phy);
 	tasklet_kill(&priv->dma_err_tasklet);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3f2c30184752..28b7cec485ef 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1143,6 +1143,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
 		buffer_info->dma = 0;
 		buffer_info->time_stamp = 0;
 		tx_ring->next_to_use = ring_num;
+		dev_kfree_skb_any(skb);
 		return;
 	}
 	buffer_info->mapped = true;
@@ -2459,6 +2460,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
 	unregister_netdev(netdev);

 	pch_gbe_phy_hw_reset(&adapter->hw);
+	pci_dev_put(adapter->ptp_pdev);

 	free_netdev(netdev);
 }
@@ -2533,7 +2535,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
 	/* setup the private structure */
 	ret = pch_gbe_sw_init(adapter);
 	if (ret)
-		goto err_free_netdev;
+		goto err_put_dev;

 	/* Initialize PHY */
 	ret = pch_gbe_init_phy(adapter);
@@ -2591,6 +2593,8 @@ static int pch_gbe_probe(struct pci_dev *pdev,

 err_free_adapter:
 	pch_gbe_phy_hw_reset(&adapter->hw);
+err_put_dev:
+	pci_dev_put(adapter->ptp_pdev);
 err_free_netdev:
 	free_netdev(netdev);
 	return ret;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 5d58fd99be3c..19d4848df17d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -2817,11 +2817,15 @@ err_out:
 	 * than the full array, but leave the qcq shells in place
 	 */
 	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
-		lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
-		ionic_qcq_free(lif, lif->txqcqs[i]);
+		if (lif->txqcqs && lif->txqcqs[i]) {
+			lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+			ionic_qcq_free(lif, lif->txqcqs[i]);
+		}

-		lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
-		ionic_qcq_free(lif, lif->rxqcqs[i]);
+		if (lif->rxqcqs && lif->rxqcqs[i]) {
+			lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+			ionic_qcq_free(lif, lif->rxqcqs[i]);
+		}
 	}

 	if (err)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 56f93b030551..5456c2b15d9b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -687,8 +687,14 @@ int ionic_port_reset(struct ionic *ionic)

 static int __init ionic_init_module(void)
 {
+	int ret;
+
 	ionic_debugfs_create();
-	return ionic_bus_register_driver();
+	ret = ionic_bus_register_driver();
+	if (ret)
+		ionic_debugfs_destroy();
+
+	return ret;
 }

 static void __exit ionic_cleanup_module(void)
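Note on ionic_init_module() above (the adin1110 module init in this same series has the identical shape): every side effect taken before a failing registration must be undone before returning the error, or the module is left half-initialized with no exit path to clean it up. A minimal sketch with hypothetical helpers:

	/* Sketch: unwind earlier init steps when a later one fails.
	 * my_debugfs_create/destroy and my_bus_register_driver are
	 * illustrative stand-ins, not a real API.
	 */
	static int __init my_init_module(void)
	{
		int ret;

		my_debugfs_create();		/* step 1: always succeeds */

		ret = my_bus_register_driver();	/* step 2: can fail */
		if (ret)
			my_debugfs_destroy();	/* undo step 1 on failure */

		return ret;
	}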
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 9fb1fa479d4b..16e6bd466143 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -767,34 +767,34 @@ static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
 	return rc;
 }

-#define CONFIG_QEDE_BITMAP_IDX		BIT(0)
-#define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
-#define CONFIG_QEDR_BITMAP_IDX		BIT(2)
-#define CONFIG_QEDF_BITMAP_IDX		BIT(4)
-#define CONFIG_QEDI_BITMAP_IDX		BIT(5)
-#define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)
+#define BITMAP_IDX_FOR_CONFIG_QEDE	BIT(0)
+#define BITMAP_IDX_FOR_CONFIG_QED_SRIOV	BIT(1)
+#define BITMAP_IDX_FOR_CONFIG_QEDR	BIT(2)
+#define BITMAP_IDX_FOR_CONFIG_QEDF	BIT(4)
+#define BITMAP_IDX_FOR_CONFIG_QEDI	BIT(5)
+#define BITMAP_IDX_FOR_CONFIG_QED_LL2	BIT(6)

 static u32 qed_get_config_bitmap(void)
 {
 	u32 config_bitmap = 0x0;

 	if (IS_ENABLED(CONFIG_QEDE))
-		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
+		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDE;

 	if (IS_ENABLED(CONFIG_QED_SRIOV))
-		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
+		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_SRIOV;

 	if (IS_ENABLED(CONFIG_QED_RDMA))
-		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
+		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDR;

 	if (IS_ENABLED(CONFIG_QED_FCOE))
-		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
+		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDF;

 	if (IS_ENABLED(CONFIG_QED_ISCSI))
-		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
+		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDI;

 	if (IS_ENABLED(CONFIG_QED_LL2))
-		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
+		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_LL2;

 	return config_bitmap;
 }
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 76072f8c3d2f..0d57ffcedf0c 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2471,6 +2471,7 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
 					     skb_shinfo(skb)->nr_frags);
 	if (tx_cb->seg_count == -1) {
 		netdev_err(ndev, "%s: invalid segment count!\n", __func__);
+		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index bd0607680329..2fd5c6fdb500 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2991,7 +2991,7 @@ static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter)
 		QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
 		dev_info(&adapter->pdev->dev,
 			 "%s: lock recovery initiated\n", __func__);
-		msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
+		mdelay(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
 		val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
 		id = ((val >> 2) & 0xF);
 		if (id == adapter->portnum) {
@@ -3027,7 +3027,7 @@ int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter)
 		if (status)
 			break;

-		msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY);
+		mdelay(QLC_83XX_DRV_LOCK_WAIT_DELAY);
 		i++;

 		if (i == 1)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 36324126db6d..33f723a9f471 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -841,7 +841,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 				napi_gro_receive(&priv->napi[q],
 						 priv->rx_1st_skb);
 				stats->rx_packets++;
-				stats->rx_bytes += priv->rx_1st_skb->len;
+				stats->rx_bytes += pkt_len;
 				break;
 			}
 		}
@@ -3020,6 +3020,7 @@ static int __maybe_unused ravb_resume(struct device *dev)
 		ret = ravb_open(ndev);
 		if (ret < 0)
 			return ret;
+		ravb_set_rx_mode(ndev);
 		netif_device_attach(ndev);
 	}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index d1e1aa19a68e..7022fb2005a2 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3277,6 +3277,30 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
 	bool was_enabled = efx->port_enabled;
 	int rc;

+#ifdef CONFIG_SFC_SRIOV
+	/* If this function is a VF and we have access to the parent PF,
+	 * then use the PF control path to attempt to change the VF MAC address.
+	 */
+	if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
+		struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn);
+		struct efx_ef10_nic_data *nic_data = efx->nic_data;
+		u8 mac[ETH_ALEN];
+
+		/* net_dev->dev_addr can be zeroed by efx_net_stop in
+		 * efx_ef10_sriov_set_vf_mac, so pass in a copy.
+		 */
+		ether_addr_copy(mac, efx->net_dev->dev_addr);
+
+		rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac);
+		if (!rc)
+			return 0;
+
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Updating VF mac via PF failed (%d), setting directly\n",
+			  rc);
+	}
+#endif
+
 	efx_device_detach_sync(efx);
 	efx_net_stop(efx->net_dev);

@@ -3297,40 +3321,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
 		efx_net_open(efx->net_dev);
 	efx_device_attach_if_not_resetting(efx);

-#ifdef CONFIG_SFC_SRIOV
-	if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
-		struct efx_ef10_nic_data *nic_data = efx->nic_data;
-		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
-
-		if (rc == -EPERM) {
-			struct efx_nic *efx_pf;
-
-			/* Switch to PF and change MAC address on vport */
-			efx_pf = pci_get_drvdata(pci_dev_pf);
-
-			rc = efx_ef10_sriov_set_vf_mac(efx_pf,
-						       nic_data->vf_index,
-						       efx->net_dev->dev_addr);
-		} else if (!rc) {
-			struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
-			struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
-			unsigned int i;
-
-			/* MAC address successfully changed by VF (with MAC
-			 * spoofing) so update the parent PF if possible.
-			 */
-			for (i = 0; i < efx_pf->vf_count; ++i) {
-				struct ef10_vf *vf = nic_data->vf + i;
-
-				if (vf->efx == efx) {
-					ether_addr_copy(vf->mac,
-							efx->net_dev->dev_addr);
-					return 0;
-				}
-			}
-		}
-	} else
-#endif
 	if (rc == -EPERM) {
 		netif_err(efx, drv, efx->net_dev,
 			  "Cannot change MAC address; use sfboot to enable"
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 88fa29572e23..ddcc325ed570 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -218,6 +218,7 @@ netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
 		   skb->len, skb->data_len, channel->channel);
 	if (!efx->n_channels || !efx->n_tx_channels || !channel) {
 		netif_stop_queue(net_dev);
+		dev_kfree_skb_any(skb);
 		goto err;
 	}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 054d5ce6029e..0556542d7a6b 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1059,8 +1059,10 @@ static int efx_pci_probe(struct pci_dev *pci_dev,

 	/* Allocate and initialise a struct net_device */
 	net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
-	if (!net_dev)
-		return -ENOMEM;
+	if (!net_dev) {
+		rc = -ENOMEM;
+		goto fail0;
+	}
 	probe_ptr = netdev_priv(net_dev);
 	*probe_ptr = probe_data;
 	efx->net_dev = net_dev;
@@ -1132,6 +1134,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
 	WARN_ON(rc > 0);
 	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
 	free_netdev(net_dev);
+ fail0:
+	kfree(probe_data);
 	return rc;
 }
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index be72e71da027..5f201a547e5b 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -162,9 +162,9 @@ struct efx_filter_spec {
 	u32	priority:2;
 	u32	flags:6;
 	u32	dmaq_id:12;
-	u32	vport_id;
 	u32	rss_context;
-	__be16	outer_vid __aligned(4); /* allow jhash2() of match values */
+	u32	vport_id;
+	__be16	outer_vid;
 	__be16	inner_vid;
 	u8	loc_mac[ETH_ALEN];
 	u8	rem_mac[ETH_ALEN];
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 4826e6a7e4ce..9220afeddee8 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -660,17 +660,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left,
 	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
 		return false;

-	return memcmp(&left->outer_vid, &right->outer_vid,
+	return memcmp(&left->vport_id, &right->vport_id,
 		      sizeof(struct efx_filter_spec) -
-		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
+		      offsetof(struct efx_filter_spec, vport_id)) == 0;
 }

 u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
 {
-	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
-	return jhash2((const u32 *)&spec->outer_vid,
+	BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
+	return jhash2((const u32 *)&spec->vport_id,
 		      (sizeof(struct efx_filter_spec) -
-		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
+		       offsetof(struct efx_filter_spec, vport_id)) / 4,
 		      0);
 }
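Note on the sfc filter hunks above: efx_filter_spec_hash() feeds the tail of the struct straight into jhash2(), which consumes u32 words, so the first hashed member must be 4-byte aligned and the hashed tail a multiple of 4 bytes. Starting the region at the u32 vport_id satisfies both naturally, without the explicit __aligned(4) attribute. A standalone illustration of the layout rule (field list abridged, not the real struct):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Everything from the first hashed member to the end of the
	 * struct is hashed as an array of u32 words; a u32 placed first
	 * in the region makes the alignment requirement hold by layout.
	 */
	struct filter_spec {
		uint32_t priority;
		uint32_t rss_context;
		uint32_t vport_id;	/* hashed region starts here */
		uint16_t outer_vid;
		uint16_t inner_vid;
		uint8_t  loc_mac[6];
		uint8_t  rem_mac[6];
	};

	int main(void)
	{
		size_t off = offsetof(struct filter_spec, vport_id);
		size_t len = sizeof(struct filter_spec) - off;

		/* Both must be zero for jhash2()-style word hashing. */
		printf("offset %% 4 = %zu, tail %% 4 = %zu\n", off % 4, len % 4);
		return 0;
	}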
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 2240f6d0b89b..9b46579b5a10 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1961,11 +1961,13 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
 			ret = PTR_ERR(priv->phydev);
 			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
 			priv->phydev = NULL;
+			mdiobus_unregister(bus);
 			return -ENODEV;
 		}

 		ret = phy_device_register(priv->phydev);
 		if (ret) {
+			phy_device_free(priv->phydev);
 			mdiobus_unregister(bus);
 			dev_err(priv->dev,
 				"phy_device_register err(%d)\n", ret);
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 1fa09b49ba7f..d2c6a5dfdc0e 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1229,6 +1229,8 @@ static int ave_init(struct net_device *ndev)

 	phy_support_asym_pause(phydev);

+	phydev->mac_managed_pm = true;
+
 	phy_attached_info(phydev);

 	return 0;
@@ -1756,6 +1758,10 @@ static int ave_resume(struct device *dev)

 	ave_global_reset(ndev);

+	ret = phy_init_hw(ndev->phydev);
+	if (ret)
+		return ret;
+
 	ave_ethtool_get_wol(ndev, &wol);
 	wol.wolopts = priv->wolopts;
 	__ave_ethtool_set_wol(ndev, &wol);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 0a2afc1a3124..7deb1f817dac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -629,7 +629,6 @@ static int ehl_common_data(struct pci_dev *pdev,
 {
 	plat->rx_queues_to_use = 8;
 	plat->tx_queues_to_use = 8;
-	plat->clk_ptp_rate = 200000000;
 	plat->use_phy_wol = 1;

 	plat->safety_feat_cfg->tsoee = 1;
@@ -654,6 +653,8 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
 	plat->serdes_powerup = intel_serdes_powerup;
 	plat->serdes_powerdown = intel_serdes_powerdown;

+	plat->clk_ptp_rate = 204800000;
+
 	return ehl_common_data(pdev, plat);
 }

@@ -667,6 +668,8 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
 	plat->bus_id = 1;
 	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;

+	plat->clk_ptp_rate = 204800000;
+
 	return ehl_common_data(pdev, plat);
 }

@@ -683,6 +686,8 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
 	plat->bus_id = 2;
 	plat->addr64 = 32;

+	plat->clk_ptp_rate = 200000000;
+
 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

 	return ehl_common_data(pdev, plat);
@@ -722,6 +727,8 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
 	plat->bus_id = 3;
 	plat->addr64 = 32;

+	plat->clk_ptp_rate = 200000000;
+
 	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

 	return ehl_common_data(pdev, plat);
@@ -757,7 +764,7 @@ static int tgl_common_data(struct pci_dev *pdev,
 {
 	plat->rx_queues_to_use = 6;
 	plat->tx_queues_to_use = 4;
-	plat->clk_ptp_rate = 200000000;
+	plat->clk_ptp_rate = 204800000;
 	plat->speed_mode_2500 = intel_speed_mode_2500;

 	plat->safety_feat_cfg->tsoee = 1;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 017dbbda0c1c..a25c187d3185 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -51,7 +51,6 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 	struct stmmac_resources res;
 	struct device_node *np;
 	int ret, i, phy_mode;
-	bool mdio = false;

 	np = dev_of_node(&pdev->dev);

@@ -69,29 +68,31 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 	if (!plat)
 		return -ENOMEM;

+	plat->mdio_node = of_get_child_by_name(np, "mdio");
 	if (plat->mdio_node) {
-		dev_err(&pdev->dev, "Found MDIO subnode\n");
-		mdio = true;
-	}
+		dev_info(&pdev->dev, "Found MDIO subnode\n");

-	if (mdio) {
 		plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
 						   sizeof(*plat->mdio_bus_data),
 						   GFP_KERNEL);
-		if (!plat->mdio_bus_data)
-			return -ENOMEM;
+		if (!plat->mdio_bus_data) {
+			ret = -ENOMEM;
+			goto err_put_node;
+		}
+
 		plat->mdio_bus_data->needs_reset = true;
 	}

 	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
-	if (!plat->dma_cfg)
-		return -ENOMEM;
+	if (!plat->dma_cfg) {
+		ret = -ENOMEM;
+		goto err_put_node;
+	}

 	/* Enable pci device */
 	ret = pci_enable_device(pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
-		return ret;
+		goto err_put_node;
 	}

 	/* Get the base address of device */
@@ -100,7 +101,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 			continue;
 		ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
 		if (ret)
-			return ret;
+			goto err_disable_device;
 		break;
 	}
@@ -111,7 +112,8 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 	phy_mode = device_get_phy_mode(&pdev->dev);
 	if (phy_mode < 0) {
 		dev_err(&pdev->dev, "phy_mode not found\n");
-		return phy_mode;
+		ret = phy_mode;
+		goto err_disable_device;
 	}

 	plat->phy_interface = phy_mode;
@@ -128,6 +130,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 	if (res.irq < 0) {
 		dev_err(&pdev->dev, "IRQ macirq not found\n");
 		ret = -ENODEV;
+		goto err_disable_msi;
 	}

 	res.wol_irq = of_irq_get_byname(np, "eth_wake_irq");
@@ -140,15 +143,31 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 	if (res.lpi_irq < 0) {
 		dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
 		ret = -ENODEV;
+		goto err_disable_msi;
 	}

-	return stmmac_dvr_probe(&pdev->dev, plat, &res);
+	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
+	if (ret)
+		goto err_disable_msi;
+
+	return ret;
+
+err_disable_msi:
+	pci_disable_msi(pdev);
+err_disable_device:
+	pci_disable_device(pdev);
+err_put_node:
+	of_node_put(plat->mdio_node);
+	return ret;
 }

 static void loongson_dwmac_remove(struct pci_dev *pdev)
 {
+	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
 	int i;

+	of_node_put(priv->plat->mdio_node);
 	stmmac_dvr_remove(&pdev->dev);

 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
@@ -158,6 +177,7 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
 		break;
 	}

+	pci_disable_msi(pdev);
 	pci_disable_device(pdev);
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index c7a6588d9398..e8b507f88fbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -272,11 +272,9 @@ static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
 	if (ret)
 		return ret;

-	devm_add_action_or_reset(dwmac->dev,
-				 (void(*)(void *))clk_disable_unprepare,
-				 dwmac->rgmii_tx_clk);
-
-	return 0;
+	return devm_add_action_or_reset(dwmac->dev,
+					(void(*)(void *))clk_disable_unprepare,
+					clk);
 }

 static int meson8b_init_rgmii_delays(struct meson8b_dwmac *dwmac)
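Note on the meson8b hunk above: it illustrates two easy-to-miss points with devm_add_action_or_reset(). Its return value must be propagated (on failure it runs the action immediately and returns the error), and the cookie passed must be the object the action will receive -- here the clk that was just enabled, not an unrelated struct member. A sketch of the correct shape:

	/* Sketch: enable a clock and register an automatic disable action.
	 * On devm_add_action_or_reset() failure the action runs at once,
	 * so neither error path needs a manual clk_disable_unprepare().
	 */
	static int my_clk_prepare_enable(struct device *dev, struct clk *clk)
	{
		int ret;

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		/* Pass 'clk' itself -- the pointer the action will receive. */
		return devm_add_action_or_reset(dev,
						(void (*)(void *))clk_disable_unprepare,
						clk);
	}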
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index f7269d79a385..6656d76b6766 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1243,6 +1243,12 @@ static const struct rk_gmac_ops rk3588_ops = {
 	.set_rgmii_speed = rk3588_set_gmac_speed,
 	.set_rmii_speed = rk3588_set_gmac_speed,
 	.set_clock_selection = rk3588_set_clock_selection,
+	.regs_valid = true,
+	.regs = {
+		0xfe1b0000, /* gmac0 */
+		0xfe1c0000, /* gmac1 */
+		0x0, /* sentinel */
+	},
 };

 #define RV1108_GRF_GMAC_CON0		0X0900
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c25bfecb4a2d..e5cfde1cbd5c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -748,6 +748,8 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
 	if (fc & FLOW_RX) {
 		pr_debug("\tReceive Flow-Control ON\n");
 		flow |= GMAC_RX_FLOW_CTRL_RFE;
+	} else {
+		pr_debug("\tReceive Flow-Control OFF\n");
 	}
 	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 65c96773c6d2..23ec0a9e396c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1061,8 +1061,16 @@ static void stmmac_mac_link_up(struct phylink_config *config,
 		ctrl |= priv->hw->link.duplex;

 	/* Flow Control operation */
-	if (tx_pause && rx_pause)
-		stmmac_mac_flow_ctrl(priv, duplex);
+	if (rx_pause && tx_pause)
+		priv->flow_ctrl = FLOW_AUTO;
+	else if (rx_pause && !tx_pause)
+		priv->flow_ctrl = FLOW_RX;
+	else if (!rx_pause && tx_pause)
+		priv->flow_ctrl = FLOW_TX;
+	else
+		priv->flow_ctrl = FLOW_OFF;
+
+	stmmac_mac_flow_ctrl(priv, duplex);

 	if (ctrl != old_ctrl)
 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
@@ -1214,6 +1222,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
 	if (priv->plat->tx_queues_to_use > 1)
 		priv->phylink_config.mac_capabilities &=
 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
+	priv->phylink_config.mac_managed_pm = true;

 	phylink = phylink_create(&priv->phylink_config, fwnode,
 				 mode, &stmmac_phylink_mac_ops);
@@ -6547,6 +6556,9 @@ void stmmac_xdp_release(struct net_device *dev)
 	struct stmmac_priv *priv = netdev_priv(dev);
 	u32 chan;

+	/* Ensure tx function is not running */
+	netif_tx_disable(dev);
+
 	/* Disable NAPI process */
 	stmmac_disable_all_queues(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 50f6b4a14be4..eb6d9cd8e93f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -108,10 +108,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)

 	axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
 	axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
-	axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
-	axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
-	axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
-	axi->axi_rb =  of_property_read_bool(np, "snps,axi_rb");
+	axi->axi_kbbe = of_property_read_bool(np, "snps,kbbe");
+	axi->axi_fb = of_property_read_bool(np, "snps,fb");
+	axi->axi_mb = of_property_read_bool(np, "snps,mb");
+	axi->axi_rb =  of_property_read_bool(np, "snps,rb");

 	if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
 		axi->axi_wr_osr_lmt = 1;
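Note on the stmmac_mac_link_up() hunk above: the two negotiated pause booleans map onto four flow-control modes, and the MAC is now programmed in every case so that disabling pause also takes effect. The truth table as a standalone sketch:

	#include <stdio.h>
	#include <stdbool.h>

	enum flow_mode { FLOW_OFF, FLOW_RX, FLOW_TX, FLOW_AUTO };

	/* Map the negotiated rx/tx pause booleans to a flow-control mode. */
	static enum flow_mode pause_to_flow(bool rx_pause, bool tx_pause)
	{
		if (rx_pause && tx_pause)
			return FLOW_AUTO;	/* symmetric pause */
		if (rx_pause)
			return FLOW_RX;		/* honour incoming pause only */
		if (tx_pause)
			return FLOW_TX;		/* send pause frames only */
		return FLOW_OFF;
	}

	int main(void)
	{
		/* All four combinations, including the OFF case the old
		 * code never re-programmed into the hardware.
		 */
		for (int rx = 0; rx <= 1; rx++)
			for (int tx = 0; tx <= 1; tx++)
				printf("rx=%d tx=%d -> mode=%d\n", rx, tx,
				       pause_to_flow(rx, tx));
		return 0;
	}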
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 91f10f746dff..1c16548415cd 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1328,7 +1328,7 @@ static int happy_meal_init(struct happy_meal *hp)
 	void __iomem *erxregs      = hp->erxregs;
 	void __iomem *bregs        = hp->bigmacregs;
 	void __iomem *tregs        = hp->tcvregs;
-	const char *bursts;
+	const char *bursts = "64";
 	u32 regtmp, rxcfg;

 	/* If auto-negotiation timer is running, kill it. */
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 9be585237277..c499a14314f1 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -287,7 +287,6 @@ static u32 spl2sw_init_netdev(struct platform_device *pdev, u8 *mac_addr,
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register net device \"%s\"!\n",
 			ndev->name);
-		free_netdev(ndev);
 		*r_ndev = NULL;
 		return ret;
 	}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 7f86068f3ff6..b3b0ba842541 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1454,7 +1454,7 @@ static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy
 	if (speed == SPEED_1000)
 		mac_control |= CPSW_SL_CTL_GIG;
-	if (speed == SPEED_10 && interface == PHY_INTERFACE_MODE_RGMII)
+	if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface))
 		/* Can be used with in band mode only */
 		mac_control |= CPSW_SL_CTL_EXT_EN;
 	if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
@@ -2082,7 +2082,7 @@ static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)

 	for (i = 0; i < common->port_num; i++) {
 		port = &common->ports[i];
-		if (port->ndev)
+		if (port->ndev && port->ndev->reg_state == NETREG_REGISTERED)
 			unregister_netdev(port->ndev);
 	}
 }
@@ -2823,7 +2823,6 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
 	if (ret < 0)
 		return ret;

-	am65_cpsw_nuss_phylink_cleanup(common);
 	am65_cpsw_unregister_devlink(common);
 	am65_cpsw_unregister_notifiers(common);

@@ -2831,6 +2830,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
 	 * dma_deconfigure(dev) before devres_release_all(dev)
 	 */
 	am65_cpsw_nuss_cleanup_ndev(common);
+	am65_cpsw_nuss_phylink_cleanup(common);

 	of_platform_device_destroy(common->mdio_dev, NULL);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 709ca6dd6ecb..13c9c2d6b79b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -854,6 +854,8 @@ static int cpsw_ndo_open(struct net_device *ndev)

 err_cleanup:
 	if (!cpsw->usage_count) {
+		napi_disable(&cpsw->napi_rx);
+		napi_disable(&cpsw->napi_tx);
 		cpdma_ctlr_stop(cpsw->dma);
 		cpsw_destroy_xdp_rxqs(cpsw);
 	}
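Note on the cpsw error path above (nixge and s2io in this series get the same treatment): .ndo_open must unwind exactly what already succeeded, in reverse order -- including NAPI contexts enabled before the failing step, otherwise the next open() runs napi_enable() on an already-enabled instance. A minimal sketch with illustrative helper names:

	/* Sketch: open() that unwinds in reverse order on failure.
	 * my_priv, my_isr and my_alloc_rings are illustrative.
	 */
	static int my_open(struct net_device *ndev)
	{
		struct my_priv *priv = netdev_priv(ndev);
		int ret;

		napi_enable(&priv->napi);

		ret = request_irq(priv->irq, my_isr, 0, ndev->name, ndev);
		if (ret)
			goto err_napi;

		ret = my_alloc_rings(priv);
		if (ret)
			goto err_irq;

		return 0;

	err_irq:
		free_irq(priv->irq, ndev);	/* undo request_irq() */
	err_napi:
		napi_disable(&priv->napi);	/* undo napi_enable() */
		return ret;
	}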
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 2cd2afc3fff0..d09d352e1c0a 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1290,12 +1290,15 @@ static int tsi108_open(struct net_device *dev)

 	data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
 					  &data->rxdma, GFP_KERNEL);
-	if (!data->rxring)
+	if (!data->rxring) {
+		free_irq(data->irq_num, dev);
 		return -ENOMEM;
+	}

 	data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
 					  &data->txdma, GFP_KERNEL);
 	if (!data->txring) {
+		free_irq(data->irq_num, dev);
 		dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
 				    data->rxdma);
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 05848ff15fb5..a3967f8de417 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -108,7 +108,7 @@
  * @next_tx_buf_to_use:	next Tx buffer to write to
  * @next_rx_buf_to_use:	next Rx buffer to read from
  * @base_addr:		base address of the Emaclite device
- * @reset_lock:		lock used for synchronization
+ * @reset_lock:		lock to serialize xmit and tx_timeout execution
  * @deferred_skb:	holds an skb (for transmission at a later time) when the
  *			Tx buffer is not free
  * @phy_dev:		pointer to the PHY device