Diffstat (limited to 'drivers/net/ethernet/broadcom')
30 files changed, 2191 insertions, 473 deletions
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index af75156919ed..c1d3ee9baf7e 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -61,7 +61,7 @@ config BCM63XX_ENET
 
 config BCMGENET
 	tristate "Broadcom GENET internal MAC support"
-	depends on OF && HAS_IOMEM
+	depends on HAS_IOMEM
 	select MII
 	select PHYLIB
 	select FIXED_PHY
@@ -157,7 +157,6 @@ config BGMAC
 config BGMAC_BCMA
 	tristate "Broadcom iProc GBit BCMA support"
 	depends on BCMA && BCMA_HOST_SOC
-	depends on HAS_DMA
 	depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
 	select BGMAC
 	select PHYLIB
@@ -170,7 +169,6 @@ config BGMAC_BCMA
 
 config BGMAC_PLATFORM
 	tristate "Broadcom iProc GBit platform support"
-	depends on HAS_DMA
 	depends on ARCH_BCM_IPROC || COMPILE_TEST
 	depends on OF
 	select BGMAC
@@ -183,7 +181,7 @@ config BGMAC_PLATFORM
 
 config SYSTEMPORT
 	tristate "Broadcom SYSTEMPORT internal MAC support"
-	depends on OF
+	depends on HAS_IOMEM
 	depends on NET_DSA || !NET_DSA
 	select MII
 	select PHYLIB
@@ -232,4 +230,12 @@ config BNXT_DCB
 
 	  If unsure, say N.
 
+config BNXT_HWMON
+	bool "Broadcom NetXtreme-C/E HWMON support"
+	default y
+	depends on BNXT && HWMON && !(BNXT=y && HWMON=m)
+	---help---
+	  Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
+	  devices, via the hwmon sysfs interface.
+
 endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index d5fca2e5a9bc..147045757b10 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -521,7 +521,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	u32 reg;
 
-	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
 	wol->wolopts = priv->wolopts;
 
 	if (!(priv->wolopts & WAKE_MAGICSECURE))
@@ -539,7 +539,7 @@ static int bcm_sysport_set_wol(struct net_device *dev,
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	struct device *kdev = &priv->pdev->dev;
-	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
 
 	if (!device_can_wakeup(kdev))
 		return -ENOTSUPP;
@@ -1041,17 +1041,45 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
+static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
+{
+	u32 reg, bit;
+
+	reg = umac_readl(priv, UMAC_MPD_CTRL);
+	if (enable)
+		reg |= MPD_EN;
+	else
+		reg &= ~MPD_EN;
+	umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+	if (priv->is_lite)
+		bit = RBUF_ACPI_EN_LITE;
+	else
+		bit = RBUF_ACPI_EN;
+
+	reg = rbuf_readl(priv, RBUF_CONTROL);
+	if (enable)
+		reg |= bit;
+	else
+		reg &= ~bit;
+	rbuf_writel(priv, reg, RBUF_CONTROL);
+}
+
 static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 {
 	u32 reg;
 
 	/* Stop monitoring MPD interrupt */
-	intrl2_0_mask_set(priv, INTRL2_0_MPD);
+	intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
+
+	/* Disable RXCHK, active filters and Broadcom tag matching */
+	reg = rxchk_readl(priv, RXCHK_CONTROL);
+	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
+		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
+	rxchk_writel(priv, reg, RXCHK_CONTROL);
 
 	/* Clear the MagicPacket detection logic */
-	reg = umac_readl(priv, UMAC_MPD_CTRL);
-	reg &= ~MPD_EN;
-	umac_writel(priv, reg, UMAC_MPD_CTRL);
+	mpd_enable_set(priv, false);
 
 	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
 }
@@ -1077,6 +1105,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	struct bcm_sysport_tx_ring *txr;
 	unsigned int ring, ring_bit;
+	u32 reg;
 
 	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
 			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1102,9 +1131,14 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
 	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
 		bcm_sysport_tx_reclaim_all(priv);
 
-	if (priv->irq0_stat & INTRL2_0_MPD) {
-		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
-		bcm_sysport_resume_from_wol(priv);
+	if (priv->irq0_stat & INTRL2_0_MPD)
+		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+	if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
+		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+				  RXCHK_BRCM_TAG_MATCH_MASK;
+		netdev_info(priv->netdev,
+			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
 	}
 
 	if (!priv->is_lite)
@@ -1946,8 +1980,8 @@ static int bcm_sysport_open(struct net_device *dev)
 	if (!priv->is_lite)
 		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
 	else
-		priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
-				   GIB_FCS_STRIP);
+		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
+				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
 
 	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
 				0, priv->phy_interface);
@@ -2090,6 +2124,132 @@ static int bcm_sysport_stop(struct net_device *dev)
 	return 0;
 }
 
+static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
+				 u64 location)
+{
+	unsigned int index;
+	u32 reg;
+
+	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+		reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
+		reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
+		reg &= RXCHK_BRCM_TAG_CID_MASK;
+		if (reg == location)
+			return index;
+	}
+
+	return -EINVAL;
+}
+
+static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
+				struct ethtool_rxnfc *nfc)
+{
+	int index;
+
+	/* This is not a rule that we know about */
+	index = bcm_sysport_rule_find(priv, nfc->fs.location);
+	if (index < 0)
+		return -EOPNOTSUPP;
+
+	nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;
+
+	return 0;
+}
+
+static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
+				struct ethtool_rxnfc *nfc)
+{
+	unsigned int index;
+	u32 reg;
+
+	/* We cannot match locations greater than what the classification ID
+	 * permits (256 entries)
+	 */
+	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
+		return -E2BIG;
+
+	/* We cannot support flows that are not destined for a wake-up */
+	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
+		return -EOPNOTSUPP;
+
+	/* All filters are already in use, we cannot match more rules */
+	if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
+	    RXCHK_BRCM_TAG_MAX)
+		return -ENOSPC;
+
+	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
+	if (index > RXCHK_BRCM_TAG_MAX)
+		return -ENOSPC;
+
+	/* Location is the classification ID, and index is the position
+	 * within one of our 8 possible filters to be programmed
+	 */
+	reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
+	reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
+	reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
+	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
+	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+
+	set_bit(index, priv->filters);
+
+	return 0;
+}
+
+static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
+				u64 location)
+{
+	int index;
+
+	/* This is not a rule that we know about */
+	index = bcm_sysport_rule_find(priv, location);
+	if (index < 0)
+		return -EOPNOTSUPP;
+
+	/* No need to disable this filter if it was enabled, this will
+	 * be taken care of during suspend time by bcm_sysport_suspend_to_wol
+	 */
+	clear_bit(index, priv->filters);
+
+	return 0;
+}
+
+static int bcm_sysport_get_rxnfc(struct net_device *dev,
+				 struct ethtool_rxnfc *nfc, u32 *rule_locs)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	switch (nfc->cmd) {
+	case ETHTOOL_GRXCLSRULE:
+		ret = bcm_sysport_rule_get(priv, nfc);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int bcm_sysport_set_rxnfc(struct net_device *dev,
+				 struct ethtool_rxnfc *nfc)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	switch (nfc->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		ret = bcm_sysport_rule_set(priv, nfc);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = bcm_sysport_rule_del(priv, nfc->fs.location);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
 static const struct ethtool_ops bcm_sysport_ethtool_ops = {
 	.get_drvinfo		= bcm_sysport_get_drvinfo,
 	.get_msglevel		= bcm_sysport_get_msglvl,
@@ -2104,10 +2264,12 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
 	.set_coalesce		= bcm_sysport_set_coalesce,
 	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
+	.get_rxnfc		= bcm_sysport_get_rxnfc,
+	.set_rxnfc		= bcm_sysport_set_rxnfc,
 };
 
 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
-				    void *accel_priv,
+				    struct net_device *sb_dev,
 				    select_queue_fallback_t fallback)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2116,7 +2278,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
 	unsigned int q, port;
 
 	if (!netdev_uses_dsa(dev))
-		return fallback(dev, skb);
+		return fallback(dev, skb, NULL);
 
 	/* DSA tagging layer will have configured the correct queue */
 	q = BRCM_TAG_GET_QUEUE(queue);
@@ -2124,7 +2286,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
 	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
 
 	if (unlikely(!tx_ring))
-		return fallback(dev, skb);
+		return fallback(dev, skb, NULL);
 
 	return tx_ring->index;
 }
@@ -2423,21 +2585,43 @@ static int bcm_sysport_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
 {
 	struct net_device *ndev = priv->netdev;
 	unsigned int timeout = 1000;
+	unsigned int index, i = 0;
 	u32 reg;
 
 	/* Password has already been programmed */
 	reg = umac_readl(priv, UMAC_MPD_CTRL);
-	reg |= MPD_EN;
+	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
+		reg |= MPD_EN;
 	reg &= ~PSW_EN;
 	if (priv->wolopts & WAKE_MAGICSECURE)
 		reg |= PSW_EN;
 	umac_writel(priv, reg, UMAC_MPD_CTRL);
 
+	if (priv->wolopts & WAKE_FILTER) {
+		/* Turn on ACPI matching to steal packets from RBUF */
+		reg = rbuf_readl(priv, RBUF_CONTROL);
+		if (priv->is_lite)
+			reg |= RBUF_ACPI_EN_LITE;
+		else
+			reg |= RBUF_ACPI_EN;
+		rbuf_writel(priv, reg, RBUF_CONTROL);
+
+		/* Enable RXCHK, active filters and Broadcom tag matching */
+		reg = rxchk_readl(priv, RXCHK_CONTROL);
+		reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
+			 RXCHK_BRCM_TAG_MATCH_SHIFT);
+		for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+			reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
+			i++;
+		}
+		reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
+		rxchk_writel(priv, reg, RXCHK_CONTROL);
+	}
+
 	/* Make sure RBUF entered WoL mode as result */
 	do {
 		reg = rbuf_readl(priv, RBUF_STATUS);
@@ -2449,9 +2633,7 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
 
 	/* Do not leave the UniMAC RBUF matching only MPD packets */
 	if (!timeout) {
-		reg = umac_readl(priv, UMAC_MPD_CTRL);
-		reg &= ~MPD_EN;
-		umac_writel(priv, reg, UMAC_MPD_CTRL);
+		mpd_enable_set(priv, false);
 		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
 		return -ETIMEDOUT;
 	}
@@ -2460,14 +2642,14 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
 	umac_enable_set(priv, CMD_RX_EN, 1);
 
 	/* Enable the interrupt wake-up source */
-	intrl2_0_mask_clear(priv, INTRL2_0_MPD);
+	intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
 
 	netif_dbg(priv, wol, ndev, "entered WOL mode\n");
 
 	return 0;
 }
 
-static int bcm_sysport_suspend(struct device *d)
+static int __maybe_unused bcm_sysport_suspend(struct device *d)
 {
 	struct net_device *dev = dev_get_drvdata(d);
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2529,7 +2711,7 @@ static int bcm_sysport_suspend(struct device *d)
 	return ret;
 }
 
-static int bcm_sysport_resume(struct device *d)
+static int __maybe_unused bcm_sysport_resume(struct device *d)
 {
 	struct net_device *dev = dev_get_drvdata(d);
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2622,7 +2804,6 @@ out_free_tx_rings:
 		bcm_sysport_fini_tx_ring(priv, i);
 	return ret;
 }
-#endif
 
 static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
		bcm_sysport_suspend, bcm_sysport_resume);
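The WAKE_FILTER support above is driven entirely through the standard ethtool rxnfc interface: a classification rule whose ring_cookie is RX_CLS_FLOW_WAKE arms one of the eight RXCHK filters as a wake-up source. A minimal userspace sketch of installing such a rule is below; install_wake_filter() is a hypothetical helper, and it assumes a uapi <linux/ethtool.h> recent enough to define RX_CLS_FLOW_WAKE (added in the same development cycle as this patch):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Hypothetical helper: mark the flow matching Broadcom tag classification
 * ID 'location' as a wake-up source. bcm_sysport_rule_set() above only
 * accepts rules whose ring_cookie is RX_CLS_FLOW_WAKE. */
static int install_wake_filter(const char *ifname, unsigned int location)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
	struct ifreq ifr = { 0 };
	int fd, ret;

	nfc.fs.location = location;		/* classification ID to match */
	nfc.fs.ring_cookie = RX_CLS_FLOW_WAKE;	/* wake-up, not RX steering */

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return ret;
}

The rule only takes effect once WAKE_FILTER is also enabled in the device's wolopts; bcm_sysport_suspend_to_wol() then translates the programmed filters into RXCHK matching at suspend time.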
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index d6e5d0cbf3a3..046c6c1d97fd 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -11,6 +11,7 @@
 #ifndef __BCM_SYSPORT_H
 #define __BCM_SYSPORT_H
 
+#include <linux/bitmap.h>
 #include <linux/if_vlan.h>
 #include <linux/net_dim.h>
 
@@ -155,14 +156,18 @@ struct bcm_rsb {
 #define  RXCHK_PARSE_AUTH		(1 << 22)
 
 #define RXCHK_BRCM_TAG0			0x04
-#define RXCHK_BRCM_TAG(i)		((i) * RXCHK_BRCM_TAG0)
+#define RXCHK_BRCM_TAG(i)		((i) * 0x4 + RXCHK_BRCM_TAG0)
 #define RXCHK_BRCM_TAG0_MASK		0x24
-#define RXCHK_BRCM_TAG_MASK(i)		((i) * RXCHK_BRCM_TAG0_MASK)
+#define RXCHK_BRCM_TAG_MASK(i)		((i) * 0x4 + RXCHK_BRCM_TAG0_MASK)
 #define RXCHK_BRCM_TAG_MATCH_STATUS	0x44
 #define RXCHK_ETHERTYPE			0x48
 #define RXCHK_BAD_CSUM_CNTR		0x4C
 #define RXCHK_OTHER_DISC_CNTR		0x50
 
+#define RXCHK_BRCM_TAG_MAX		8
+#define RXCHK_BRCM_TAG_CID_SHIFT	16
+#define RXCHK_BRCM_TAG_CID_MASK		0xff
+
 /* TXCHCK offsets and defines */
 #define SYS_PORT_TXCHK_OFFSET		0x380
 #define TXCHK_PKT_RDY_THRESH		0x00
@@ -185,6 +190,7 @@ struct bcm_rsb {
 #define  RBUF_RSB_SWAP0			(1 << 22)
 #define  RBUF_RSB_SWAP1			(1 << 23)
 #define  RBUF_ACPI_EN			(1 << 23)
+#define  RBUF_ACPI_EN_LITE		(1 << 24)
 
 #define RBUF_PKT_RDY_THRESH		0x04
 
@@ -278,7 +284,8 @@ struct bcm_rsb {
 #define  GIB_GTX_CLK_EXT_CLK		(0 << GIB_GTX_CLK_SEL_SHIFT)
 #define  GIB_GTX_CLK_125MHZ		(1 << GIB_GTX_CLK_SEL_SHIFT)
 #define  GIB_GTX_CLK_250MHZ		(2 << GIB_GTX_CLK_SEL_SHIFT)
-#define  GIB_FCS_STRIP			(1 << 6)
+#define  GIB_FCS_STRIP_SHIFT		6
+#define  GIB_FCS_STRIP			(1 << GIB_FCS_STRIP_SHIFT)
 #define  GIB_LCL_LOOP_EN		(1 << 7)
 #define  GIB_LCL_LOOP_TXEN		(1 << 8)
 #define  GIB_RMT_LOOP_EN		(1 << 9)
@@ -776,6 +783,7 @@ struct bcm_sysport_priv {
 
 	/* Ethtool */
 	u32			msg_enable;
+	DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX);
 
 	struct bcm_sysport_stats64	stats64;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index e6ea8e61f96d..4c94d9218bba 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -236,7 +236,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 {
 	struct device *dma_dev = bgmac->dma_dev;
 	int empty_slot;
-	bool freed = false;
 	unsigned bytes_compl = 0, pkts_compl = 0;
 
 	/* The last slot that hardware didn't consume yet */
@@ -279,7 +278,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 		slot->dma_addr = 0;
 		ring->start++;
-		freed = true;
 	}
 
 	if (!pkts_compl)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d847e1b9c37b..be1506169076 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1533,6 +1533,7 @@ struct bnx2x {
 	struct link_vars	link_vars;
 	u32			link_cnt;
 	struct bnx2x_link_report_data last_reported_link;
+	bool			force_link_down;
 
 	struct mdio_if_info	mdio;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8cd73ff5debc..5a727d4729da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
 {
 	struct bnx2x_link_report_data cur_data;
 
+	if (bp->force_link_down) {
+		bp->link_vars.link_up = 0;
+		return;
+	}
+
 	/* reread mf_cfg */
 	if (IS_PF(bp) && !CHIP_IS_E1(bp))
 		bnx2x_read_mf_cfg(bp);
@@ -1905,7 +1910,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       void *accel_priv, select_queue_fallback_t fallback)
+		       struct net_device *sb_dev,
+		       select_queue_fallback_t fallback)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
@@ -1927,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	/* select a non-FCoE queue */
-	return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+	return fallback(dev, skb, NULL) %
+	       (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -2817,6 +2824,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		bp->pending_max = 0;
 	}
 
+	bp->force_link_down = false;
 	if (bp->port.pmf) {
 		rc = bnx2x_initial_phy_init(bp, load_mode);
 		if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a8ce5c55bbb0..0e508e5defce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -497,7 +497,8 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
 
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       void *accel_priv, select_queue_fallback_t fallback);
+		       struct net_device *sb_dev,
+		       select_queue_fallback_t fallback);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
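The bcmsysport and bnx2x hunks above also convert .ndo_select_queue to the reworked signature in which the opaque accel_priv pointer is replaced by a struct net_device *sb_dev and the fallback helper takes a third argument. A kernel-context sketch of a converted callback, illustrative rather than taken from either driver:

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				struct net_device *sb_dev,
				select_queue_fallback_t fallback)
{
	/* No driver-private steering policy: use the core's default. */
	return fallback(dev, skb, NULL);
}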
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index da18aa239acb..a4a90b6cdb46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3388,14 +3388,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
 			DP(BNX2X_MSG_ETHTOOL,
 			   "rss re-configured, UDP 4-tupple %s\n",
 			   udp_rss_requested ? "enabled" : "disabled");
-			return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+			if (bp->state == BNX2X_STATE_OPEN)
+				return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+						 true);
 		} else if ((info->flow_type == UDP_V6_FLOW) &&
 			   (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
 			bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
 			DP(BNX2X_MSG_ETHTOOL,
 			   "rss re-configured, UDP 4-tupple %s\n",
 			   udp_rss_requested ? "enabled" : "disabled");
-			return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+			if (bp->state == BNX2X_STATE_OPEN)
+				return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+						 true);
 		}
 		return 0;
@@ -3509,7 +3513,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
 		bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
 	}
 
-	return bnx2x_config_rss_eth(bp, false);
+	if (bp->state == BNX2X_STATE_OPEN)
+		return bnx2x_config_rss_eth(bp, false);
+
+	return 0;
 }
 
 /**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 22243c480a05..98d4c5a3ff21 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6339,6 +6339,7 @@ int bnx2x_set_led(struct link_params *params,
 		 */
 		if (!vars->link_up)
 			break;
+		/* else: fall through */
 	case LED_MODE_ON:
 		if (((params->phy[EXT_PHY1].type ==
 			  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
@@ -12521,11 +12522,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
 	switch (link_config  & PORT_FEATURE_LINK_SPEED_MASK) {
 	case PORT_FEATURE_LINK_SPEED_10M_HALF:
 		phy->req_duplex = DUPLEX_HALF;
+		/* fall through */
 	case PORT_FEATURE_LINK_SPEED_10M_FULL:
 		phy->req_line_speed = SPEED_10;
 		break;
 	case PORT_FEATURE_LINK_SPEED_100M_HALF:
 		phy->req_duplex = DUPLEX_HALF;
+		/* fall through */
 	case PORT_FEATURE_LINK_SPEED_100M_FULL:
 		phy->req_line_speed = SPEED_100;
 		break;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5b1ed240bf18..71362b7f6040 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8561,11 +8561,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
 			       bp->num_queues,
 			       1 + bp->num_cnic_queues);
 
-		/* falling through... */
+		/* fall through */
 	case BNX2X_INT_MODE_MSI:
 		bnx2x_enable_msi(bp);
 
-		/* falling through... */
+		/* fall through */
 	case BNX2X_INT_MODE_INTX:
 		bp->num_ethernet_queues = 1;
 		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
 		bp->sp_rtnl_state = 0;
 		smp_mb();
 
+		/* Immediately indicate link as down */
+		bp->link_vars.link_up = 0;
+		bp->force_link_down = true;
+		netif_carrier_off(bp->dev);
+		BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
 		bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
 		/* When ret value shows failure of allocation failure,
 		 * the nic is rebooted again. If open still fails, a error
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8baf9d3eb4b1..3f4d2c8da21a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
 	/* DEL command deletes all currently configured MACs */
 	case BNX2X_MCAST_CMD_DEL:
 		o->set_registry_size(o, 0);
-		/* Don't break */
+		/* fall through */
 
 	/* RESTORE command will restore the entire multicast configuration */
 	case BNX2X_MCAST_CMD_RESTORE:
@@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
 	/* DEL command deletes all currently configured MACs */
 	case BNX2X_MCAST_CMD_DEL:
 		o->set_registry_size(o, 0);
-		/* Don't break */
+		/* fall through */
 
 	/* RESTORE command will restore the entire multicast configuration */
 	case BNX2X_MCAST_CMD_RESTORE:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index dc77bfded865..62da46537734 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1827,6 +1827,7 @@ get_vf:
 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
 		   vf->abs_vfid, qidx);
 		bnx2x_vf_handle_rss_update_eqe(bp, vf);
+		/* fall through */
 	case EVENT_RING_OPCODE_VF_FLR:
		/* Do nothing for now */
		return 0;
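A recurring cleanup in the bnx2x hunks above is rewriting ad-hoc fall-through notes ("/* Don't break */", "/* falling through... */") as the canonical /* fall through */ comment that GCC's -Wimplicit-fallthrough warning recognizes. A self-contained illustration of the convention (example values, not code from this series):

enum link_cfg { LINK_CFG_10M_HALF, LINK_CFG_10M_FULL };

static void pick_speed(enum link_cfg cfg, int *speed, int *half_duplex)
{
	switch (cfg) {
	case LINK_CFG_10M_HALF:
		*half_duplex = 1;
		/* fall through */
	case LINK_CFG_10M_FULL:
		*speed = 10;
		break;
	}
}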
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 176fc9f4d7de..8bb1e38b1681 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -51,6 +51,8 @@
 #include <linux/cpu_rmap.h>
 #include <linux/cpumask.h>
 #include <net/pkt_cls.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
 
 #include "bnxt_hsi.h"
 #include "bnxt.h"
@@ -1115,7 +1117,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 		tpa_info->hash_type = PKT_HASH_TYPE_L4;
 		tpa_info->gso_type = SKB_GSO_TCPV4;
 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
-		if (hash_type == 3)
+		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
 			tpa_info->gso_type = SKB_GSO_TCPV6;
 		tpa_info->rss_hash =
 			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
@@ -1727,8 +1729,8 @@ static int bnxt_async_event_process(struct bnxt *bp,
 					    speed);
 		}
 		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
-		/* fall thru */
 	}
+	/* fall through */
 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
 		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
 		break;
@@ -3012,13 +3014,6 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
 			  bp->hwrm_cmd_resp_dma_addr);
 
 	bp->hwrm_cmd_resp_addr = NULL;
-	if (bp->hwrm_dbg_resp_addr) {
-		dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
-				  bp->hwrm_dbg_resp_addr,
-				  bp->hwrm_dbg_resp_dma_addr);
-
-		bp->hwrm_dbg_resp_addr = NULL;
-	}
 }
 
 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -3030,12 +3025,6 @@ static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
 						   GFP_KERNEL);
 	if (!bp->hwrm_cmd_resp_addr)
 		return -ENOMEM;
-	bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
-						    HWRM_DBG_REG_BUF_SIZE,
-						    &bp->hwrm_dbg_resp_dma_addr,
-						    GFP_KERNEL);
-	if (!bp->hwrm_dbg_resp_addr)
-		netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
 
 	return 0;
 }
@@ -3458,7 +3447,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 	cp_ring_id = le16_to_cpu(req->cmpl_ring);
 	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
 
-	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+	if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
 		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
 
 		memcpy(short_cmd_req, req, msg_len);
@@ -3651,7 +3640,9 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
 
 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
 {
+	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
 	struct hwrm_func_drv_rgtr_input req = {0};
+	int rc;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
 
@@ -3689,7 +3680,15 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
 	}
 
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		rc = -EIO;
+	else if (resp->flags &
+		 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
 }
 
 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
@@ -3994,6 +3993,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
 	if (set_rss) {
 		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
+		req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
 		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
 			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
 				max_rings = bp->rx_nr_rings - 1;
@@ -4591,7 +4591,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
 	}
 
 	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
-	if (bp->flags & BNXT_FLAG_NEW_RM) {
+	if (BNXT_NEW_RM(bp)) {
 		u16 cp, stats;
 
 		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
@@ -4637,7 +4637,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
 	req->fid = cpu_to_le16(0xffff);
 	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
 	req->num_tx_rings = cpu_to_le16(tx_rings);
-	if (bp->flags & BNXT_FLAG_NEW_RM) {
+	if (BNXT_NEW_RM(bp)) {
 		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
 		enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
 				      FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
@@ -4710,7 +4710,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 	struct hwrm_func_vf_cfg_input req = {0};
 	int rc;
 
-	if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
+	if (!BNXT_NEW_RM(bp)) {
 		bp->hw_resc.resv_tx_rings = tx_rings;
 		return 0;
 	}
@@ -4770,7 +4770,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
 		vnic = rx + 1;
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		rx <<= 1;
-	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+	if (BNXT_NEW_RM(bp) &&
 	    (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
 	     hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
 		return true;
@@ -4806,7 +4806,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
 		return rc;
 
 	tx = hw_resc->resv_tx_rings;
-	if (bp->flags & BNXT_FLAG_NEW_RM) {
+	if (BNXT_NEW_RM(bp)) {
 		rx = hw_resc->resv_rx_rings;
 		cp = hw_resc->resv_cp_rings;
 		grp = hw_resc->resv_hw_ring_grps;
@@ -4850,7 +4850,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 	u32 flags;
 	int rc;
 
-	if (!(bp->flags & BNXT_FLAG_NEW_RM))
+	if (!BNXT_NEW_RM(bp))
 		return 0;
 
 	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
@@ -4879,7 +4879,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
 				     cp_rings, vnics);
 	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
-	if (bp->flags & BNXT_FLAG_NEW_RM)
+	if (BNXT_NEW_RM(bp))
 		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
 			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
 			 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
@@ -5101,9 +5101,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 	flags = le16_to_cpu(resp->flags);
 	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
 		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
-		bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
 		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
-			bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
+			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
 	}
 	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
 		bp->flags |= BNXT_FLAG_MULTI_HOST;
@@ -5175,7 +5175,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
 
 		pf->vf_resv_strategy =
 			le16_to_cpu(resp->vf_reservation_strategy);
-		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
+		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
 			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
 	}
 hwrm_func_resc_qcaps_exit:
@@ -5261,7 +5261,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (bp->hwrm_spec_code >= 0x10803) {
 		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
 		if (!rc)
-			bp->flags |= BNXT_FLAG_NEW_RM;
+			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
 	}
 	return 0;
 }
@@ -5281,7 +5281,8 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 	int rc = 0;
 	struct hwrm_queue_qportcfg_input req = {0};
 	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
-	u8 i, *qptr;
+	u8 i, j, *qptr;
+	bool no_rdma;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
 
@@ -5299,19 +5300,24 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 	if (bp->max_tc > BNXT_MAX_QUEUE)
 		bp->max_tc = BNXT_MAX_QUEUE;
 
+	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
+	qptr = &resp->queue_id0;
+	for (i = 0, j = 0; i < bp->max_tc; i++) {
+		bp->q_info[j].queue_id = *qptr++;
+		bp->q_info[j].queue_profile = *qptr++;
+		bp->tc_to_qidx[j] = j;
+		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
+		    (no_rdma && BNXT_PF(bp)))
+			j++;
+	}
+	bp->max_tc = max_t(u8, j, 1);
+
 	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
 		bp->max_tc = 1;
 
 	if (bp->max_lltc > bp->max_tc)
 		bp->max_lltc = bp->max_tc;
 
-	qptr = &resp->queue_id0;
-	for (i = 0; i < bp->max_tc; i++) {
-		bp->q_info[i].queue_id = *qptr++;
-		bp->q_info[i].queue_profile = *qptr++;
-		bp->tc_to_qidx[i] = i;
-	}
-
 qportcfg_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
@@ -5364,7 +5370,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
 	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
 	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
 	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
-		bp->flags |= BNXT_FLAG_SHORT_CMD;
+		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
 
 hwrm_ver_get_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
@@ -5712,7 +5718,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 	}
 	vnic->uc_filter_count = 1;
 
-	vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+	vnic->rx_mask = 0;
+	if (bp->dev->flags & IFF_BROADCAST)
+		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
 
 	if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
@@ -5917,7 +5925,7 @@ unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
 	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
 }
 
-void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
 {
 	bp->hw_resc.max_irqs = max_irqs;
 }
@@ -5931,7 +5939,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
 	max_idx = min_t(int, bp->total_irqs, max_cp);
 	avail_msix = max_idx - bp->cp_nr_rings;
-	if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num)
+	if (!BNXT_NEW_RM(bp) || avail_msix >= num)
 		return avail_msix;
 
 	if (max_irq < total_req) {
@@ -5944,7 +5952,7 @@ static int bnxt_get_num_msix(struct bnxt *bp)
 {
-	if (!(bp->flags & BNXT_FLAG_NEW_RM))
+	if (!BNXT_NEW_RM(bp))
 		return bnxt_get_max_func_irqs(bp);
 
 	return bnxt_cp_rings_in_use(bp);
@@ -6067,8 +6075,7 @@ int bnxt_reserve_rings(struct bnxt *bp)
 		netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
 		return rc;
 	}
-	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
-	    (bnxt_get_num_msix(bp) != bp->total_irqs)) {
+	if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
 		bnxt_ulp_irq_stop(bp);
 		bnxt_clear_int_mode(bp);
 		rc = bnxt_init_int_mode(bp);
@@ -6348,6 +6355,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
 	}
+	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
+		if (bp->test_info)
+			bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
+	}
 	if (resp->supported_speeds_auto_mode)
 		link_info->support_auto_speeds =
 			le16_to_cpu(resp->supported_speeds_auto_mode);
@@ -6644,6 +6655,39 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
 
+static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+{
+	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_drv_if_change_input req = {0};
+	bool resc_reinit = false;
+	int rc;
+
+	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
+	if (up)
+		req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc && (resp->flags &
+		    cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
+		resc_reinit = true;
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	if (up && resc_reinit && BNXT_NEW_RM(bp)) {
+		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+		hw_resc->resv_cp_rings = 0;
+		hw_resc->resv_tx_rings = 0;
+		hw_resc->resv_rx_rings = 0;
+		hw_resc->resv_hw_ring_grps = 0;
+		hw_resc->resv_vnics = 0;
+	}
+	return rc;
+}
+
 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
 {
 	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
@@ -6753,6 +6797,62 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
 	} while (handle && handle != 0xffff);
 }
 
+#ifdef CONFIG_BNXT_HWMON
+static ssize_t bnxt_show_temp(struct device *dev,
+			      struct device_attribute *devattr, char *buf)
+{
+	struct hwrm_temp_monitor_query_input req = {0};
+	struct hwrm_temp_monitor_query_output *resp;
+	struct bnxt *bp = dev_get_drvdata(dev);
+	u32 temp = 0;
+
+	resp = bp->hwrm_cmd_resp_addr;
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+		temp = resp->temp * 1000; /* display millidegree */
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	return sprintf(buf, "%u\n", temp);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
+
+static struct attribute *bnxt_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(bnxt);
+
+static void bnxt_hwmon_close(struct bnxt *bp)
+{
+	if (bp->hwmon_dev) {
+		hwmon_device_unregister(bp->hwmon_dev);
+		bp->hwmon_dev = NULL;
+	}
+}
+
+static void bnxt_hwmon_open(struct bnxt *bp)
+{
+	struct pci_dev *pdev = bp->pdev;
+
+	bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+							  DRV_MODULE_NAME, bp,
+							  bnxt_groups);
+	if (IS_ERR(bp->hwmon_dev)) {
+		bp->hwmon_dev = NULL;
+		dev_warn(&pdev->dev, "Cannot register hwmon device\n");
+	}
+}
+#else
+static void bnxt_hwmon_close(struct bnxt *bp)
+{
+}
+
+static void bnxt_hwmon_open(struct bnxt *bp)
+{
+}
+#endif
+
 static bool bnxt_eee_config_ok(struct bnxt *bp)
 {
 	struct ethtool_eee *eee = &bp->eee;
@@ -6888,7 +6988,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 		rc = bnxt_request_irq(bp);
 		if (rc) {
 			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
-			goto open_err;
+			goto open_err_irq;
 		}
 	}
@@ -6905,8 +7005,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 		mutex_lock(&bp->link_lock);
 		rc = bnxt_update_phy_setting(bp);
 		mutex_unlock(&bp->link_lock);
-		if (rc)
+		if (rc) {
 			netdev_warn(bp->dev, "failed to update phy settings\n");
+			if (BNXT_SINGLE_PF(bp)) {
+				bp->link_info.phy_retry = true;
+				bp->link_info.phy_retry_expires =
+					jiffies + 5 * HZ;
+			}
+		}
 	}
 
 	if (irq_re_init)
@@ -6928,6 +7034,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 open_err:
 	bnxt_debug_dev_exit(bp);
 	bnxt_disable_napi(bp);
+
+open_err_irq:
 	bnxt_del_napi(bp);
 
 open_err_free_mem:
@@ -6990,8 +7098,16 @@ void bnxt_half_close_nic(struct bnxt *bp)
 
 static int bnxt_open(struct net_device *dev)
 {
 	struct bnxt *bp = netdev_priv(dev);
+	int rc;
+
+	bnxt_hwrm_if_change(bp, true);
+	rc = __bnxt_open_nic(bp, true, true);
+	if (rc)
+		bnxt_hwrm_if_change(bp, false);
+
+	bnxt_hwmon_open(bp);
 
-	return __bnxt_open_nic(bp, true, true);
+	return rc;
 }
 
 static bool bnxt_drv_busy(struct bnxt *bp)
@@ -7053,8 +7169,10 @@ static int bnxt_close(struct net_device *dev)
 {
 	struct bnxt *bp = netdev_priv(dev);
 
+	bnxt_hwmon_close(bp);
 	bnxt_close_nic(bp, true, true);
 	bnxt_hwrm_shutdown_link(bp);
+	bnxt_hwrm_if_change(bp, false);
 	return 0;
 }
 
@@ -7214,13 +7332,16 @@ static void bnxt_set_rx_mode(struct net_device *dev)
 
 	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
 		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
-		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
+		  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
 
 	if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
 
 	uc_update = bnxt_uc_list_updated(bp);
 
+	if (dev->flags & IFF_BROADCAST)
+		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
 	if (dev->flags & IFF_ALLMULTI) {
 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
 		vnic->mc_list_count = 0;
@@ -7301,7 +7422,7 @@ skip_uc:
 static bool bnxt_can_reserve_rings(struct bnxt *bp)
 {
 #ifdef CONFIG_BNXT_SRIOV
-	if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) {
+	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
 		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 
 		/* No minimum rings were provisioned by the PF.  Don't
@@ -7351,7 +7472,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
 		return false;
 	}
 
-	if (!(bp->flags & BNXT_FLAG_NEW_RM))
+	if (!BNXT_NEW_RM(bp))
 		return true;
 
 	if (vnics == bp->hw_resc.resv_vnics)
@@ -7585,6 +7706,16 @@ static void bnxt_timer(struct timer_list *t)
 		set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
 		bnxt_queue_sp_work(bp);
 	}
+
+	if (bp->link_info.phy_retry) {
+		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
+			bp->link_info.phy_retry = 0;
+			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
+		} else {
+			set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
+			bnxt_queue_sp_work(bp);
+		}
+	}
 bnxt_restart_timer:
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
@@ -7672,6 +7803,19 @@ static void bnxt_sp_task(struct work_struct *work)
 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
 				   rc);
 	}
+	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
+		int rc;
+
+		mutex_lock(&bp->link_lock);
+		rc = bnxt_update_phy_setting(bp);
+		mutex_unlock(&bp->link_lock);
+		if (rc) {
+			netdev_warn(bp->dev, "update phy settings retry failed\n");
+		} else {
+			bp->link_info.phy_retry = false;
+			netdev_info(bp->dev, "update phy settings retry succeeded\n");
+		}
+	}
 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
 		mutex_lock(&bp->link_lock);
 		bnxt_get_port_module_status(bp);
@@ -7724,7 +7868,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		rx_rings <<= 1;
 	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
-	if (bp->flags & BNXT_FLAG_NEW_RM)
+	if (BNXT_NEW_RM(bp))
 		cp += bnxt_get_ulp_msix_num(bp);
 	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
 				     vnics);
@@ -7984,7 +8128,7 @@ static int bnxt_setup_tc_block(struct net_device *dev,
 	switch (f->command) {
 	case TC_BLOCK_BIND:
 		return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
-					     bp, bp);
+					     bp, bp, f->extack);
 	case TC_BLOCK_UNBIND:
 		tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
 		return 0;
@@ -8502,11 +8646,11 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
 	int rx, tx, cp;
 
 	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
+	*max_rx = rx;
+	*max_tx = tx;
 	if (!rx || !tx || !cp)
 		return -ENOMEM;
 
-	*max_rx = rx;
-	*max_tx = tx;
 	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
 }
 
@@ -8520,8 +8664,11 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 		/* Not enough rings, try disabling agg rings. */
 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
 		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
-		if (rc)
+		if (rc) {
+			/* set BNXT_FLAG_AGG_RINGS back for consistency */
+			bp->flags |= BNXT_FLAG_AGG_RINGS;
 			return rc;
+		}
 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
 		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
 		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
@@ -8730,7 +8877,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err_pci_clean;
 
-	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+	if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
 		rc = bnxt_alloc_hwrm_short_cmd_req(bp);
 		if (rc)
			goto init_err_pci_clean;
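With CONFIG_BNXT_HWMON enabled, the code above publishes the adapter temperature as a standard hwmon attribute, so no driver-specific tooling is needed to read it. A small userspace sketch; the hwmon0 index below is illustrative and varies from system to system:

#include <stdio.h>

int main(void)
{
	unsigned int millideg;
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &millideg) == 1)	/* driver reports millidegrees */
		printf("NIC temperature: %u.%03u C\n",
		       millideg / 1000, millideg % 1000);
	fclose(f);
	return 0;
}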
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9b14eb610b9f..fefa011320e0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
 #define BNXT_H
 
 #define DRV_MODULE_NAME		"bnxt_en"
-#define DRV_MODULE_VERSION	"1.9.1"
+#define DRV_MODULE_VERSION	"1.9.2"
 
 #define DRV_VER_MAJ	1
 #define DRV_VER_MIN	9
-#define DRV_VER_UPD	1
+#define DRV_VER_UPD	2
 
 #include <linux/interrupt.h>
 #include <linux/rhashtable.h>
@@ -326,6 +326,10 @@ struct rx_tpa_start_cmp_ext {
 	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) &	\
	 RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
 
+#define TPA_START_IS_IPV6(rx_tpa_start)				\
+	(!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 &		\
+	    cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
+
 struct rx_tpa_end_cmp {
 	__le32 rx_tpa_end_cmp_len_flags_type;
 	#define RX_TPA_END_CMP_TYPE				(0x3f << 0)
@@ -862,6 +866,7 @@ struct bnxt_pf_info {
 	u8	vf_resv_strategy;
 #define BNXT_VF_RESV_STRATEGY_MAXIMAL	0
 #define BNXT_VF_RESV_STRATEGY_MINIMAL	1
+#define BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC	2
 	void			*hwrm_cmd_req_addr[4];
 	dma_addr_t		hwrm_cmd_req_dma_addr[4];
 	struct bnxt_vf_info	*vf;
@@ -959,6 +964,9 @@ struct bnxt_link_info {
 	u16			advertising;	/* user adv setting */
 	bool			force_link_chng;
 
+	bool			phy_retry;
+	unsigned long		phy_retry_expires;
+
 	/* a copy of phy_qcfg output used to report link
 	 * info to VF
 	 */
@@ -990,6 +998,8 @@ struct bnxt_led_info {
 struct bnxt_test_info {
 	u8 offline_mask;
+	u8 flags;
+#define BNXT_TEST_FL_EXT_LPBK	0x1
 	u16 timeout;
 	char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
 };
@@ -1134,7 +1144,6 @@ struct bnxt {
 	atomic_t		intr_sem;
 
 	u32			flags;
-	#define BNXT_FLAG_DCB_ENABLED	0x1
 	#define BNXT_FLAG_VF		0x2
 	#define BNXT_FLAG_LRO		0x4
 #ifdef CONFIG_INET
@@ -1163,15 +1172,11 @@ struct bnxt {
 					 BNXT_FLAG_ROCEV2_CAP)
 	#define BNXT_FLAG_NO_AGG_RINGS	0x20000
 	#define BNXT_FLAG_RX_PAGE_MODE	0x40000
-	#define BNXT_FLAG_FW_LLDP_AGENT	0x80000
 	#define BNXT_FLAG_MULTI_HOST	0x100000
-	#define BNXT_FLAG_SHORT_CMD	0x200000
 	#define BNXT_FLAG_DOUBLE_DB	0x400000
-	#define BNXT_FLAG_FW_DCBX_AGENT	0x800000
 	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
 	#define BNXT_FLAG_DIM		0x2000000
 	#define BNXT_FLAG_ROCE_MIRROR_CAP	0x4000000
-	#define BNXT_FLAG_NEW_RM	0x8000000
 	#define BNXT_FLAG_PORT_STATS_EXT	0x10000000
 
 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
@@ -1276,10 +1281,19 @@ struct bnxt {
 	struct ieee_ets		*ieee_ets;
 	u8			dcbx_cap;
 	u8			default_pri;
+	u8			max_dscp_value;
 #endif /* CONFIG_BNXT_DCB */
 
 	u32			msg_enable;
 
+	u32			fw_cap;
+	#define BNXT_FW_CAP_SHORT_CMD	0x00000001
+	#define BNXT_FW_CAP_LLDP_AGENT	0x00000002
+	#define BNXT_FW_CAP_DCBX_AGENT	0x00000004
+	#define BNXT_FW_CAP_NEW_RM	0x00000008
+	#define BNXT_FW_CAP_IF_CHANGE	0x00000010
+
+#define BNXT_NEW_RM(bp)		((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
 	u32			hwrm_spec_code;
 	u16			hwrm_cmd_seq;
 	u32			hwrm_intr_seq_id;
@@ -1287,9 +1301,6 @@ struct bnxt {
 	dma_addr_t		hwrm_short_cmd_req_dma_addr;
 	void			*hwrm_cmd_resp_addr;
 	dma_addr_t		hwrm_cmd_resp_dma_addr;
-	void			*hwrm_dbg_resp_addr;
-	dma_addr_t		hwrm_dbg_resp_dma_addr;
-#define HWRM_DBG_REG_BUF_SIZE	128
 
 	struct rx_port_stats	*hw_rx_port_stats;
 	struct tx_port_stats	*hw_tx_port_stats;
@@ -1345,6 +1356,7 @@ struct bnxt {
 #define BNXT_GENEVE_DEL_PORT_SP_EVENT	13
 #define BNXT_LINK_SPEED_CHNG_SP_EVENT	14
 #define BNXT_FLOW_STATS_SP_EVENT	15
+#define BNXT_UPDATE_PHY_SP_EVENT	16
 
 	struct bnxt_hw_resc	hw_resc;
 	struct bnxt_pf_info	pf;
@@ -1400,6 +1412,7 @@ struct bnxt {
 	struct bnxt_tc_info	*tc_info;
 	struct dentry		*debugfs_pdev;
 	struct dentry		*debugfs_dim;
+	struct device		*hwmon_dev;
 };
 
 #define BNXT_RX_STATS_OFFSET(counter)			\
@@ -1470,7 +1483,6 @@ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
-void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
 int bnxt_get_avail_msix(struct bnxt *bp, int num);
 int bnxt_reserve_rings(struct bnxt *bp);
 void bnxt_tx_disable(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
new file mode 100644
index 000000000000..09c22f8fe399
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -0,0 +1,66 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2018 Broadcom Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_COREDUMP_H
+#define BNXT_COREDUMP_H
+
+struct bnxt_coredump_segment_hdr {
+	__u8 signature[4];
+	__le32 component_id;
+	__le32 segment_id;
+	__le32 flags;
+	__u8 low_version;
+	__u8 high_version;
+	__le16 function_id;
+	__le32 offset;
+	__le32 length;
+	__le32 status;
+	__le32 duration;
+	__le32 data_offset;
+	__le32 instance;
+	__le32 rsvd[5];
+};
+
+struct bnxt_coredump_record {
+	__u8 signature[4];
+	__le32 flags;
+	__u8 low_version;
+	__u8 high_version;
+	__u8 asic_state;
+	__u8 rsvd0[5];
+	char system_name[32];
+	__le16 year;
+	__le16 month;
+	__le16 day;
+	__le16 hour;
+	__le16 minute;
+	__le16 second;
+	__le16 utc_bias;
+	__le16 rsvd1;
+	char commandline[256];
+	__le32 total_segments;
+	__le32 os_ver_major;
+	__le32 os_ver_minor;
+	__le32 rsvd2;
+	char os_name[32];
+	__le16 end_year;
+	__le16 end_month;
+	__le16 end_day;
+	__le16 end_hour;
+	__le16 end_minute;
+	__le16 end_second;
+	__le16 end_utc_bias;
+	__le32 asic_id1;
+	__le32 asic_id2;
+	__le32 coredump_status;
+	__u8 ioctl_low_version;
+	__u8 ioctl_high_version;
+	__le16 rsvd3[313];
+};
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index d5bc72cecde3..ddc98c359488 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -385,6 +385,61 @@ set_app_exit:
 	return rc;
 }
 
+static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
+{
+	struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_queue_dscp_qcaps_input req = {0};
+	int rc;
+
+	if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc) {
+		bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
+		if (bp->max_dscp_value < 0x3f)
+			bp->max_dscp_value = 0;
+	}
+
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
+					bool add)
+{
+	struct hwrm_queue_dscp2pri_cfg_input req = {0};
+	struct bnxt_dscp2pri_entry *dscp2pri;
+	dma_addr_t mapping;
+	int rc;
+
+	if (bp->hwrm_spec_code < 0x10800)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
+	dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri),
+				      &mapping, GFP_KERNEL);
+	if (!dscp2pri)
+		return -ENOMEM;
+
+	req.src_data_addr = cpu_to_le64(mapping);
+	dscp2pri->dscp = app->protocol;
+	if (add)
+		dscp2pri->mask = 0x3f;
+	else
+		dscp2pri->mask = 0;
+	dscp2pri->pri = app->priority;
+	req.entry_cnt = cpu_to_le16(1);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		rc = -EIO;
+	dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
+			  mapping);
+	return rc;
+}
+
 static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
 {
 	int total_ets_bw = 0;
@@ -551,15 +606,30 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
 	return rc;
 }
 
+static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app)
+{
+	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) {
+		if (!bp->max_dscp_value)
+			return -ENOTSUPP;
+		if (app->protocol > bp->max_dscp_value)
+			return -EINVAL;
+	}
+	return 0;
+}
+
 static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
 {
 	struct bnxt *bp = netdev_priv(dev);
-	int rc = -EINVAL;
+	int rc;
 
 	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
 	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
 		return -EINVAL;
 
+	rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
+	if (rc)
+		return rc;
+
 	rc = dcb_ieee_setapp(dev, app);
 	if (rc)
 		return rc;
@@ -570,6 +640,9 @@ static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
 	     app->protocol == ROCE_V2_UDP_DPORT))
 		rc = bnxt_hwrm_set_dcbx_app(bp, app, true);
 
+	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+		rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true);
+
 	return rc;
 }
 
@@ -582,6 +655,10 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
 	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
 		return -EINVAL;
 
+	rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
+	if (rc)
+		return rc;
+
 	rc = dcb_ieee_delapp(dev, app);
 	if (rc)
 		return rc;
@@ -591,6 +668,9 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
 	     app->protocol == ROCE_V2_UDP_DPORT))
 		rc = bnxt_hwrm_set_dcbx_app(bp, app, false);
 
+	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+		rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false);
+
 	return rc;
 }
 
@@ -610,7 +690,7 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
 		return 1;
 
 	if (mode & DCB_CAP_DCBX_HOST) {
-		if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
+		if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
 			return 1;
 
 		/* only support IEEE */
@@ -642,10 +722,11 @@ void bnxt_dcb_init(struct bnxt *bp)
 	if (bp->hwrm_spec_code < 0x10501)
 		return;
 
+	bnxt_hwrm_queue_dscp_qcaps(bp);
 	bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
-	if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
+	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
 		bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
-	else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT)
+	else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT)
 		bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
 	bp->dev->dcbnl_ops = &dcbnl_ops;
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 69efde785f23..6eed231de565 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -33,10 +33,20 @@ struct bnxt_cos2bw_cfg {
 	u8			unused;
 };
 
+struct bnxt_dscp2pri_entry {
+	u8	dscp;
+	u8	mask;
+	u8	pri;
+};
+
 #define BNXT_LLQ(q_profile)	\
 	((q_profile) ==		\
	 QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE)
 
+#define BNXT_CNPQ(q_profile)	\
+	((q_profile) ==		\
+	 QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
+
 #define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL	0x0300
 
 void bnxt_dcb_init(struct bnxt *bp);
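The DSCP changes above accept IEEE_8021QAZ_APP_SEL_DSCP entries from the dcbnl APP table and translate each into a bnxt_dscp2pri_entry that is DMAed to the firmware with a full 6-bit mask. As a rough illustration (example values only), the entry handed to bnxt_dcbnl_ieee_setapp() when mapping DSCP 26 (AF31) to priority 3 would look like:

#include <linux/dcbnl.h>

static const struct dcb_app example_dscp_app = {
	.selector = IEEE_8021QAZ_APP_SEL_DSCP,
	.protocol = 26,	/* DSCP codepoint AF31 */
	.priority = 3,	/* 802.1p priority to assign */
};

bnxt_dcbnl_ieee_dscp_app_prep() rejects such an entry when the firmware reported no DSCP support (max_dscp_value of zero) or when the codepoint exceeds max_dscp_value.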
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 402fa32f7a88..f3b9fbcc705b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,16 +21,99 @@ static const struct devlink_ops bnxt_dl_ops = {
 #endif /* CONFIG_BNXT_SRIOV */
 };
 
+static const struct bnxt_dl_nvm_param nvm_params[] = {
+	{DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
+	 BNXT_NVM_SHARED_CFG, 1},
+};
+
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+			     int msg_len, union devlink_param_value *val)
+{
+	struct hwrm_nvm_get_variable_input *req = msg;
+	void *data_addr = NULL, *buf = NULL;
+	struct bnxt_dl_nvm_param nvm_param;
+	int bytesize, idx = 0, rc, i;
+	dma_addr_t data_dma_addr;
+
+	/* Get/Set NVM CFG parameter is supported only on PFs */
+	if (BNXT_VF(bp))
+		return -EPERM;
+
+	for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+		if (nvm_params[i].id == param_id) {
+			nvm_param = nvm_params[i];
+			break;
+		}
+	}
+
+	if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+		idx = bp->pf.port_id;
+	else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+		idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
+
+	bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
+	if (nvm_param.num_bits == 1)
+		buf = &val->vbool;
+
+	data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
+					&data_dma_addr, GFP_KERNEL);
+	if (!data_addr)
+		return -ENOMEM;
+
+	req->dest_data_addr = cpu_to_le64(data_dma_addr);
+	req->data_len = cpu_to_le16(nvm_param.num_bits);
+	req->option_num = cpu_to_le16(nvm_param.offset);
+	req->index_0 = cpu_to_le16(idx);
+	if (idx)
+		req->dimensions = cpu_to_le16(1);
+
+	if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE))
+		memcpy(data_addr, buf, bytesize);
+
+	rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+	if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
+		memcpy(buf, data_addr, bytesize);
+
+	dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+	if (rc)
+		return -EIO;
+	return 0;
+}
+
+static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
+				 struct devlink_param_gset_ctx *ctx)
+{
+	struct hwrm_nvm_get_variable_input req = {0};
+	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
+	return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+}
+
+static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
+				 struct devlink_param_gset_ctx *ctx)
+{
+	struct hwrm_nvm_set_variable_input req = {0};
+	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
+	return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+}
+
+static const struct devlink_param bnxt_dl_params[] = {
+	DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
+			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+			      NULL),
+};
+
 int bnxt_dl_register(struct bnxt *bp)
 {
 	struct devlink *dl;
 	int rc;
 
-	if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
-		return 0;
-
-	if (bp->hwrm_spec_code < 0x10803) {
-		netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n");
+	if (bp->hwrm_spec_code < 0x10600) {
+		netdev_warn(bp->dev, "Firmware does not support NVM params");
 		return -ENOTSUPP;
 	}
 
@@ -41,16 +124,34 @@ int bnxt_dl_register(struct bnxt *bp)
 	}
 
 	bnxt_link_bp_to_dl(bp, dl);
-	bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
+	/* Add switchdev eswitch mode setting, if SRIOV supported */
+	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
+	    bp->hwrm_spec_code > 0x10803)
+		bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
 	rc = devlink_register(dl, &bp->pdev->dev);
 	if (rc) {
-		bnxt_link_bp_to_dl(bp, NULL);
-		devlink_free(dl);
 		netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
-		return rc;
+		goto err_dl_free;
+	}
+
+	rc = devlink_params_register(dl, bnxt_dl_params,
+				     ARRAY_SIZE(bnxt_dl_params));
+	if (rc) {
+		netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
+			    rc);
+		goto err_dl_unreg;
 	}
 
 	return 0;
+
+err_dl_unreg:
+	devlink_unregister(dl);
+err_dl_free:
+	bnxt_link_bp_to_dl(bp, NULL);
+	devlink_free(dl);
+	return rc;
 }
 
 void bnxt_dl_unregister(struct bnxt *bp)
@@ -60,6 +161,8 @@ void bnxt_dl_unregister(struct bnxt *bp)
 	if (!dl)
 		return;
 
+	devlink_params_unregister(dl, bnxt_dl_params,
+				  ARRAY_SIZE(bnxt_dl_params));
 	devlink_unregister(dl);
 	devlink_free(dl);
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index e92a35d8b642..2f68dc048390 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -33,6 +33,21 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
 	}
 }
 
+#define NVM_OFF_ENABLE_SRIOV		401
+
+enum bnxt_nvm_dir_type {
+	BNXT_NVM_SHARED_CFG = 40,
+	BNXT_NVM_PORT_CFG,
+	BNXT_NVM_FUNC_CFG,
+};
+
+struct bnxt_dl_nvm_param {
+	u16 id;
+	u16 offset;
+	u16 dir_type;
+	u16 num_bits;
+};
+
 int bnxt_dl_register(struct bnxt *bp);
 void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7270c8b0cef3..e52d7af3ab3e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -16,12 +16,15 @@
 #include <linux/etherdevice.h>
 #include <linux/crc32.h>
 #include <linux/firmware.h>
+#include <linux/utsname.h>
+#include <linux/time.h>
 #include "bnxt_hsi.h"
 #include "bnxt.h"
 #include "bnxt_xdp.h"
 #include "bnxt_ethtool.h"
 #include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
 #include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
+#include "bnxt_coredump.h"
 #define FLASH_NVRAM_TIMEOUT	((HWRM_CMD_TIMEOUT) * 100)
 #define FLASH_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)
 #define INSTALL_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)
@@ -112,6 +115,11 @@ static int bnxt_set_coalesce(struct net_device *dev,
 					      BNXT_MAX_STATS_COAL_TICKS);
 		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
 		bp->stats_coal_ticks = stats_ticks;
+		if (bp->stats_coal_ticks)
+			bp->current_interval =
+				bp->stats_coal_ticks * HZ / 1000000;
+		else
+			bp->current_interval = BNXT_TIMER_INTERVAL;
 		update_stats = true;
 	}
 
@@ -162,7 +170,7 @@ static const struct {
 	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
 	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
 	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
-	BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames),
+	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
 	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
 	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
 	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
@@ -205,9 +213,9 @@ static const struct {
 	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
 	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
 	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
-	BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames),
+	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
 	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
-	BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames),
+	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
 	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
 	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
 	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
@@ -463,7 +471,7 @@ static void bnxt_get_channels(struct net_device *dev,
 	int max_tx_sch_inputs;
 
 	/* Get the most up-to-date max_tx_sch_inputs. */
-	if (bp->flags & BNXT_FLAG_NEW_RM)
+	if (BNXT_NEW_RM(bp))
 		bnxt_hwrm_func_resc_qcaps(bp, false);
 	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
 
@@ -2392,7 +2400,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
 	return rc;
 }
 
-static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
+static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
 {
 	struct hwrm_port_phy_cfg_input req = {0};
 
@@ -2400,7 +2408,10 @@ static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
 	if (enable) {
 		bnxt_disable_an_for_lpbk(bp, &req);
-		req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+		if (ext)
+			req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
+		else
+			req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
 	} else {
 		req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
 	}
@@ -2533,15 +2544,17 @@ static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
 	return rc;
 }
 
-#define BNXT_DRV_TESTS			3
+#define BNXT_DRV_TESTS			4
 #define BNXT_MACLPBK_TEST_IDX		(bp->num_tests - BNXT_DRV_TESTS)
 #define BNXT_PHYLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 1)
-#define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
+#define BNXT_EXTLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
+#define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 3)
 
 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
 			   u64 *buf)
 {
 	struct bnxt *bp = netdev_priv(dev);
+	bool do_ext_lpbk = false;
 	bool offline = false;
 	u8 test_results = 0;
 	u8 test_mask = 0;
@@ -2555,6 +2568,10 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
 		return;
 	}
 
+	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
+	    (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
+		do_ext_lpbk = true;
+
 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
 		if (bp->pf.active_vfs) {
 			etest->flags |= ETH_TEST_FL_FAILED;
@@ -2595,13 +2612,22 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
 			buf[BNXT_MACLPBK_TEST_IDX] = 0;
 
 		bnxt_hwrm_mac_loopback(bp, false);
-		bnxt_hwrm_phy_loopback(bp, true);
+		bnxt_hwrm_phy_loopback(bp, true, false);
 		msleep(1000);
 		if (bnxt_run_loopback(bp)) {
 			buf[BNXT_PHYLPBK_TEST_IDX] = 1;
 			etest->flags |= ETH_TEST_FL_FAILED;
 		}
-		bnxt_hwrm_phy_loopback(bp, false);
+		if (do_ext_lpbk) {
+			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+			bnxt_hwrm_phy_loopback(bp, true, true);
+			msleep(1000);
+			if (bnxt_run_loopback(bp)) {
+				buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+				etest->flags |= ETH_TEST_FL_FAILED;
+			}
+		}
+		bnxt_hwrm_phy_loopback(bp, false, false);
 		bnxt_half_close_nic(bp);
 		bnxt_open_nic(bp, false, true);
 	}
@@ -2662,6 +2688,331 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
 	return rc;
 }
 
+static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
+				  struct bnxt_hwrm_dbg_dma_info *info)
+{
+	struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_dbg_cmn_input *cmn_req = msg;
+	__le16 *seq_ptr = msg + info->seq_off;
+	u16 seq = 0, len, segs_off;
+	void *resp = cmn_resp;
+	dma_addr_t dma_handle;
+	int rc, off = 0;
+	void *dma_buf;
+
+	dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
+				     GFP_KERNEL);
+	if (!dma_buf)
+		return -ENOMEM;
+
+	segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
+			    total_segments);
+	cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
+	cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	while (1) {
+		*seq_ptr = cpu_to_le16(seq);
+		rc =
_hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); +		if (rc) +			break; + +		len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off))); +		if (!seq && +		    cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { +			info->segs = le16_to_cpu(*((__le16 *)(resp + +							      segs_off))); +			if (!info->segs) { +				rc = -EIO; +				break; +			} + +			info->dest_buf_size = info->segs * +					sizeof(struct coredump_segment_record); +			info->dest_buf = kmalloc(info->dest_buf_size, +						 GFP_KERNEL); +			if (!info->dest_buf) { +				rc = -ENOMEM; +				break; +			} +		} + +		if (info->dest_buf) +			memcpy(info->dest_buf + off, dma_buf, len); + +		if (cmn_req->req_type == +				cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) +			info->dest_buf_size += len; + +		if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) +			break; + +		seq++; +		off += len; +	} +	mutex_unlock(&bp->hwrm_cmd_lock); +	dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle); +	return rc; +} + +static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, +				       struct bnxt_coredump *coredump) +{ +	struct hwrm_dbg_coredump_list_input req = {0}; +	struct bnxt_hwrm_dbg_dma_info info = {NULL}; +	int rc; + +	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1); + +	info.dma_len = COREDUMP_LIST_BUF_LEN; +	info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no); +	info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output, +				     data_len); + +	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); +	if (!rc) { +		coredump->data = info.dest_buf; +		coredump->data_size = info.dest_buf_size; +		coredump->total_segs = info.segs; +	} +	return rc; +} + +static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, +					   u16 segment_id) +{ +	struct hwrm_dbg_coredump_initiate_input req = {0}; + +	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1); +	req.component_id = cpu_to_le16(component_id); +	req.segment_id = cpu_to_le16(segment_id); + +	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, +					   u16 segment_id, u32 *seg_len, +					   void *buf, u32 offset) +{ +	struct hwrm_dbg_coredump_retrieve_input req = {0}; +	struct bnxt_hwrm_dbg_dma_info info = {NULL}; +	int rc; + +	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1); +	req.component_id = cpu_to_le16(component_id); +	req.segment_id = cpu_to_le16(segment_id); + +	info.dma_len = COREDUMP_RETRIEVE_BUF_LEN; +	info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input, +				seq_no); +	info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, +				     data_len); +	if (buf) +		info.dest_buf = buf + offset; + +	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); +	if (!rc) +		*seg_len = info.dest_buf_size; + +	return rc; +} + +static void +bnxt_fill_coredump_seg_hdr(struct bnxt *bp, +			   struct bnxt_coredump_segment_hdr *seg_hdr, +			   struct coredump_segment_record *seg_rec, u32 seg_len, +			   int status, u32 duration, u32 instance) +{ +	memset(seg_hdr, 0, sizeof(*seg_hdr)); +	memcpy(seg_hdr->signature, "sEgM", 4); +	if (seg_rec) { +		seg_hdr->component_id = (__force __le32)seg_rec->component_id; +		seg_hdr->segment_id = (__force __le32)seg_rec->segment_id; +		seg_hdr->low_version = seg_rec->version_low; +		seg_hdr->high_version = seg_rec->version_hi; +	} else { +		/* For hwrm_ver_get response Component id = 2 +		 * and Segment id = 0 +		 */ +		
seg_hdr->component_id = cpu_to_le32(2); +		seg_hdr->segment_id = 0; +	} +	seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn); +	seg_hdr->length = cpu_to_le32(seg_len); +	seg_hdr->status = cpu_to_le32(status); +	seg_hdr->duration = cpu_to_le32(duration); +	seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr)); +	seg_hdr->instance = cpu_to_le32(instance); +} + +static void +bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, +			  time64_t start, s16 start_utc, u16 total_segs, +			  int status) +{ +	time64_t end = ktime_get_real_seconds(); +	u32 os_ver_major = 0, os_ver_minor = 0; +	struct tm tm; + +	time64_to_tm(start, 0, &tm); +	memset(record, 0, sizeof(*record)); +	memcpy(record->signature, "cOrE", 4); +	record->flags = 0; +	record->low_version = 0; +	record->high_version = 1; +	record->asic_state = 0; +	strlcpy(record->system_name, utsname()->nodename, +		sizeof(record->system_name)); +	record->year = cpu_to_le16(tm.tm_year); +	record->month = cpu_to_le16(tm.tm_mon); +	record->day = cpu_to_le16(tm.tm_mday); +	record->hour = cpu_to_le16(tm.tm_hour); +	record->minute = cpu_to_le16(tm.tm_min); +	record->second = cpu_to_le16(tm.tm_sec); +	record->utc_bias = cpu_to_le16(start_utc); +	strcpy(record->commandline, "ethtool -w"); +	record->total_segments = cpu_to_le32(total_segs); + +	sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor); +	record->os_ver_major = cpu_to_le32(os_ver_major); +	record->os_ver_minor = cpu_to_le32(os_ver_minor); + +	strlcpy(record->os_name, utsname()->sysname, 32); +	time64_to_tm(end, 0, &tm); +	record->end_year = cpu_to_le16(tm.tm_year + 1900); +	record->end_month = cpu_to_le16(tm.tm_mon + 1); +	record->end_day = cpu_to_le16(tm.tm_mday); +	record->end_hour = cpu_to_le16(tm.tm_hour); +	record->end_minute = cpu_to_le16(tm.tm_min); +	record->end_second = cpu_to_le16(tm.tm_sec); +	record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60); +	record->asic_id1 = cpu_to_le32(bp->chip_num << 16 | +				       bp->ver_resp.chip_rev << 8 | +				       bp->ver_resp.chip_metal); +	record->asic_id2 = 0; +	record->coredump_status = cpu_to_le32(status); +	record->ioctl_low_version = 0; +	record->ioctl_high_version = 0; +} + +static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) +{ +	u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); +	struct coredump_segment_record *seg_record = NULL; +	u32 offset = 0, seg_hdr_len, seg_record_len; +	struct bnxt_coredump_segment_hdr seg_hdr; +	struct bnxt_coredump coredump = {NULL}; +	time64_t start_time; +	u16 start_utc; +	int rc = 0, i; + +	start_time = ktime_get_real_seconds(); +	start_utc = sys_tz.tz_minuteswest * 60; +	seg_hdr_len = sizeof(seg_hdr); + +	/* First segment should be hwrm_ver_get response */ +	*dump_len = seg_hdr_len + ver_get_resp_len; +	if (buf) { +		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len, +					   0, 0, 0); +		memcpy(buf + offset, &seg_hdr, seg_hdr_len); +		offset += seg_hdr_len; +		memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len); +		offset += ver_get_resp_len; +	} + +	rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump); +	if (rc) { +		netdev_err(bp->dev, "Failed to get coredump segment list\n"); +		goto err; +	} + +	*dump_len += seg_hdr_len * coredump.total_segs; + +	seg_record = (struct coredump_segment_record *)coredump.data; +	seg_record_len = sizeof(*seg_record); + +	for (i = 0; i < coredump.total_segs; i++) { +		u16 comp_id = le16_to_cpu(seg_record->component_id); +		u16 seg_id = le16_to_cpu(seg_record->segment_id); +		u32 
duration = 0, seg_len = 0;
+		unsigned long start, end;
+
+		start = jiffies;
+
+		rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+		if (rc) {
+			netdev_err(bp->dev,
+				   "Failed to initiate coredump for seg = %d\n",
+				   seg_id);
+			goto next_seg;
+		}
+
+		/* Write segment data into the buffer */
+		rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
+						     &seg_len, buf,
+						     offset + seg_hdr_len);
+		if (rc)
+			netdev_err(bp->dev,
+				   "Failed to retrieve coredump for seg = %d\n",
+				   seg_id);
+
+next_seg:
+		end = jiffies;
+		duration = jiffies_to_msecs(end - start);
+		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
+					   rc, duration, 0);
+
+		if (buf) {
+			/* Write segment header into the buffer */
+			memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+			offset += seg_hdr_len + seg_len;
+		}
+
+		*dump_len += seg_len;
+		seg_record =
+			(struct coredump_segment_record *)((u8 *)seg_record +
+							   seg_record_len);
+	}
+
+err:
+	if (buf)
+		bnxt_fill_coredump_record(bp, buf + offset, start_time,
+					  start_utc, coredump.total_segs + 1,
+					  rc);
+	kfree(coredump.data);
+	*dump_len += sizeof(struct bnxt_coredump_record);
+
+	return rc;
+}
+
+static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (bp->hwrm_spec_code < 0x10801)
+		return -EOPNOTSUPP;
+
+	dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
+			bp->ver_resp.hwrm_fw_min_8b << 16 |
+			bp->ver_resp.hwrm_fw_bld_8b << 8 |
+			bp->ver_resp.hwrm_fw_rsvd_8b;
+
+	return bnxt_get_coredump(bp, NULL, &dump->len);
+}
+
+static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
+			      void *buf)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (bp->hwrm_spec_code < 0x10801)
+		return -EOPNOTSUPP;
+
+	memset(buf, 0, dump->len);
+
+	return bnxt_get_coredump(bp, buf, &dump->len);
+}
+
 void bnxt_ethtool_init(struct bnxt *bp)
 {
 	struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
@@ -2702,6 +3053,8 @@ void bnxt_ethtool_init(struct bnxt *bp)
 			strcpy(str, "Mac loopback test (offline)");
 		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
 			strcpy(str, "Phy loopback test (offline)");
+		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
+			strcpy(str, "Ext loopback test (offline)");
 		} else if (i == BNXT_IRQ_TEST_IDX) {
 			strcpy(str, "Interrupt_test (offline)");
 		} else {
@@ -2763,4 +3116,6 @@ const struct ethtool_ops bnxt_ethtool_ops = {
 	.set_phys_id		= bnxt_set_phys_id,
 	.self_test		= bnxt_self_test,
 	.reset			= bnxt_reset,
+	.get_dump_flag		= bnxt_get_dump_flag,
+	.get_dump_data		= bnxt_get_dump_data,
 };
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 836ef682f24c..b5b65b3f8534 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -22,6 +22,43 @@ struct bnxt_led_cfg {
 	u8 rsvd;
 };
+#define COREDUMP_LIST_BUF_LEN		2048
+#define COREDUMP_RETRIEVE_BUF_LEN	4096
+
+struct bnxt_coredump {
+	void		*data;
+	int		data_size;
+	u16		total_segs;
+};
+
+struct bnxt_hwrm_dbg_dma_info {
+	void *dest_buf;
+	int dest_buf_size;
+	u16 dma_len;
+	u16 seq_off;
+	u16 data_len_off;
+	u16 segs;
+};
+
+struct hwrm_dbg_cmn_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_dest_addr;
+	__le32 host_buf_len;
+};
+
+struct hwrm_dbg_cmn_output {
+	
__le16 error_code; +	__le16 req_type; +	__le16 seq_id; +	__le16 resp_len; +	u8 flags; +	#define HWRM_DBG_CMN_FLAGS_MORE	1 +}; +  #define BNXT_LED_DFLT_ENA				\  	(PORT_LED_CFG_REQ_ENABLES_LED0_ID |		\  	 PORT_LED_CFG_REQ_ENABLES_LED0_STATE |		\ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 0fe0ea8dce6c..971ace5d0d4a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -96,6 +96,7 @@ struct hwrm_short_input {  struct cmd_nums {  	__le16	req_type;  	#define HWRM_VER_GET                              0x0UL +	#define HWRM_FUNC_DRV_IF_CHANGE                   0xdUL  	#define HWRM_FUNC_BUF_UNRGTR                      0xeUL  	#define HWRM_FUNC_VF_CFG                          0xfUL  	#define HWRM_RESERVED1                            0x10UL @@ -159,6 +160,7 @@ struct cmd_nums {  	#define HWRM_RING_FREE                            0x51UL  	#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS        0x52UL  	#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS     0x53UL +	#define HWRM_RING_AGGINT_QCAPS                    0x54UL  	#define HWRM_RING_RESET                           0x5eUL  	#define HWRM_RING_GRP_ALLOC                       0x60UL  	#define HWRM_RING_GRP_FREE                        0x61UL @@ -191,6 +193,8 @@ struct cmd_nums {  	#define HWRM_PORT_QSTATS_EXT                      0xb4UL  	#define HWRM_FW_RESET                             0xc0UL  	#define HWRM_FW_QSTATUS                           0xc1UL +	#define HWRM_FW_HEALTH_CHECK                      0xc2UL +	#define HWRM_FW_SYNC                              0xc3UL  	#define HWRM_FW_SET_TIME                          0xc8UL  	#define HWRM_FW_GET_TIME                          0xc9UL  	#define HWRM_FW_SET_STRUCTURED_DATA               0xcaUL @@ -269,6 +273,11 @@ struct cmd_nums {  	#define HWRM_ENGINE_ON_DIE_RQE_CREDITS            0x164UL  	#define HWRM_FUNC_RESOURCE_QCAPS                  0x190UL  	#define HWRM_FUNC_VF_RESOURCE_CFG                 0x191UL +	#define HWRM_FUNC_BACKING_STORE_QCAPS             0x192UL +	#define HWRM_FUNC_BACKING_STORE_CFG               0x193UL +	#define HWRM_FUNC_BACKING_STORE_QCFG              0x194UL +	#define HWRM_FUNC_VF_BW_CFG                       0x195UL +	#define HWRM_FUNC_VF_BW_QCFG                      0x196UL  	#define HWRM_SELFTEST_QLIST                       0x200UL  	#define HWRM_SELFTEST_EXEC                        0x201UL  	#define HWRM_SELFTEST_IRQ                         0x202UL @@ -284,6 +293,8 @@ struct cmd_nums {  	#define HWRM_DBG_COREDUMP_LIST                    0xff17UL  	#define HWRM_DBG_COREDUMP_INITIATE                0xff18UL  	#define HWRM_DBG_COREDUMP_RETRIEVE                0xff19UL +	#define HWRM_DBG_FW_CLI                           0xff1aUL +	#define HWRM_DBG_I2C_CMD                          0xff1bUL  	#define HWRM_NVM_FACTORY_DEFAULTS                 0xffeeUL  	#define HWRM_NVM_VALIDATE_OPTION                  0xffefUL  	#define HWRM_NVM_FLUSH                            0xfff0UL @@ -318,6 +329,7 @@ struct ret_codes {  	#define HWRM_ERR_CODE_INVALID_ENABLES        0x6UL  	#define HWRM_ERR_CODE_UNSUPPORTED_TLV        0x7UL  	#define HWRM_ERR_CODE_NO_BUFFER              0x8UL +	#define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL  	#define HWRM_ERR_CODE_HWRM_ERROR             0xfUL  	#define HWRM_ERR_CODE_UNKNOWN_ERR            0xfffeUL  	#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED      0xffffUL @@ -344,9 +356,9 @@ struct hwrm_err_output {  #define HWRM_RESP_VALID_KEY 1  
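/* Reference note (a sketch, not part of this patch): the driver packs the
 * firmware's reported interface version into bp->hwrm_spec_code in
 * bnxt_hwrm_ver_get(), assuming the _8b response field naming used
 * elsewhere in this series:
 *
 *	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
 *			     resp->hwrm_intf_min_8b << 8 |
 *			     resp->hwrm_intf_upd_8b;
 *
 * With the HWRM_VERSION_* bump below to 1.9.2, a gate such as
 * "bp->hwrm_spec_code < 0x10801" in the coredump code above therefore
 * reads as "firmware older than HWRM 1.8.1".
 */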
#define HWRM_VERSION_MAJOR 1  #define HWRM_VERSION_MINOR 9 -#define HWRM_VERSION_UPDATE 1 -#define HWRM_VERSION_RSVD 15 -#define HWRM_VERSION_STR "1.9.1.15" +#define HWRM_VERSION_UPDATE 2 +#define HWRM_VERSION_RSVD 25 +#define HWRM_VERSION_STR "1.9.2.25"  /* hwrm_ver_get_input (size:192b/24B) */  struct hwrm_ver_get_input { @@ -526,6 +538,7 @@ struct hwrm_async_event_cmpl {  	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE   0x32UL  	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE              0x33UL  	#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE            0x34UL +	#define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE        0x35UL  	#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR                 0xffUL  	#define ASYNC_EVENT_CMPL_EVENT_ID_LAST                      ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR  	__le32	event_data2; @@ -564,6 +577,8 @@ struct hwrm_async_event_cmpl_link_status_change {  	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT        1  	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK    0xffff0UL  	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT     4 +	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK      0xff00000UL +	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT       20  };  /* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */ @@ -817,23 +832,26 @@ struct hwrm_func_qcaps_output {  	__le16	fid;  	__le16	port_id;  	__le32	flags; -	#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED            0x1UL -	#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING        0x2UL -	#define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED                  0x4UL -	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED              0x8UL -	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED              0x10UL -	#define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED         0x20UL -	#define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED              0x40UL -	#define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED           0x80UL -	#define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED            0x100UL -	#define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED        0x200UL -	#define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED            0x400UL -	#define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED     0x800UL -	#define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED     0x1000UL -	#define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED      0x2000UL -	#define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED        0x4000UL -	#define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED       0x8000UL -	#define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED           0x10000UL +	#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED             0x1UL +	#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING         0x2UL +	#define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED                   0x4UL +	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED               0x8UL +	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED               0x10UL +	#define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED          0x20UL +	#define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED               0x40UL +	#define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED            0x80UL +	#define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED             0x100UL +	#define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED         0x200UL +	#define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED             0x400UL +	#define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED      0x800UL +	#define 
FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED      0x1000UL +	#define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED       0x2000UL +	#define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED         0x4000UL +	#define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED        0x8000UL +	#define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED            0x10000UL +	#define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED            0x20000UL +	#define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED              0x40000UL +	#define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED     0x80000UL  	u8	mac_address[6];  	__le16	max_rsscos_ctx;  	__le16	max_cmpl_rings; @@ -947,58 +965,26 @@ struct hwrm_func_qcfg_output {  	#define FUNC_QCFG_RESP_EVB_MODE_VEPA   0x2UL  	#define FUNC_QCFG_RESP_EVB_MODE_LAST  FUNC_QCFG_RESP_EVB_MODE_VEPA  	u8	options; -	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK    0x3UL -	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT     0 -	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64   0x0UL -	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128  0x1UL -	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST     FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 -	#define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK              0xfcUL -	#define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT               2 +	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK         0x3UL +	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT          0 +	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64        0x0UL +	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128       0x1UL +	#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST          FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 +	#define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK       0xcUL +	#define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT        2 +	#define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN  (0x0UL << 2) +	#define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP    (0x1UL << 2) +	#define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO         (0x2UL << 2) +	#define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST        FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO +	#define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK                   0xf0UL +	#define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT                    4  	__le16	alloc_vfs;  	__le32	alloc_mcast_filters;  	__le32	alloc_hw_ring_grps;  	__le16	alloc_sp_tx_rings;  	__le16	alloc_stat_ctx; -	u8	unused_2[7]; -	u8	valid; -}; - -/* hwrm_func_vlan_cfg_input (size:384b/48B) */ -struct hwrm_func_vlan_cfg_input { -	__le16	req_type; -	__le16	cmpl_ring; -	__le16	seq_id; -	__le16	target_id; -	__le64	resp_addr; -	__le16	fid; -	u8	unused_0[2]; -	__le32	enables; -	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID      0x1UL -	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID      0x2UL -	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP      0x4UL -	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP      0x8UL -	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID     0x10UL -	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID     0x20UL -	__le16	stag_vid; -	u8	stag_pcp; -	u8	unused_1; -	__be16	stag_tpid; -	__le16	ctag_vid; -	u8	ctag_pcp; -	u8	unused_2; -	__be16	ctag_tpid; -	__le32	rsvd1; -	__le32	rsvd2; -	u8	unused_3[4]; -}; - -/* hwrm_func_vlan_cfg_output (size:128b/16B) */ -struct hwrm_func_vlan_cfg_output { -	__le16	error_code; -	__le16	req_type; -	__le16	seq_id; -	__le16	resp_len; -	u8	unused_0[7]; +	__le16	alloc_msix; +	u8	unused_2[5];  	u8	valid;  }; @@ -1010,7 +996,7 @@ struct hwrm_func_cfg_input {  	__le16	target_id;  	__le64	resp_addr;  	__le16	fid; -	u8	
unused_0[2]; +	__le16	num_msix;  	__le32	flags;  	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE     0x1UL  	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE      0x2UL @@ -1050,6 +1036,8 @@ struct hwrm_func_cfg_input {  	#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS       0x40000UL  	#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS        0x80000UL  	#define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE          0x100000UL +	#define FUNC_CFG_REQ_ENABLES_NUM_MSIX                0x200000UL +	#define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE        0x400000UL  	__le16	mtu;  	__le16	mru;  	__le16	num_rsscos_ctxs; @@ -1109,13 +1097,19 @@ struct hwrm_func_cfg_input {  	#define FUNC_CFG_REQ_EVB_MODE_VEPA   0x2UL  	#define FUNC_CFG_REQ_EVB_MODE_LAST  FUNC_CFG_REQ_EVB_MODE_VEPA  	u8	options; -	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK    0x3UL -	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT     0 -	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64   0x0UL -	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128  0x1UL -	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST     FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 -	#define FUNC_CFG_REQ_OPTIONS_RSVD_MASK              0xfcUL -	#define FUNC_CFG_REQ_OPTIONS_RSVD_SFT               2 +	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK         0x3UL +	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT          0 +	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64        0x0UL +	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128       0x1UL +	#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST          FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 +	#define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK       0xcUL +	#define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT        2 +	#define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN  (0x0UL << 2) +	#define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP    (0x1UL << 2) +	#define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO         (0x2UL << 2) +	#define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST        FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO +	#define FUNC_CFG_REQ_OPTIONS_RSVD_MASK                   0xf0UL +	#define FUNC_CFG_REQ_OPTIONS_RSVD_SFT                    4  	__le16	num_mcast_filters;  }; @@ -1212,30 +1206,6 @@ struct hwrm_func_vf_resc_free_output {  	u8	valid;  }; -/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */ -struct hwrm_func_vf_vnic_ids_query_input { -	__le16	req_type; -	__le16	cmpl_ring; -	__le16	seq_id; -	__le16	target_id; -	__le64	resp_addr; -	__le16	vf_id; -	u8	unused_0[2]; -	__le32	max_vnic_id_cnt; -	__le64	vnic_id_tbl_addr; -}; - -/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */ -struct hwrm_func_vf_vnic_ids_query_output { -	__le16	error_code; -	__le16	req_type; -	__le16	seq_id; -	__le16	resp_len; -	__le32	vnic_id_cnt; -	u8	unused_0[3]; -	u8	valid; -}; -  /* hwrm_func_drv_rgtr_input (size:896b/112B) */  struct hwrm_func_drv_rgtr_input {  	__le16	req_type; @@ -1286,7 +1256,9 @@ struct hwrm_func_drv_rgtr_output {  	__le16	req_type;  	__le16	seq_id;  	__le16	resp_len; -	u8	unused_0[7]; +	__le32	flags; +	#define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED     0x1UL +	u8	unused_0[3];  	u8	valid;  }; @@ -1372,7 +1344,7 @@ struct hwrm_func_drv_qver_input {  	u8	unused_0[2];  }; -/* hwrm_func_drv_qver_output (size:192b/24B) */ +/* hwrm_func_drv_qver_output (size:256b/32B) */  struct hwrm_func_drv_qver_output {  	__le16	error_code;  	__le16	req_type; @@ -1394,12 +1366,13 @@ struct hwrm_func_drv_qver_output {  	u8	ver_maj_8b;  	u8	ver_min_8b;  	u8	ver_upd_8b; -	u8	unused_0[2]; 
-	u8	valid; +	u8	unused_0[3];  	__le16	ver_maj;  	__le16	ver_min;  	__le16	ver_upd;  	__le16	ver_patch; +	u8	unused_1[7]; +	u8	valid;  };  /* hwrm_func_resource_qcaps_input (size:192b/24B) */ @@ -1493,6 +1466,410 @@ struct hwrm_func_vf_resource_cfg_output {  	u8	valid;  }; +/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcaps_input { +	__le16	req_type; +	__le16	cmpl_ring; +	__le16	seq_id; +	__le16	target_id; +	__le64	resp_addr; +}; + +/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */ +struct hwrm_func_backing_store_qcaps_output { +	__le16	error_code; +	__le16	req_type; +	__le16	seq_id; +	__le16	resp_len; +	__le32	qp_max_entries; +	__le16	qp_min_qp1_entries; +	__le16	qp_max_l2_entries; +	__le16	qp_entry_size; +	__le16	srq_max_l2_entries; +	__le32	srq_max_entries; +	__le16	srq_entry_size; +	__le16	cq_max_l2_entries; +	__le32	cq_max_entries; +	__le16	cq_entry_size; +	__le16	vnic_max_vnic_entries; +	__le16	vnic_max_ring_table_entries; +	__le16	vnic_entry_size; +	__le32	stat_max_entries; +	__le16	stat_entry_size; +	__le16	tqm_entry_size; +	__le32	tqm_min_entries_per_ring; +	__le32	tqm_max_entries_per_ring; +	__le32	mrav_max_entries; +	__le16	mrav_entry_size; +	__le16	tim_entry_size; +	__le32	tim_max_entries; +	u8	unused_0[3]; +	u8	valid; +}; + +/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */ +struct hwrm_func_backing_store_cfg_input { +	__le16	req_type; +	__le16	cmpl_ring; +	__le16	seq_id; +	__le16	target_id; +	__le64	resp_addr; +	__le32	flags; +	#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE     0x1UL +	__le32	enables; +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP            0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ           0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ            0x4UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC          0x8UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT          0x10UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP        0x20UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0     0x40UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1     0x80UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2     0x100UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3     0x200UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4     0x400UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5     0x800UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6     0x1000UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7     0x2000UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV          0x4000UL +	#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM           0x8000UL +	u8	qpc_pg_size_qpc_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M   (0x3UL << 4) +	#define 
FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G +	u8	srq_pg_size_srq_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G +	u8	cq_pg_size_cq_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G +	u8	vnic_pg_size_vnic_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G +	u8	stat_pg_size_stat_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK      0xfUL +	#define 
FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G +	u8	tqm_sp_pg_size_tqm_sp_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G +	u8	tqm_ring0_pg_size_tqm_ring0_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G +	u8	tqm_ring1_pg_size_tqm_ring1_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0       0x0UL +	#define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G +	u8	tqm_ring2_pg_size_tqm_ring2_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G +	u8	tqm_ring3_pg_size_tqm_ring3_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G +	u8	tqm_ring4_pg_size_tqm_ring4_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0       0x0UL +	#define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G +	u8	tqm_ring5_pg_size_tqm_ring5_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G +	u8	tqm_ring6_pg_size_tqm_ring6_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G +	u8	tqm_ring7_pg_size_tqm_ring7_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0       0x0UL +	#define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G +	u8	mrav_pg_size_mrav_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G +	u8	tim_pg_size_tim_lvl; +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK      0xfUL +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT       0 +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0       0x0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1       0x1UL +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2       0x2UL +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK  0xf0UL +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT   4 +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K   (0x0UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K   (0x1UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K  (0x2UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M   (0x3UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M   (0x4UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G   (0x5UL << 4) +	#define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G +	__le64	qpc_page_dir; +	__le64	srq_page_dir; +	__le64	cq_page_dir; +	__le64	vnic_page_dir; +	__le64	stat_page_dir; +	__le64	tqm_sp_page_dir; +	__le64	tqm_ring0_page_dir; +	__le64	tqm_ring1_page_dir; +	__le64	tqm_ring2_page_dir; +	__le64	tqm_ring3_page_dir; +	__le64	tqm_ring4_page_dir; +	__le64	tqm_ring5_page_dir; +	__le64	tqm_ring6_page_dir; +	__le64	tqm_ring7_page_dir; +	__le64	mrav_page_dir; +	__le64	tim_page_dir; +	__le32	qp_num_entries; +	__le32	
srq_num_entries; +	__le32	cq_num_entries; +	__le32	stat_num_entries; +	__le32	tqm_sp_num_entries; +	__le32	tqm_ring0_num_entries; +	__le32	tqm_ring1_num_entries; +	__le32	tqm_ring2_num_entries; +	__le32	tqm_ring3_num_entries; +	__le32	tqm_ring4_num_entries; +	__le32	tqm_ring5_num_entries; +	__le32	tqm_ring6_num_entries; +	__le32	tqm_ring7_num_entries; +	__le32	mrav_num_entries; +	__le32	tim_num_entries; +	__le16	qp_num_qp1_entries; +	__le16	qp_num_l2_entries; +	__le16	qp_entry_size; +	__le16	srq_num_l2_entries; +	__le16	srq_entry_size; +	__le16	cq_num_l2_entries; +	__le16	cq_entry_size; +	__le16	vnic_num_vnic_entries; +	__le16	vnic_num_ring_table_entries; +	__le16	vnic_entry_size; +	__le16	stat_entry_size; +	__le16	tqm_entry_size; +	__le16	mrav_entry_size; +	__le16	tim_entry_size; +}; + +/* hwrm_func_backing_store_cfg_output (size:128b/16B) */ +struct hwrm_func_backing_store_cfg_output { +	__le16	error_code; +	__le16	req_type; +	__le16	seq_id; +	__le16	resp_len; +	u8	unused_0[7]; +	u8	valid; +}; + +/* hwrm_func_drv_if_change_input (size:192b/24B) */ +struct hwrm_func_drv_if_change_input { +	__le16	req_type; +	__le16	cmpl_ring; +	__le16	seq_id; +	__le16	target_id; +	__le64	resp_addr; +	__le32	flags; +	#define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP     0x1UL +	__le32	unused; +}; + +/* hwrm_func_drv_if_change_output (size:128b/16B) */ +struct hwrm_func_drv_if_change_output { +	__le16	error_code; +	__le16	req_type; +	__le16	seq_id; +	__le16	resp_len; +	__le32	flags; +	#define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE     0x1UL +	u8	unused_0[3]; +	u8	valid; +}; +  /* hwrm_port_phy_cfg_input (size:448b/56B) */  struct hwrm_port_phy_cfg_input {  	__le16	req_type; @@ -1592,10 +1969,11 @@ struct hwrm_port_phy_cfg_input {  	#define PORT_PHY_CFG_REQ_WIRESPEED_ON  0x1UL  	#define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON  	u8	lpbk; -	#define PORT_PHY_CFG_REQ_LPBK_NONE   0x0UL -	#define PORT_PHY_CFG_REQ_LPBK_LOCAL  0x1UL -	#define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL -	#define PORT_PHY_CFG_REQ_LPBK_LAST  PORT_PHY_CFG_REQ_LPBK_REMOTE +	#define PORT_PHY_CFG_REQ_LPBK_NONE     0x0UL +	#define PORT_PHY_CFG_REQ_LPBK_LOCAL    0x1UL +	#define PORT_PHY_CFG_REQ_LPBK_REMOTE   0x2UL +	#define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL +	#define PORT_PHY_CFG_REQ_LPBK_LAST    PORT_PHY_CFG_REQ_LPBK_EXTERNAL  	u8	force_pause;  	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX     0x1UL  	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX     0x2UL @@ -1751,10 +2129,11 @@ struct hwrm_port_phy_qcfg_output {  	#define PORT_PHY_QCFG_RESP_WIRESPEED_ON  0x1UL  	#define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON  	u8	lpbk; -	#define PORT_PHY_QCFG_RESP_LPBK_NONE   0x0UL -	#define PORT_PHY_QCFG_RESP_LPBK_LOCAL  0x1UL -	#define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL -	#define PORT_PHY_QCFG_RESP_LPBK_LAST  PORT_PHY_QCFG_RESP_LPBK_REMOTE +	#define PORT_PHY_QCFG_RESP_LPBK_NONE     0x0UL +	#define PORT_PHY_QCFG_RESP_LPBK_LOCAL    0x1UL +	#define PORT_PHY_QCFG_RESP_LPBK_REMOTE   0x2UL +	#define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL +	#define PORT_PHY_QCFG_RESP_LPBK_LAST    PORT_PHY_QCFG_RESP_LPBK_EXTERNAL  	u8	force_pause;  	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX     0x1UL  	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX     0x2UL @@ -2014,6 +2393,131 @@ struct hwrm_port_mac_ptp_qcfg_output {  	u8	valid;  }; +/* tx_port_stats (size:3264b/408B) */ +struct tx_port_stats { +	__le64	tx_64b_frames; +	__le64	tx_65b_127b_frames; +	__le64	tx_128b_255b_frames; +	__le64	tx_256b_511b_frames; +	__le64	tx_512b_1023b_frames; +	__le64	
tx_1024b_1518b_frames; +	__le64	tx_good_vlan_frames; +	__le64	tx_1519b_2047b_frames; +	__le64	tx_2048b_4095b_frames; +	__le64	tx_4096b_9216b_frames; +	__le64	tx_9217b_16383b_frames; +	__le64	tx_good_frames; +	__le64	tx_total_frames; +	__le64	tx_ucast_frames; +	__le64	tx_mcast_frames; +	__le64	tx_bcast_frames; +	__le64	tx_pause_frames; +	__le64	tx_pfc_frames; +	__le64	tx_jabber_frames; +	__le64	tx_fcs_err_frames; +	__le64	tx_control_frames; +	__le64	tx_oversz_frames; +	__le64	tx_single_dfrl_frames; +	__le64	tx_multi_dfrl_frames; +	__le64	tx_single_coll_frames; +	__le64	tx_multi_coll_frames; +	__le64	tx_late_coll_frames; +	__le64	tx_excessive_coll_frames; +	__le64	tx_frag_frames; +	__le64	tx_err; +	__le64	tx_tagged_frames; +	__le64	tx_dbl_tagged_frames; +	__le64	tx_runt_frames; +	__le64	tx_fifo_underruns; +	__le64	tx_pfc_ena_frames_pri0; +	__le64	tx_pfc_ena_frames_pri1; +	__le64	tx_pfc_ena_frames_pri2; +	__le64	tx_pfc_ena_frames_pri3; +	__le64	tx_pfc_ena_frames_pri4; +	__le64	tx_pfc_ena_frames_pri5; +	__le64	tx_pfc_ena_frames_pri6; +	__le64	tx_pfc_ena_frames_pri7; +	__le64	tx_eee_lpi_events; +	__le64	tx_eee_lpi_duration; +	__le64	tx_llfc_logical_msgs; +	__le64	tx_hcfc_msgs; +	__le64	tx_total_collisions; +	__le64	tx_bytes; +	__le64	tx_xthol_frames; +	__le64	tx_stat_discard; +	__le64	tx_stat_error; +}; + +/* rx_port_stats (size:4224b/528B) */ +struct rx_port_stats { +	__le64	rx_64b_frames; +	__le64	rx_65b_127b_frames; +	__le64	rx_128b_255b_frames; +	__le64	rx_256b_511b_frames; +	__le64	rx_512b_1023b_frames; +	__le64	rx_1024b_1518b_frames; +	__le64	rx_good_vlan_frames; +	__le64	rx_1519b_2047b_frames; +	__le64	rx_2048b_4095b_frames; +	__le64	rx_4096b_9216b_frames; +	__le64	rx_9217b_16383b_frames; +	__le64	rx_total_frames; +	__le64	rx_ucast_frames; +	__le64	rx_mcast_frames; +	__le64	rx_bcast_frames; +	__le64	rx_fcs_err_frames; +	__le64	rx_ctrl_frames; +	__le64	rx_pause_frames; +	__le64	rx_pfc_frames; +	__le64	rx_unsupported_opcode_frames; +	__le64	rx_unsupported_da_pausepfc_frames; +	__le64	rx_wrong_sa_frames; +	__le64	rx_align_err_frames; +	__le64	rx_oor_len_frames; +	__le64	rx_code_err_frames; +	__le64	rx_false_carrier_frames; +	__le64	rx_ovrsz_frames; +	__le64	rx_jbr_frames; +	__le64	rx_mtu_err_frames; +	__le64	rx_match_crc_frames; +	__le64	rx_promiscuous_frames; +	__le64	rx_tagged_frames; +	__le64	rx_double_tagged_frames; +	__le64	rx_trunc_frames; +	__le64	rx_good_frames; +	__le64	rx_pfc_xon2xoff_frames_pri0; +	__le64	rx_pfc_xon2xoff_frames_pri1; +	__le64	rx_pfc_xon2xoff_frames_pri2; +	__le64	rx_pfc_xon2xoff_frames_pri3; +	__le64	rx_pfc_xon2xoff_frames_pri4; +	__le64	rx_pfc_xon2xoff_frames_pri5; +	__le64	rx_pfc_xon2xoff_frames_pri6; +	__le64	rx_pfc_xon2xoff_frames_pri7; +	__le64	rx_pfc_ena_frames_pri0; +	__le64	rx_pfc_ena_frames_pri1; +	__le64	rx_pfc_ena_frames_pri2; +	__le64	rx_pfc_ena_frames_pri3; +	__le64	rx_pfc_ena_frames_pri4; +	__le64	rx_pfc_ena_frames_pri5; +	__le64	rx_pfc_ena_frames_pri6; +	__le64	rx_pfc_ena_frames_pri7; +	__le64	rx_sch_crc_err_frames; +	__le64	rx_undrsz_frames; +	__le64	rx_frag_frames; +	__le64	rx_eee_lpi_events; +	__le64	rx_eee_lpi_duration; +	__le64	rx_llfc_physical_msgs; +	__le64	rx_llfc_logical_msgs; +	__le64	rx_llfc_msgs_with_crc_err; +	__le64	rx_hcfc_msgs; +	__le64	rx_hcfc_msgs_with_crc_err; +	__le64	rx_bytes; +	__le64	rx_runt_bytes; +	__le64	rx_runt_frames; +	__le64	rx_stat_discard; +	__le64	rx_stat_err; +}; +  /* hwrm_port_qstats_input (size:320b/40B) */  struct hwrm_port_qstats_input {  	__le16	req_type; @@ -2039,6 +2543,83 @@ struct hwrm_port_qstats_output 
{  	u8	valid;  }; +/* tx_port_stats_ext (size:2048b/256B) */ +struct tx_port_stats_ext { +	__le64	tx_bytes_cos0; +	__le64	tx_bytes_cos1; +	__le64	tx_bytes_cos2; +	__le64	tx_bytes_cos3; +	__le64	tx_bytes_cos4; +	__le64	tx_bytes_cos5; +	__le64	tx_bytes_cos6; +	__le64	tx_bytes_cos7; +	__le64	tx_packets_cos0; +	__le64	tx_packets_cos1; +	__le64	tx_packets_cos2; +	__le64	tx_packets_cos3; +	__le64	tx_packets_cos4; +	__le64	tx_packets_cos5; +	__le64	tx_packets_cos6; +	__le64	tx_packets_cos7; +	__le64	pfc_pri0_tx_duration_us; +	__le64	pfc_pri0_tx_transitions; +	__le64	pfc_pri1_tx_duration_us; +	__le64	pfc_pri1_tx_transitions; +	__le64	pfc_pri2_tx_duration_us; +	__le64	pfc_pri2_tx_transitions; +	__le64	pfc_pri3_tx_duration_us; +	__le64	pfc_pri3_tx_transitions; +	__le64	pfc_pri4_tx_duration_us; +	__le64	pfc_pri4_tx_transitions; +	__le64	pfc_pri5_tx_duration_us; +	__le64	pfc_pri5_tx_transitions; +	__le64	pfc_pri6_tx_duration_us; +	__le64	pfc_pri6_tx_transitions; +	__le64	pfc_pri7_tx_duration_us; +	__le64	pfc_pri7_tx_transitions; +}; + +/* rx_port_stats_ext (size:2368b/296B) */ +struct rx_port_stats_ext { +	__le64	link_down_events; +	__le64	continuous_pause_events; +	__le64	resume_pause_events; +	__le64	continuous_roce_pause_events; +	__le64	resume_roce_pause_events; +	__le64	rx_bytes_cos0; +	__le64	rx_bytes_cos1; +	__le64	rx_bytes_cos2; +	__le64	rx_bytes_cos3; +	__le64	rx_bytes_cos4; +	__le64	rx_bytes_cos5; +	__le64	rx_bytes_cos6; +	__le64	rx_bytes_cos7; +	__le64	rx_packets_cos0; +	__le64	rx_packets_cos1; +	__le64	rx_packets_cos2; +	__le64	rx_packets_cos3; +	__le64	rx_packets_cos4; +	__le64	rx_packets_cos5; +	__le64	rx_packets_cos6; +	__le64	rx_packets_cos7; +	__le64	pfc_pri0_rx_duration_us; +	__le64	pfc_pri0_rx_transitions; +	__le64	pfc_pri1_rx_duration_us; +	__le64	pfc_pri1_rx_transitions; +	__le64	pfc_pri2_rx_duration_us; +	__le64	pfc_pri2_rx_transitions; +	__le64	pfc_pri3_rx_duration_us; +	__le64	pfc_pri3_rx_transitions; +	__le64	pfc_pri4_rx_duration_us; +	__le64	pfc_pri4_rx_transitions; +	__le64	pfc_pri5_rx_duration_us; +	__le64	pfc_pri5_rx_transitions; +	__le64	pfc_pri6_rx_duration_us; +	__le64	pfc_pri6_rx_transitions; +	__le64	pfc_pri7_rx_duration_us; +	__le64	pfc_pri7_rx_transitions; +}; +  /* hwrm_port_qstats_ext_input (size:320b/40B) */  struct hwrm_port_qstats_ext_input {  	__le16	req_type; @@ -2062,7 +2643,8 @@ struct hwrm_port_qstats_ext_output {  	__le16	resp_len;  	__le16	tx_stat_size;  	__le16	rx_stat_size; -	u8	unused_0[3]; +	__le16	total_active_cos_queues; +	u8	unused_0;  	u8	valid;  }; @@ -2153,9 +2735,10 @@ struct hwrm_port_phy_qcaps_output {  	__le16	seq_id;  	__le16	resp_len;  	u8	flags; -	#define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED     0x1UL -	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK        0xfeUL -	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT         1 +	#define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED               0x1UL +	#define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED     0x2UL +	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK                  0xfcUL +	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT                   2  	u8	port_cnt;  	#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL  	#define PORT_PHY_QCAPS_RESP_PORT_CNT_1       0x1UL @@ -2612,6 +3195,7 @@ struct hwrm_queue_qportcfg_output {  	u8	queue_id0;  	u8	queue_id0_service_profile;  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY          0x0UL +	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS       0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE  
0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL @@ -2620,6 +3204,7 @@ struct hwrm_queue_qportcfg_output {  	u8	queue_id1;  	u8	queue_id1_service_profile;  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY          0x0UL +	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS       0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL @@ -2628,6 +3213,7 @@ struct hwrm_queue_qportcfg_output {  	u8	queue_id2;  	u8	queue_id2_service_profile;  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY          0x0UL +	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS       0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL @@ -2636,6 +3222,7 @@ struct hwrm_queue_qportcfg_output {  	u8	queue_id3;  	u8	queue_id3_service_profile;  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY          0x0UL +	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS       0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL @@ -2644,6 +3231,7 @@ struct hwrm_queue_qportcfg_output {  	u8	queue_id4;  	u8	queue_id4_service_profile;  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY          0x0UL +	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS       0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL @@ -2652,6 +3240,7 @@ struct hwrm_queue_qportcfg_output {  	u8	queue_id5;  	u8	queue_id5_service_profile;  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY          0x0UL +	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS       0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL @@ -2660,6 +3249,7 @@ struct hwrm_queue_qportcfg_output {  	u8	queue_id6;  	u8	queue_id6_service_profile;  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY          0x0UL +	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS       0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL @@ -2668,6 +3258,7 @@ struct hwrm_queue_qportcfg_output {  	u8	queue_id7;  	u8	queue_id7_service_profile;  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY          0x0UL +	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS       0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE  0x1UL  	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL  	#define 
QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC   0x3UL @@ -3689,18 +4280,21 @@ struct hwrm_vnic_cfg_input {  	#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE                     0x20UL  	#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE     0x40UL  	__le32	enables; -	#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP     0x1UL -	#define VNIC_CFG_REQ_ENABLES_RSS_RULE          0x2UL -	#define VNIC_CFG_REQ_ENABLES_COS_RULE          0x4UL -	#define VNIC_CFG_REQ_ENABLES_LB_RULE           0x8UL -	#define VNIC_CFG_REQ_ENABLES_MRU               0x10UL +	#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP            0x1UL +	#define VNIC_CFG_REQ_ENABLES_RSS_RULE                 0x2UL +	#define VNIC_CFG_REQ_ENABLES_COS_RULE                 0x4UL +	#define VNIC_CFG_REQ_ENABLES_LB_RULE                  0x8UL +	#define VNIC_CFG_REQ_ENABLES_MRU                      0x10UL +	#define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID       0x20UL +	#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID     0x40UL  	__le16	vnic_id;  	__le16	dflt_ring_grp;  	__le16	rss_rule;  	__le16	cos_rule;  	__le16	lb_rule;  	__le16	mru; -	u8	unused_0[4]; +	__le16	default_rx_ring_id; +	__le16	default_cmpl_ring_id;  };  /* hwrm_vnic_cfg_output (size:128b/16B) */ @@ -3740,6 +4334,7 @@ struct hwrm_vnic_qcaps_output {  	#define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP                  0x10UL  	#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP                     0x20UL  	#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP     0x40UL +	#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP                   0x80UL  	u8	unused_1[7];  	u8	valid;  }; @@ -3857,7 +4452,14 @@ struct hwrm_vnic_rss_cfg_input {  	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6         0x8UL  	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6     0x10UL  	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6     0x20UL -	u8	unused_0[4]; +	__le16	vnic_id; +	u8	ring_table_pair_index; +	u8	hash_mode_flags; +	#define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT         0x1UL +	#define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4     0x2UL +	#define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2     0x4UL +	#define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4     0x8UL +	#define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2     0x10UL  	__le64	ring_grp_tbl_addr;  	__le64	hash_key_tbl_addr;  	__le16	rss_ctx_idx; @@ -3950,7 +4552,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output {  	u8	valid;  }; -/* hwrm_ring_alloc_input (size:640b/80B) */ +/* hwrm_ring_alloc_input (size:704b/88B) */  struct hwrm_ring_alloc_input {  	__le16	req_type;  	__le16	cmpl_ring; @@ -3961,12 +4563,17 @@ struct hwrm_ring_alloc_input {  	#define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG          0x2UL  	#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID     0x8UL  	#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID          0x20UL +	#define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID      0x40UL +	#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID      0x80UL +	#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID     0x100UL  	u8	ring_type;  	#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL   0x0UL  	#define RING_ALLOC_REQ_RING_TYPE_TX        0x1UL  	#define RING_ALLOC_REQ_RING_TYPE_RX        0x2UL  	#define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL -	#define RING_ALLOC_REQ_RING_TYPE_LAST     RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL +	#define RING_ALLOC_REQ_RING_TYPE_RX_AGG    0x4UL +	#define RING_ALLOC_REQ_RING_TYPE_NQ        0x5UL +	#define RING_ALLOC_REQ_RING_TYPE_LAST     RING_ALLOC_REQ_RING_TYPE_NQ  	u8	unused_0[3];  	__le64	page_tbl_addr;  	__le32	fbo; @@ -3977,8 
+4584,9 @@ struct hwrm_ring_alloc_input {  	__le16	logical_id;  	__le16	cmpl_ring_id;  	__le16	queue_id; -	u8	unused_2[2]; -	__le32	reserved1; +	__le16	rx_buf_size; +	__le16	rx_ring_id; +	__le16	nq_ring_id;  	__le16	ring_arb_cfg;  	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK      0xfUL  	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT       0 @@ -4016,6 +4624,7 @@ struct hwrm_ring_alloc_input {  	#define RING_ALLOC_REQ_INT_MODE_POLL   0x3UL  	#define RING_ALLOC_REQ_INT_MODE_LAST  RING_ALLOC_REQ_INT_MODE_POLL  	u8	unused_4[3]; +	__le64	cq_handle;  };  /* hwrm_ring_alloc_output (size:128b/16B) */ @@ -4042,7 +4651,9 @@ struct hwrm_ring_free_input {  	#define RING_FREE_REQ_RING_TYPE_TX        0x1UL  	#define RING_FREE_REQ_RING_TYPE_RX        0x2UL  	#define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL -	#define RING_FREE_REQ_RING_TYPE_LAST     RING_FREE_REQ_RING_TYPE_ROCE_CMPL +	#define RING_FREE_REQ_RING_TYPE_RX_AGG    0x4UL +	#define RING_FREE_REQ_RING_TYPE_NQ        0x5UL +	#define RING_FREE_REQ_RING_TYPE_LAST     RING_FREE_REQ_RING_TYPE_NQ  	u8	unused_0;  	__le16	ring_id;  	u8	unused_1[4]; @@ -4058,6 +4669,52 @@ struct hwrm_ring_free_output {  	u8	valid;  }; +/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */ +struct hwrm_ring_aggint_qcaps_input { +	__le16	req_type; +	__le16	cmpl_ring; +	__le16	seq_id; +	__le16	target_id; +	__le64	resp_addr; +}; + +/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */ +struct hwrm_ring_aggint_qcaps_output { +	__le16	error_code; +	__le16	req_type; +	__le16	seq_id; +	__le16	resp_len; +	__le32	cmpl_params; +	#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN                  0x1UL +	#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX                  0x2UL +	#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET                      0x4UL +	#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE                        0x8UL +	#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR                0x10UL +	#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT     0x20UL +	#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR                0x40UL +	#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT     0x80UL +	#define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT                0x100UL +	__le32	nq_params; +	#define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN     0x1UL +	__le16	num_cmpl_dma_aggr_min; +	__le16	num_cmpl_dma_aggr_max; +	__le16	num_cmpl_dma_aggr_during_int_min; +	__le16	num_cmpl_dma_aggr_during_int_max; +	__le16	cmpl_aggr_dma_tmr_min; +	__le16	cmpl_aggr_dma_tmr_max; +	__le16	cmpl_aggr_dma_tmr_during_int_min; +	__le16	cmpl_aggr_dma_tmr_during_int_max; +	__le16	int_lat_tmr_min_min; +	__le16	int_lat_tmr_min_max; +	__le16	int_lat_tmr_max_min; +	__le16	int_lat_tmr_max_max; +	__le16	num_cmpl_aggr_int_min; +	__le16	num_cmpl_aggr_int_max; +	__le16	timer_units; +	u8	unused_0[1]; +	u8	valid; +}; +  /* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */  struct hwrm_ring_cmpl_ring_qaggint_params_input {  	__le16	req_type; @@ -4100,6 +4757,7 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {  	__le16	flags;  	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET     0x1UL  	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE       0x2UL +	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ           0x4UL  	__le16	num_cmpl_dma_aggr;  	__le16	num_cmpl_dma_aggr_during_int;  	__le16	cmpl_aggr_dma_tmr; @@ -4107,7 +4765,14 @@ struct 
hwrm_ring_cmpl_ring_cfg_aggint_params_input {  	__le16	int_lat_tmr_min;  	__le16	int_lat_tmr_max;  	__le16	num_cmpl_aggr_int; -	u8	unused_0[6]; +	__le16	enables; +	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR                0x1UL +	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT     0x2UL +	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR                0x4UL +	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN                  0x8UL +	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX                  0x10UL +	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT                0x20UL +	u8	unused_0[4];  };  /* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */ @@ -4120,34 +4785,6 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {  	u8	valid;  }; -/* hwrm_ring_reset_input (size:192b/24B) */ -struct hwrm_ring_reset_input { -	__le16	req_type; -	__le16	cmpl_ring; -	__le16	seq_id; -	__le16	target_id; -	__le64	resp_addr; -	u8	ring_type; -	#define RING_RESET_REQ_RING_TYPE_L2_CMPL   0x0UL -	#define RING_RESET_REQ_RING_TYPE_TX        0x1UL -	#define RING_RESET_REQ_RING_TYPE_RX        0x2UL -	#define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL -	#define RING_RESET_REQ_RING_TYPE_LAST     RING_RESET_REQ_RING_TYPE_ROCE_CMPL -	u8	unused_0; -	__le16	ring_id; -	u8	unused_1[4]; -}; - -/* hwrm_ring_reset_output (size:128b/16B) */ -struct hwrm_ring_reset_output { -	__le16	error_code; -	__le16	req_type; -	__le16	seq_id; -	__le16	resp_len; -	u8	unused_0[7]; -	u8	valid; -}; -  /* hwrm_ring_grp_alloc_input (size:192b/24B) */  struct hwrm_ring_grp_alloc_input {  	__le16	req_type; @@ -5032,7 +5669,8 @@ struct hwrm_tunnel_dst_port_query_input {  	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN    0x1UL  	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE   0x5UL  	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL -	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST    TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 +	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL +	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST    TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1  	u8	unused_0[7];  }; @@ -5059,7 +5697,8 @@ struct hwrm_tunnel_dst_port_alloc_input {  	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN    0x1UL  	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE   0x5UL  	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL -	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST    TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 +	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL +	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST    TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1  	u8	unused_0;  	__be16	tunnel_dst_port_val;  	u8	unused_1[4]; @@ -5087,7 +5726,8 @@ struct hwrm_tunnel_dst_port_free_input {  	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN    0x1UL  	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE   0x5UL  	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL -	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST    TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 +	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL +	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST    TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1  	u8	unused_0;  	__le16	tunnel_dst_port_id;  	u8	unused_1[4]; @@ -5259,140 +5899,6 @@ struct hwrm_pcie_qstats_output {  	u8	valid;  }; -/* tx_port_stats (size:3264b/408B) */ 
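The enables word added to hwrm_ring_cmpl_ring_cfg_aggint_params_input above follows the usual HWRM convention (an assumption here, the header itself does not spell it out) that firmware honors only the fields whose enable bit is set, with the new hwrm_ring_aggint_qcaps message reporting the legal min/max range for each knob. A hedged sketch of filling the request; coal_frames and coal_ticks are illustrative variables:

	req.num_cmpl_dma_aggr = cpu_to_le16(coal_frames);
	req.int_lat_tmr_max = cpu_to_le16(coal_ticks);
	/* declare which of the fields above carry valid values */
	req.enables = cpu_to_le16(
		RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR |
		RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX);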
-struct tx_port_stats { -	__le64	tx_64b_frames; -	__le64	tx_65b_127b_frames; -	__le64	tx_128b_255b_frames; -	__le64	tx_256b_511b_frames; -	__le64	tx_512b_1023b_frames; -	__le64	tx_1024b_1518_frames; -	__le64	tx_good_vlan_frames; -	__le64	tx_1519b_2047_frames; -	__le64	tx_2048b_4095b_frames; -	__le64	tx_4096b_9216b_frames; -	__le64	tx_9217b_16383b_frames; -	__le64	tx_good_frames; -	__le64	tx_total_frames; -	__le64	tx_ucast_frames; -	__le64	tx_mcast_frames; -	__le64	tx_bcast_frames; -	__le64	tx_pause_frames; -	__le64	tx_pfc_frames; -	__le64	tx_jabber_frames; -	__le64	tx_fcs_err_frames; -	__le64	tx_control_frames; -	__le64	tx_oversz_frames; -	__le64	tx_single_dfrl_frames; -	__le64	tx_multi_dfrl_frames; -	__le64	tx_single_coll_frames; -	__le64	tx_multi_coll_frames; -	__le64	tx_late_coll_frames; -	__le64	tx_excessive_coll_frames; -	__le64	tx_frag_frames; -	__le64	tx_err; -	__le64	tx_tagged_frames; -	__le64	tx_dbl_tagged_frames; -	__le64	tx_runt_frames; -	__le64	tx_fifo_underruns; -	__le64	tx_pfc_ena_frames_pri0; -	__le64	tx_pfc_ena_frames_pri1; -	__le64	tx_pfc_ena_frames_pri2; -	__le64	tx_pfc_ena_frames_pri3; -	__le64	tx_pfc_ena_frames_pri4; -	__le64	tx_pfc_ena_frames_pri5; -	__le64	tx_pfc_ena_frames_pri6; -	__le64	tx_pfc_ena_frames_pri7; -	__le64	tx_eee_lpi_events; -	__le64	tx_eee_lpi_duration; -	__le64	tx_llfc_logical_msgs; -	__le64	tx_hcfc_msgs; -	__le64	tx_total_collisions; -	__le64	tx_bytes; -	__le64	tx_xthol_frames; -	__le64	tx_stat_discard; -	__le64	tx_stat_error; -}; - -/* rx_port_stats (size:4224b/528B) */ -struct rx_port_stats { -	__le64	rx_64b_frames; -	__le64	rx_65b_127b_frames; -	__le64	rx_128b_255b_frames; -	__le64	rx_256b_511b_frames; -	__le64	rx_512b_1023b_frames; -	__le64	rx_1024b_1518_frames; -	__le64	rx_good_vlan_frames; -	__le64	rx_1519b_2047b_frames; -	__le64	rx_2048b_4095b_frames; -	__le64	rx_4096b_9216b_frames; -	__le64	rx_9217b_16383b_frames; -	__le64	rx_total_frames; -	__le64	rx_ucast_frames; -	__le64	rx_mcast_frames; -	__le64	rx_bcast_frames; -	__le64	rx_fcs_err_frames; -	__le64	rx_ctrl_frames; -	__le64	rx_pause_frames; -	__le64	rx_pfc_frames; -	__le64	rx_unsupported_opcode_frames; -	__le64	rx_unsupported_da_pausepfc_frames; -	__le64	rx_wrong_sa_frames; -	__le64	rx_align_err_frames; -	__le64	rx_oor_len_frames; -	__le64	rx_code_err_frames; -	__le64	rx_false_carrier_frames; -	__le64	rx_ovrsz_frames; -	__le64	rx_jbr_frames; -	__le64	rx_mtu_err_frames; -	__le64	rx_match_crc_frames; -	__le64	rx_promiscuous_frames; -	__le64	rx_tagged_frames; -	__le64	rx_double_tagged_frames; -	__le64	rx_trunc_frames; -	__le64	rx_good_frames; -	__le64	rx_pfc_xon2xoff_frames_pri0; -	__le64	rx_pfc_xon2xoff_frames_pri1; -	__le64	rx_pfc_xon2xoff_frames_pri2; -	__le64	rx_pfc_xon2xoff_frames_pri3; -	__le64	rx_pfc_xon2xoff_frames_pri4; -	__le64	rx_pfc_xon2xoff_frames_pri5; -	__le64	rx_pfc_xon2xoff_frames_pri6; -	__le64	rx_pfc_xon2xoff_frames_pri7; -	__le64	rx_pfc_ena_frames_pri0; -	__le64	rx_pfc_ena_frames_pri1; -	__le64	rx_pfc_ena_frames_pri2; -	__le64	rx_pfc_ena_frames_pri3; -	__le64	rx_pfc_ena_frames_pri4; -	__le64	rx_pfc_ena_frames_pri5; -	__le64	rx_pfc_ena_frames_pri6; -	__le64	rx_pfc_ena_frames_pri7; -	__le64	rx_sch_crc_err_frames; -	__le64	rx_undrsz_frames; -	__le64	rx_frag_frames; -	__le64	rx_eee_lpi_events; -	__le64	rx_eee_lpi_duration; -	__le64	rx_llfc_physical_msgs; -	__le64	rx_llfc_logical_msgs; -	__le64	rx_llfc_msgs_with_crc_err; -	__le64	rx_hcfc_msgs; -	__le64	rx_hcfc_msgs_with_crc_err; -	__le64	rx_bytes; -	__le64	rx_runt_bytes; -	__le64	rx_runt_frames; -	__le64	rx_stat_discard; -	
__le64	rx_stat_err; -}; - -/* rx_port_stats_ext (size:320b/40B) */ -struct rx_port_stats_ext { -	__le64	link_down_events; -	__le64	continuous_pause_events; -	__le64	resume_pause_events; -	__le64	continuous_roce_pause_events; -	__le64	resume_roce_pause_events; -}; -  /* pcie_ctx_hw_stats (size:768b/96B) */  struct pcie_ctx_hw_stats {  	__le64	pcie_pl_signal_integrity; @@ -5884,6 +6390,114 @@ struct hwrm_wol_reason_qcfg_output {  	u8	valid;  }; +/* coredump_segment_record (size:128b/16B) */ +struct coredump_segment_record { +	__le16	component_id; +	__le16	segment_id; +	__le16	max_instances; +	u8	version_hi; +	u8	version_low; +	u8	seg_flags; +	u8	unused_0[7]; +}; + +/* hwrm_dbg_coredump_list_input (size:256b/32B) */ +struct hwrm_dbg_coredump_list_input { +	__le16	req_type; +	__le16	cmpl_ring; +	__le16	seq_id; +	__le16	target_id; +	__le64	resp_addr; +	__le64	host_dest_addr; +	__le32	host_buf_len; +	__le16	seq_no; +	u8	unused_0[2]; +}; + +/* hwrm_dbg_coredump_list_output (size:128b/16B) */ +struct hwrm_dbg_coredump_list_output { +	__le16	error_code; +	__le16	req_type; +	__le16	seq_id; +	__le16	resp_len; +	u8	flags; +	#define DBG_COREDUMP_LIST_RESP_FLAGS_MORE     0x1UL +	u8	unused_0; +	__le16	total_segments; +	__le16	data_len; +	u8	unused_1; +	u8	valid; +}; + +/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */ +struct hwrm_dbg_coredump_initiate_input { +	__le16	req_type; +	__le16	cmpl_ring; +	__le16	seq_id; +	__le16	target_id; +	__le64	resp_addr; +	__le16	component_id; +	__le16	segment_id; +	__le16	instance; +	__le16	unused_0; +	u8	seg_flags; +	u8	unused_1[7]; +}; + +/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */ +struct hwrm_dbg_coredump_initiate_output { +	__le16	error_code; +	__le16	req_type; +	__le16	seq_id; +	__le16	resp_len; +	u8	unused_0[7]; +	u8	valid; +}; + +/* coredump_data_hdr (size:128b/16B) */ +struct coredump_data_hdr { +	__le32	address; +	__le32	flags_length; +	__le32	instance; +	__le32	next_offset; +}; + +/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */ +struct hwrm_dbg_coredump_retrieve_input { +	__le16	req_type; +	__le16	cmpl_ring; +	__le16	seq_id; +	__le16	target_id; +	__le64	resp_addr; +	__le64	host_dest_addr; +	__le32	host_buf_len; +	__le32	unused_0; +	__le16	component_id; +	__le16	segment_id; +	__le16	instance; +	__le16	unused_1; +	u8	seg_flags; +	u8	unused_2; +	__le16	unused_3; +	__le32	unused_4; +	__le32	seq_no; +	__le32	unused_5; +}; + +/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */ +struct hwrm_dbg_coredump_retrieve_output { +	__le16	error_code; +	__le16	req_type; +	__le16	seq_id; +	__le16	resp_len; +	u8	flags; +	#define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE     0x1UL +	u8	unused_0; +	__le16	data_len; +	u8	unused_1[3]; +	u8	valid; +}; +  /* hwrm_nvm_read_input (size:320b/40B) */  struct hwrm_nvm_read_input {  	__le16	req_type; @@ -6269,12 +6883,14 @@ struct hwrm_nvm_set_variable_input {  	__le16	index_2;  	__le16	index_3;  	u8	flags; -	#define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH           0x1UL -	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK     0xeUL -	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT      1 -	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE       (0x0UL << 1) -	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1  (0x1UL << 1) -	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST      NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 +	#define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH                0x1UL +	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK          0xeUL +	#define 
NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT           1 +	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE            (0x0UL << 1) +	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1       (0x1UL << 1) +	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256          (0x2UL << 1) +	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH  (0x3UL << 1) +	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST           NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH  	u8	unused_0; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index a64910892c25..6d583bcd2a81 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -447,7 +447,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)  	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;  	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;  	struct bnxt_pf_info *pf = &bp->pf; -	int i, rc = 0; +	int i, rc = 0, min = 1;  	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); @@ -464,14 +464,19 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)  	req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);  	req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); -	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) { -		req.min_cmpl_rings = cpu_to_le16(1); -		req.min_tx_rings = cpu_to_le16(1); -		req.min_rx_rings = cpu_to_le16(1); -		req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX); -		req.min_vnics = cpu_to_le16(1); -		req.min_stat_ctx = cpu_to_le16(1); -		req.min_hw_ring_grps = cpu_to_le16(1); +	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { +		min = 0; +		req.min_rsscos_ctx = cpu_to_le16(min); +	} +	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL || +	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { +		req.min_cmpl_rings = cpu_to_le16(min); +		req.min_tx_rings = cpu_to_le16(min); +		req.min_rx_rings = cpu_to_le16(min); +		req.min_l2_ctxs = cpu_to_le16(min); +		req.min_vnics = cpu_to_le16(min); +		req.min_stat_ctx = cpu_to_le16(min); +		req.min_hw_ring_grps = cpu_to_le16(min);  	} else {  		vf_cp_rings /= num_vfs;  		vf_tx_rings /= num_vfs; @@ -618,7 +623,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)  static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)  { -	if (bp->flags & BNXT_FLAG_NEW_RM) +	if (BNXT_NEW_RM(bp))  		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);  	else  		return bnxt_hwrm_func_cfg(bp, num_vfs); @@ -956,9 +961,13 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)  	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {  		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))  			mac_ok = true; -	} else if (bp->hwrm_spec_code < 0x10202) { -		mac_ok = true;  	} else { +		/* There are two cases: +		 * 1. If firmware spec < 0x10202, VF MAC address is not forwarded +		 *   to the PF and so it doesn't have to match +		 * 2. Allow VF to modify its own MAC when PF has not assigned a +		 *   valid MAC address and firmware spec >= 0x10202 +		 */  		mac_ok = true;  	}  	if (mac_ok) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 795f45024c20..139d96c5a023 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -27,6 +27,15 @@  #define BNXT_FID_INVALID			0xffff  #define VLAN_TCI(vid, prio)	((vid) | ((prio) << VLAN_PRIO_SHIFT))
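The bnxt_tc.c hunks that follow add PCP/VID helper macros and is_vlan_tci_allowed(), which accepts a VLAN TCI match only when the VID is an exact match and the priority bits are either fully wildcarded or exactly matched against zero. A worked example of that policy (values illustrative; 0x0fff and 0xe000 are the standard 802.1Q VID and priority masks):

	/* accepted: VID exact (mask 0x0fff), PCP fully wildcarded */
	is_vlan_tci_allowed(htons(0x0fff), htons(100));

	/* rejected: exact match requested on a non-zero PCP */
	is_vlan_tci_allowed(htons(0xffff), htons(100 | (3 << VLAN_PRIO_SHIFT)));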
+#define is_vlan_pcp_wildcarded(vlan_tci_mask)	\ +	((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000) +#define is_vlan_pcp_exactmatch(vlan_tci_mask)	\ +	((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK) +#define is_vlan_pcp_zero(vlan_tci)	\ +	((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000) +#define is_vid_exactmatch(vlan_tci_mask)	\ +	((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK) +  /* Return the dst fid of the func for flow forwarding   * For PFs: src_fid is the fid of the PF   * For VF-reps: src_fid is the fid of the VF @@ -389,6 +398,21 @@ static bool is_exactmatch(void *mask, int len)  	return true;  } +static bool is_vlan_tci_allowed(__be16  vlan_tci_mask, +				__be16  vlan_tci) +{ +	/* VLAN priority must be either exactly zero or fully wildcarded and +	 * VLAN id must be exact match. +	 */ +	if (is_vid_exactmatch(vlan_tci_mask) && +	    ((is_vlan_pcp_exactmatch(vlan_tci_mask) && +	      is_vlan_pcp_zero(vlan_tci)) || +	     is_vlan_pcp_wildcarded(vlan_tci_mask))) +		return true; + +	return false; +} +  static bool bits_set(void *key, int len)  {  	const u8 *p = key; @@ -803,9 +827,9 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)  	/* Currently VLAN fields cannot be partial wildcard */  	if (bits_set(&flow->l2_key.inner_vlan_tci,  		     sizeof(flow->l2_key.inner_vlan_tci)) && -	    !is_exactmatch(&flow->l2_mask.inner_vlan_tci, -			   sizeof(flow->l2_mask.inner_vlan_tci))) { -		netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n"); +	    !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci, +				 flow->l2_key.inner_vlan_tci)) { +		netdev_info(bp->dev, "Unsupported VLAN TCI\n");  		return false;  	}  	if (bits_set(&flow->l2_key.inner_vlan_tpid, @@ -1544,22 +1568,16 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp)  int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,  			 struct tc_cls_flower_offload *cls_flower)  { -	int rc = 0; -  	switch (cls_flower->command) {  	case TC_CLSFLOWER_REPLACE: -		rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); -		break; - +		return bnxt_tc_add_flow(bp, src_fid, cls_flower);  	case TC_CLSFLOWER_DESTROY: -		rc = bnxt_tc_del_flow(bp, cls_flower); -		break; - +		return bnxt_tc_del_flow(bp, cls_flower);  	case TC_CLSFLOWER_STATS: -		rc = bnxt_tc_get_flow_stats(bp, cls_flower); -		break; +		return bnxt_tc_get_flow_stats(bp, cls_flower); +	default: +		return -EOPNOTSUPP;  	} -	return rc;  }  static const struct rhashtable_params bnxt_tc_flow_ht_params = { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 347e4f946eb2..c37b2842f972 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -141,7 +141,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,  	if (avail_msix > num_msix)  		avail_msix = num_msix; -	if (bp->flags & BNXT_FLAG_NEW_RM) { +	if (BNXT_NEW_RM(bp)) {  		idx = bp->cp_nr_rings;  	} else {  		max_idx = min_t(int, bp->total_irqs, max_cp_rings); @@ -162,14 +162,13 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,  		return -EAGAIN;  	} -	if (bp->flags & BNXT_FLAG_NEW_RM) { +	if (BNXT_NEW_RM(bp)) {  		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;  		avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;  		edev->ulp_tbl[ulp_id].msix_requested = avail_msix;  	}  	bnxt_fill_msix_vecs(bp, ent); -	bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);  	bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);  	edev->flags |=
BNXT_EN_FLAG_MSIX_REQUESTED;  	return avail_msix; @@ -192,7 +191,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)  	msix_requested = edev->ulp_tbl[ulp_id].msix_requested;  	bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);  	edev->ulp_tbl[ulp_id].msix_requested = 0; -	bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);  	edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;  	if (netif_running(dev)) {  		bnxt_close_nic(bp, true, false); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 05d405905906..e31f5d803c13 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -173,7 +173,7 @@ static int bnxt_vf_rep_setup_tc_block(struct net_device *dev,  	case TC_BLOCK_BIND:  		return tcf_block_cb_register(f->block,  					     bnxt_vf_rep_setup_tc_block_cb, -					     vf_rep, vf_rep); +					     vf_rep, vf_rep, f->extack);  	case TC_BLOCK_UNBIND:  		tcf_block_cb_unregister(f->block,  					bnxt_vf_rep_setup_tc_block_cb, vf_rep); @@ -543,9 +543,14 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)  		break;  	case DEVLINK_ESWITCH_MODE_SWITCHDEV: +		if (bp->hwrm_spec_code < 0x10803) { +			netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n"); +			rc = -ENOTSUPP; +			goto done; +		} +  		if (pci_num_vf(bp->pdev) == 0) { -			netdev_info(bp->dev, -				    "Enable VFs before setting switchdev mode"); +			netdev_info(bp->dev, "Enable VFs before setting switchdev mode");  			rc = -EPERM;  			goto done;  		} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 1f0e872d0667..0584d07c8c33 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -219,7 +219,6 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)  		rc = bnxt_xdp_set(bp, xdp->prog);  		break;  	case XDP_QUERY_PROG: -		xdp->prog_attached = !!bp->xdp_prog;  		xdp->prog_id = bp->xdp_prog ? 
bp->xdp_prog->aux->id : 0;  		rc = 0;  		break; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 30273a7717e2..d83233ae4a15 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,  	id_tbl->max = size;  	id_tbl->next = next;  	spin_lock_init(&id_tbl->lock); -	id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL); +	id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);  	if (!id_tbl->table)  		return -ENOMEM; @@ -2562,7 +2562,6 @@ static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)  static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)  { -	struct fcoe_kwqe_destroy *req;  	union l5cm_specific_data l5_data;  	struct cnic_local *cp = dev->cnic_priv;  	struct bnx2x *bp = netdev_priv(dev->netdev); @@ -2571,7 +2570,6 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)  	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); -	req = (struct fcoe_kwqe_destroy *) kwqe;  	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);  	memset(&l5_data, 0, sizeof(l5_data)); @@ -4090,7 +4088,7 @@ static void cnic_cm_free_mem(struct cnic_dev *dev)  {  	struct cnic_local *cp = dev->cnic_priv; -	kfree(cp->csk_tbl); +	kvfree(cp->csk_tbl);  	cp->csk_tbl = NULL;  	cnic_free_id_tbl(&cp->csk_port_tbl);  } @@ -4100,8 +4098,8 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)  	struct cnic_local *cp = dev->cnic_priv;  	u32 port_id; -	cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock), -			      GFP_KERNEL); +	cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock), +			       GFP_KERNEL);  	if (!cp->csk_tbl)  		return -ENOMEM; @@ -5091,13 +5089,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)  	struct cnic_local *cp = dev->cnic_priv;  	struct bnx2x *bp = netdev_priv(dev->netdev);  	struct cnic_eth_dev *ethdev = cp->ethdev; -	int func, ret; +	int ret;  	u32 pfid;  	dev->stats_addr = ethdev->addr_drv_info_to_mcp;  	cp->func = bp->pf_num; -	func = CNIC_FUNC(cp);  	pfid = bp->pfid;  	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3be87efdc93d..e6f28c7942ab 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6,11 +6,15 @@   * Copyright (C) 2004 Sun Microsystems Inc.   * Copyright (C) 2005-2016 Broadcom Corporation.   * Copyright (C) 2016-2017 Broadcom Limited. + * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" + * refers to Broadcom Inc. and/or its subsidiaries.   *   * Firmware is:   *	Derived from proprietary unpublished source code,   *	Copyright (C) 2000-2016 Broadcom Corporation.   *	Copyright (C) 2016-2017 Broadcom Ltd. + *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" + *	refers to Broadcom Inc. and/or its subsidiaries.   
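Two allocation fixes ride along in cnic.c above: the ID-table bitmap is now sized with BITS_TO_LONGS() so its byte count matches the unsigned-long granularity the bitmap API walks, and the large socket table moves to kvcalloc()/kvfree() so the allocation may fall back to vmalloc memory. The bitmap idiom in isolation (illustrative snippet, not this driver's code; size and id are placeholder variables):

	unsigned long *map;

	/* one bit per ID, rounded up to whole longs */
	map = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	set_bit(id, map);	/* mark an ID as in use */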
*   *	Permission is hereby granted for the distribution of this firmware   *	data in hexadecimal or equivalent format, provided this copyright @@ -50,6 +54,7 @@  #include <linux/ssb/ssb_driver_gige.h>  #include <linux/hwmon.h>  #include <linux/hwmon-sysfs.h> +#include <linux/crc32poly.h>  #include <net/checksum.h>  #include <net/ip.h> @@ -721,6 +726,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)  	case TG3_APE_LOCK_GPIO:  		if (tg3_asic_rev(tp) == ASIC_REV_5761)  			return 0; +		/* else: fall through */  	case TG3_APE_LOCK_GRC:  	case TG3_APE_LOCK_MEM:  		if (!tp->pci_fn) @@ -781,6 +787,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)  	case TG3_APE_LOCK_GPIO:  		if (tg3_asic_rev(tp) == ASIC_REV_5761)  			return; +		/* else: fall through */  	case TG3_APE_LOCK_GRC:  	case TG3_APE_LOCK_MEM:  		if (!tp->pci_fn) @@ -9290,6 +9297,15 @@ static int tg3_chip_reset(struct tg3 *tp)  	tg3_restore_clk(tp); +	/* Increase the core clock speed to fix tx timeout issue for 5762 +	 * with 100Mbps link speed. +	 */ +	if (tg3_asic_rev(tp) == ASIC_REV_5762) { +		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); +		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | +		     TG3_CPMU_MAC_ORIDE_ENABLE); +	} +  	/* Reprobe ASF enable state.  */  	tg3_flag_clear(tp, ENABLE_ASF);  	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | @@ -9707,7 +9723,7 @@ static inline u32 calc_crc(unsigned char *buf, int len)  			reg >>= 1;  			if (tmp) -				reg ^= 0xedb88320; +				reg ^= CRC32_POLY_LE;  		}  	} @@ -10706,28 +10722,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)  	switch (limit) {  	case 16:  		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0); +		/* fall through */  	case 15:  		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0); +		/* fall through */  	case 14:  		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0); +		/* fall through */  	case 13:  		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0); +		/* fall through */  	case 12:  		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0); +		/* fall through */  	case 11:  		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0); +		/* fall through */  	case 10:  		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0); +		/* fall through */  	case 9:  		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0); +		/* fall through */  	case 8:  		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0); +		/* fall through */  	case 7:  		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0); +		/* fall through */  	case 6:  		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0); +		/* fall through */  	case 5:  		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0); +		/* fall through */  	case 4:  		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */  	case 3: diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 1d61aa3efda1..a772a33b685c 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -7,6 +7,8 @@   * Copyright (C) 2004 Sun Microsystems Inc.   * Copyright (C) 2007-2016 Broadcom Corporation.   * Copyright (C) 2016-2017 Broadcom Limited. + * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom" + * refers to Broadcom Inc. and/or its subsidiaries.   */  #ifndef _T3_H |
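The calc_crc() hunk above swaps the literal 0xedb88320 for CRC32_POLY_LE from the newly included <linux/crc32poly.h>; the value is unchanged, the constant is simply shared now. For reference, the standard bit-reflected CRC-32 byte step that loop implements, written out standalone (a sketch, not tg3's exact code):

static u32 crc32_le_step(u32 reg, u8 byte)
{
	int bit;

	reg ^= byte;
	for (bit = 0; bit < 8; bit++)
		/* shift right, folding in the reflected polynomial on a 1 bit */
		reg = (reg >> 1) ^ ((reg & 1) ? CRC32_POLY_LE : 0);
	return reg;
}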