Diffstat (limited to 'drivers/net/ethernet/intel')
128 files changed, 10112 insertions, 7148 deletions
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 3facb55b7161..a3c84bf05e44 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -337,6 +337,9 @@ config ICE_HWTS
 	  the PTP clock driver precise cross-timestamp ioctl
 	  (PTP_SYS_OFFSET_PRECISE).

+config ICE_GNSS
+	def_bool GNSS = y || GNSS = ICE
+
 config FM10K
 	tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
 	default n
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 61e60e4de600..da6e303ad99b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4229,8 +4229,6 @@ process_skb:
 				 */
 				p = buffer_info->rxbuf.page;
 				if (length <= copybreak) {
-					u8 *vaddr;
-
 					if (likely(!(netdev->features & NETIF_F_RXFCS)))
 						length -= 4;
 					skb = e1000_alloc_rx_skb(adapter,
@@ -4238,10 +4236,9 @@ process_skb:
 					if (!skb)
 						break;

-					vaddr = kmap_atomic(p);
-					memcpy(skb_tail_pointer(skb), vaddr,
-					       length);
-					kunmap_atomic(vaddr);
+					memcpy(skb_tail_pointer(skb),
+					       page_address(p), length);
+
 					/* re-use the page, so don't erase
 					 * buffer_info->rxbuf.page
 					 */
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 44e58b6e7660..0baa15503c38 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -5,6 +5,9 @@
 # Makefile for the Intel(R) PRO/1000 ethernet driver
 #

+ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(src)
+
 obj-$(CONFIG_E1000E) += e1000e.o

 e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index e8a9a9610ac6..a187582d2299 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -116,7 +116,8 @@ enum e1000_boards {
 	board_pch_spt,
 	board_pch_cnp,
 	board_pch_tgp,
-	board_pch_adp
+	board_pch_adp,
+	board_pch_mtp
 };

 struct e1000_ps_page {
@@ -504,6 +505,7 @@ extern const struct e1000_info e1000_pch_spt_info;
 extern const struct e1000_info e1000_pch_cnp_info;
 extern const struct e1000_info e1000_pch_tgp_info;
 extern const struct e1000_info e1000_pch_adp_info;
+extern const struct e1000_info e1000_pch_mtp_info;
 extern const struct e1000_info e1000_es2_info;

 void e1000e_ptp_init(struct e1000_adapter *adapter);
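A note on the kmap_atomic() removals in the e1000/e1000e hunks in this series: the drivers' receive buffers are allocated without __GFP_HIGHMEM, so the pages always have a permanent kernel mapping and page_address() is valid in any context. A minimal sketch of the pattern (illustrative, not driver code):

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* For lowmem pages (no __GFP_HIGHMEM), page_address() returns the
     * permanent mapping, so no kmap_atomic()/kunmap_atomic() pair is
     * needed around the copy.
     */
    static void copy_from_lowmem_page(struct page *p, void *dst, size_t len)
    {
    	memcpy(dst, page_address(p), len);
    }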
diff --git a/drivers/net/ethernet/intel/e1000e/e1000e_trace.h b/drivers/net/ethernet/intel/e1000e/e1000e_trace.h
new file mode 100644
index 000000000000..19d3cf4d924e
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/e1000e_trace.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022, Intel Corporation. */
+/* Modeled on trace-events-sample.h */
+/* The trace subsystem name for e1000e will be "e1000e_trace".
+ *
+ * This file is named e1000e_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM e1000e_trace
+
+#if !defined(_TRACE_E1000E_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_E1000E_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(e1000e_trace_mac_register,
+	    TP_PROTO(uint32_t reg),
+	    TP_ARGS(reg),
+	    TP_STRUCT__entry(__field(uint32_t,	reg)),
+	    TP_fast_assign(__entry->reg = reg;),
+	    TP_printk("event: TraceHub e1000e mac register: 0x%08x",
+		      __entry->reg)
+);
+
+#endif
+/* This must be outside ifdef _E1000E_TRACE_H */
+/* This trace include file is not located in the .../include/trace
+ * with the kernel tracepoint definitions, because we're a loadable
+ * module.
+ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE e1000e_trace
+
+#include <trace/define_trace.h>
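The header above only declares the tracepoint. Exactly one translation unit in the module must define CREATE_TRACE_POINTS before including it so the tracepoint bodies are emitted once; every other file includes it plainly. The netdev.c hunk later in this diff does exactly that. A minimal sketch of the pattern, with an emitting call site:

    #include <linux/types.h>

    #define CREATE_TRACE_POINTS	/* in one .c file only */
    #include "e1000e_trace.h"

    static void example_emit(u32 reg)
    {
    	/* Compiles to a static-branch no-op unless enabled at runtime
    	 * via tracefs (events/e1000e_trace/e1000e_trace_mac_register).
    	 */
    	trace_e1000e_trace_mac_register(reg);
    }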
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 51a5afe9df2f..721f86fd5802 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -110,9 +110,9 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
 static int e1000_get_link_ksettings(struct net_device *netdev,
 				    struct ethtool_link_ksettings *cmd)
 {
+	u32 speed, supported, advertising, lp_advertising, lpa_t;
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 speed, supported, advertising;

 	if (hw->phy.media_type == e1000_media_type_copper) {
 		supported = (SUPPORTED_10baseT_Half |
@@ -120,7 +120,9 @@ static int e1000_get_link_ksettings(struct net_device *netdev,
 			     SUPPORTED_100baseT_Half |
 			     SUPPORTED_100baseT_Full |
 			     SUPPORTED_1000baseT_Full |
+			     SUPPORTED_Asym_Pause |
 			     SUPPORTED_Autoneg |
+			     SUPPORTED_Pause |
 			     SUPPORTED_TP);
 		if (hw->phy.type == e1000_phy_ife)
 			supported &= ~SUPPORTED_1000baseT_Full;
@@ -192,10 +194,16 @@ static int e1000_get_link_ksettings(struct net_device *netdev,
 	if (hw->phy.media_type != e1000_media_type_copper)
 		cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;

+	lpa_t = mii_stat1000_to_ethtool_lpa_t(adapter->phy_regs.stat1000);
+	lp_advertising = lpa_t |
+	mii_lpa_to_ethtool_lpa_t(adapter->phy_regs.lpa);
+
 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
 						supported);
 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
 						advertising);
+	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+						lp_advertising);

 	return 0;
 }
@@ -908,6 +916,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 	case e1000_pch_adp:
 	case e1000_pch_mtp:
 	case e1000_pch_lnp:
+	case e1000_pch_ptp:
 		mask |= BIT(18);
 		break;
 	default:
@@ -1575,6 +1584,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
 	case e1000_pch_adp:
 	case e1000_pch_mtp:
 	case e1000_pch_lnp:
+	case e1000_pch_ptp:
 		fext_nvm11 = er32(FEXTNVM11);
 		fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
 		ew32(FEXTNVM11, fext_nvm11);
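The lp_advertising change above derives the link partner's modes from PHY registers the driver already caches (MII_LPA and MII_STAT1000), using the standard helpers from <linux/mii.h>. A sketch of the conversion step in isolation (the function name is illustrative):

    #include <linux/ethtool.h>
    #include <linux/mii.h>

    static void fill_lp_advertising(struct ethtool_link_ksettings *cmd,
    				u16 lpa, u16 stat1000)
    {
    	/* Map MII link-partner ability bits to legacy ADVERTISED_*
    	 * bits, then widen into the ethtool link-mode bitmap.
    	 */
    	u32 lp = mii_lpa_to_ethtool_lpa_t(lpa) |
    		 mii_stat1000_to_ethtool_lpa_t(stat1000);

    	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
    						lp);
    }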
"e1000e_trace.h"  char e1000e_driver_name[] = "e1000e"; @@ -53,6 +55,7 @@ static const struct e1000_info *e1000_info_tbl[] = {  	[board_pch_cnp]		= &e1000_pch_cnp_info,  	[board_pch_tgp]		= &e1000_pch_tgp_info,  	[board_pch_adp]		= &e1000_pch_adp_info, +	[board_pch_mtp]		= &e1000_pch_mtp_info,  };  struct e1000_reg_info { @@ -1388,26 +1391,18 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,  			/* page alloc/put takes too long and effects small  			 * packet throughput, so unsplit small packets and -			 * save the alloc/put only valid in softirq (napi) -			 * context to call kmap_* +			 * save the alloc/put  			 */  			if (l1 && (l1 <= copybreak) &&  			    ((length + l1) <= adapter->rx_ps_bsize0)) { -				u8 *vaddr; -  				ps_page = &buffer_info->ps_pages[0]; -				/* there is no documentation about how to call -				 * kmap_atomic, so we can't hold the mapping -				 * very long -				 */  				dma_sync_single_for_cpu(&pdev->dev,  							ps_page->dma,  							PAGE_SIZE,  							DMA_FROM_DEVICE); -				vaddr = kmap_atomic(ps_page->page); -				memcpy(skb_tail_pointer(skb), vaddr, l1); -				kunmap_atomic(vaddr); +				memcpy(skb_tail_pointer(skb), +				       page_address(ps_page->page), l1);  				dma_sync_single_for_device(&pdev->dev,  							   ps_page->dma,  							   PAGE_SIZE, @@ -1607,11 +1602,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,  				 */  				if (length <= copybreak &&  				    skb_tailroom(skb) >= length) { -					u8 *vaddr; -					vaddr = kmap_atomic(buffer_info->page); -					memcpy(skb_tail_pointer(skb), vaddr, +					memcpy(skb_tail_pointer(skb), +					       page_address(buffer_info->page),  					       length); -					kunmap_atomic(vaddr);  					/* re-use the page, so don't erase  					 * buffer_info->page  					 */ @@ -3552,6 +3545,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)  	case e1000_pch_adp:  	case e1000_pch_mtp:  	case e1000_pch_lnp: +	case e1000_pch_ptp:  		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {  			/* Stable 24MHz frequency */  			incperiod = INCPERIOD_24MHZ; @@ -4067,6 +4061,7 @@ void e1000e_reset(struct e1000_adapter *adapter)  	case e1000_pch_adp:  	case e1000_pch_mtp:  	case e1000_pch_lnp: +	case e1000_pch_ptp:  		fc->refresh_time = 0xFFFF;  		fc->pause_time = 0xFFFF; @@ -5936,9 +5931,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,  		e1000_tx_queue(tx_ring, tx_flags, count);  		/* Make sure there is space in the ring for the next send. */  		e1000_maybe_stop_tx(tx_ring, -				    (MAX_SKB_FRAGS * +				    ((MAX_SKB_FRAGS + 1) *  				     DIV_ROUND_UP(PAGE_SIZE, -						  adapter->tx_fifo_limit) + 2)); +						  adapter->tx_fifo_limit) + 4));  		if (!netdev_xmit_more() ||  		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { @@ -6348,6 +6343,7 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)  		mac_data = er32(H2ME);  		mac_data |= E1000_H2ME_START_DPG;  		mac_data &= ~E1000_H2ME_EXIT_DPG; +		trace_e1000e_trace_mac_register(mac_data);  		ew32(H2ME, mac_data);  	} else {  		/* Request driver configure the device to S0ix */ @@ -6502,6 +6498,7 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)  		mac_data = er32(H2ME);  		mac_data &= ~E1000_H2ME_START_DPG;  		mac_data |= E1000_H2ME_EXIT_DPG; +		trace_e1000e_trace_mac_register(mac_data);  		ew32(H2ME, mac_data);  		/* Poll up to 2.5 seconds for ME to unconfigure DPG. 
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 49e926959ad3..e1eb1de88bf9 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -28,6 +28,8 @@
 #include <linux/suspend.h>

 #include "e1000.h"
+#define CREATE_TRACE_POINTS
+#include "e1000e_trace.h"

 char e1000e_driver_name[] = "e1000e";

@@ -53,6 +55,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
 	[board_pch_cnp]		= &e1000_pch_cnp_info,
 	[board_pch_tgp]		= &e1000_pch_tgp_info,
 	[board_pch_adp]		= &e1000_pch_adp_info,
+	[board_pch_mtp]		= &e1000_pch_mtp_info,
 };

 struct e1000_reg_info {
@@ -1388,26 +1391,18 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,

 			/* page alloc/put takes too long and effects small
 			 * packet throughput, so unsplit small packets and
-			 * save the alloc/put only valid in softirq (napi)
-			 * context to call kmap_*
+			 * save the alloc/put
 			 */
 			if (l1 && (l1 <= copybreak) &&
 			    ((length + l1) <= adapter->rx_ps_bsize0)) {
-				u8 *vaddr;
-
 				ps_page = &buffer_info->ps_pages[0];

-				/* there is no documentation about how to call
-				 * kmap_atomic, so we can't hold the mapping
-				 * very long
-				 */
 				dma_sync_single_for_cpu(&pdev->dev,
 							ps_page->dma,
 							PAGE_SIZE,
 							DMA_FROM_DEVICE);
-				vaddr = kmap_atomic(ps_page->page);
-				memcpy(skb_tail_pointer(skb), vaddr, l1);
-				kunmap_atomic(vaddr);
+				memcpy(skb_tail_pointer(skb),
+				       page_address(ps_page->page), l1);
 				dma_sync_single_for_device(&pdev->dev,
 							   ps_page->dma,
 							   PAGE_SIZE,
@@ -1607,11 +1602,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 				 */
 				if (length <= copybreak &&
 				    skb_tailroom(skb) >= length) {
-					u8 *vaddr;
-					vaddr = kmap_atomic(buffer_info->page);
-					memcpy(skb_tail_pointer(skb), vaddr,
+					memcpy(skb_tail_pointer(skb),
+					       page_address(buffer_info->page),
 					       length);
-					kunmap_atomic(vaddr);
 					/* re-use the page, so don't erase
 					 * buffer_info->page
 					 */
@@ -3552,6 +3545,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
 	case e1000_pch_adp:
 	case e1000_pch_mtp:
 	case e1000_pch_lnp:
+	case e1000_pch_ptp:
 		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
 			/* Stable 24MHz frequency */
 			incperiod = INCPERIOD_24MHZ;
@@ -4067,6 +4061,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	case e1000_pch_adp:
 	case e1000_pch_mtp:
 	case e1000_pch_lnp:
+	case e1000_pch_ptp:
 		fc->refresh_time = 0xFFFF;
 		fc->pause_time = 0xFFFF;

@@ -5936,9 +5931,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		e1000_tx_queue(tx_ring, tx_flags, count);
 		/* Make sure there is space in the ring for the next send. */
 		e1000_maybe_stop_tx(tx_ring,
-				    (MAX_SKB_FRAGS *
+				    ((MAX_SKB_FRAGS + 1) *
 				     DIV_ROUND_UP(PAGE_SIZE,
-						  adapter->tx_fifo_limit) + 2));
+						  adapter->tx_fifo_limit) + 4));

 		if (!netdev_xmit_more() ||
 		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
@@ -6348,6 +6343,7 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
 		mac_data = er32(H2ME);
 		mac_data |= E1000_H2ME_START_DPG;
 		mac_data &= ~E1000_H2ME_EXIT_DPG;
+		trace_e1000e_trace_mac_register(mac_data);
 		ew32(H2ME, mac_data);
 	} else {
 		/* Request driver configure the device to S0ix */
@@ -6502,6 +6498,7 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
 		mac_data = er32(H2ME);
 		mac_data &= ~E1000_H2ME_START_DPG;
 		mac_data |= E1000_H2ME_EXIT_DPG;
+		trace_e1000e_trace_mac_register(mac_data);
 		ew32(H2ME, mac_data);

 		/* Poll up to 2.5 seconds for ME to unconfigure DPG.
@@ -7421,9 +7418,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_pci_reg;

-	/* AER (Advanced Error Reporting) hooks */
-	pci_enable_pcie_error_reporting(pdev);
-
 	pci_set_master(pdev);
 	/* PCI config space info */
 	err = pci_save_state(pdev);
@@ -7711,7 +7705,6 @@ err_flashmap:
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
-	pci_disable_pcie_error_reporting(pdev);
 	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -7778,9 +7771,6 @@ static void e1000_remove(struct pci_dev *pdev)

 	free_netdev(netdev);

-	/* AER disable */
-	pci_disable_pcie_error_reporting(pdev);
-
 	pci_disable_device(pdev);
 }

@@ -7905,14 +7895,22 @@ static const struct pci_device_id e1000_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_adp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_adp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_adp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_adp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_adp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_adp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_adp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_adp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ARL_I219_LM24), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ARL_I219_V24), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM25), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V25), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM26), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V26), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM27), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V27), board_pch_mtp },

 	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
 };
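On the e1000_maybe_stop_tx() change above: the reserve is the worst-case number of descriptors one more frame might need. The rough arithmetic, restated (values illustrative, not driver code):

    #include <linux/kernel.h>
    #include <linux/skbuff.h>

    /* Worst case: the linear part plus MAX_SKB_FRAGS fragments, each of
     * which may be split into DIV_ROUND_UP(PAGE_SIZE, tx_fifo_limit)
     * descriptors, plus slack for context descriptors.
     */
    static unsigned int tx_desc_reserve(unsigned int tx_fifo_limit)
    {
    	return (MAX_SKB_FRAGS + 1) *
    	       DIV_ROUND_UP(PAGE_SIZE, tx_fifo_limit) + 4;
    }

The old formula counted only the fragments plus two descriptors of slack; the new one also counts the linear part and doubles the slack.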
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 060b263348ce..08c3d477dd6f 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2,6 +2,7 @@
 /* Copyright(c) 1999 - 2018 Intel Corporation. */

 #include "e1000.h"
+#include <linux/ethtool.h>

 static s32 e1000_wait_autoneg(struct e1000_hw *hw);
 static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
@@ -1011,6 +1012,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 		 */
 		mii_autoneg_adv_reg &=
 		    ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+		phy->autoneg_advertised &=
+		    ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
 		break;
 	case e1000_fc_rx_pause:
 		/* Rx Flow control is enabled, and Tx Flow control is
@@ -1024,6 +1027,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 		 */
 		mii_autoneg_adv_reg |=
 		    (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+		phy->autoneg_advertised |=
+		    (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
 		break;
 	case e1000_fc_tx_pause:
 		/* Tx Flow control is enabled, and Rx Flow control is
@@ -1031,6 +1036,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 		 */
 		mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM;
 		mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP;
+		phy->autoneg_advertised |= ADVERTISED_Asym_Pause;
+		phy->autoneg_advertised &= ~ADVERTISED_Pause;
 		break;
 	case e1000_fc_full:
 		/* Flow control (both Rx and Tx) is enabled by a software
@@ -1038,6 +1045,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 		 */
 		mii_autoneg_adv_reg |=
 		    (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+		phy->autoneg_advertised |=
+		    (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
 		break;
 	default:
 		e_dbg("Flow control param set incorrectly\n");
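The phy.c hunks above keep phy->autoneg_advertised in sync with the MII advertisement register so the pause flags reported via ethtool match what is actually advertised on the wire. The bit mapping being mirrored, restated on its own (not driver code):

    #include <linux/ethtool.h>
    #include <linux/mii.h>

    static u32 mii_pause_to_ethtool_adv(u16 mii_adv)
    {
    	u32 adv = 0;

    	if (mii_adv & ADVERTISE_PAUSE_CAP)	/* symmetric pause */
    		adv |= ADVERTISED_Pause;
    	if (mii_adv & ADVERTISE_PAUSE_ASYM)	/* asymmetric pause */
    		adv |= ADVERTISED_Asym_Pause;
    	return adv;
    }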
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 0e488e4fa5c1..def4566a916f 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -29,17 +29,11 @@ static int e1000e_phc_adjfine(struct ptp_clock_info *ptp, long delta)
 	struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
 						     ptp_clock_info);
 	struct e1000_hw *hw = &adapter->hw;
-	bool neg_adj = false;
 	unsigned long flags;
-	u64 adjustment;
-	u32 timinca, incvalue;
+	u64 incvalue;
+	u32 timinca;
 	s32 ret_val;

-	if (delta < 0) {
-		neg_adj = true;
-		delta = -delta;
-	}
-
 	/* Get the System Time Register SYSTIM base frequency */
 	ret_val = e1000e_get_base_timinca(adapter, &timinca);
 	if (ret_val)
@@ -48,11 +42,7 @@ static int e1000e_phc_adjfine(struct ptp_clock_info *ptp, long delta)
 	spin_lock_irqsave(&adapter->systim_lock, flags);

 	incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
-
-	adjustment = mul_u64_u64_div_u64(incvalue, (u64)delta,
-					 1000000ULL << 16);
-
-	incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment);
+	incvalue = adjust_by_scaled_ppm(incvalue, delta);

 	timinca &= ~E1000_TIMINCA_INCVALUE_MASK;
 	timinca |= incvalue;
@@ -297,6 +287,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
 	case e1000_pch_adp:
 	case e1000_pch_mtp:
 	case e1000_pch_lnp:
+	case e1000_pch_ptp:
 		if ((hw->mac.type < e1000_pch_lpt) ||
 		    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
 			adapter->ptp_clock_info.max_adj = 24000000 - 1;
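adjust_by_scaled_ppm() (from <linux/ptp_clock_kernel.h>) folds the sign handling and fixed-point arithmetic that the old adjfine code open-coded. What it computes, restated for reference:

    #include <linux/math64.h>

    /* base * (1 + scaled_ppm / (10^6 * 2^16)), where scaled_ppm is a
     * signed parts-per-million value with a 16-bit fractional part.
     * This mirrors the deleted neg_adj/mul_u64_u64_div_u64 sequence.
     */
    static u64 adjfine_open_coded(u64 base, long scaled_ppm)
    {
    	bool neg = scaled_ppm < 0;
    	u64 diff;

    	if (neg)
    		scaled_ppm = -scaled_ppm;
    	diff = mul_u64_u64_div_u64(base, scaled_ppm, 1000000ULL << 16);
    	return neg ? base - diff : base + diff;
    }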
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2cca9e84e31e..34ab5ff9823b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1229,10 +1229,10 @@ static void fm10k_get_stats64(struct net_device *netdev,
 			continue;

 		do {
-			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			start = u64_stats_fetch_begin(&ring->syncp);
 			packets = ring->stats.packets;
 			bytes   = ring->stats.bytes;
-		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+		} while (u64_stats_fetch_retry(&ring->syncp, start));

 		stats->rx_packets += packets;
 		stats->rx_bytes   += bytes;
@@ -1245,10 +1245,10 @@ static void fm10k_get_stats64(struct net_device *netdev,
 			continue;

 		do {
-			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			start = u64_stats_fetch_begin(&ring->syncp);
 			packets = ring->stats.packets;
 			bytes   = ring->stats.bytes;
-		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+		} while (u64_stats_fetch_retry(&ring->syncp, start));

 		stats->tx_packets += packets;
 		stats->tx_bytes   += bytes;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index b473cb7d7c57..027d721feb18 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2127,8 +2127,6 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_pci_reg;
 	}

-	pci_enable_pcie_error_reporting(pdev);
-
 	pci_set_master(pdev);
 	pci_save_state(pdev);

@@ -2227,7 +2225,6 @@ err_sw_init:
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_netdev:
-	pci_disable_pcie_error_reporting(pdev);
 	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -2281,8 +2278,6 @@ static void fm10k_remove(struct pci_dev *pdev)

 	pci_release_mem_regions(pdev);

-	pci_disable_pcie_error_reporting(pdev);
-
 	pci_disable_device(pdev);
 }
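The u64_stats_fetch_begin_irq()/..._retry_irq() calls became plain u64_stats_fetch_begin()/..._retry() as part of the tree-wide removal of the _irq variants; the reader pattern itself is unchanged. The canonical loop, with an illustrative stats struct:

    #include <linux/u64_stats_sync.h>

    struct ring_stats {
    	struct u64_stats_sync syncp;
    	u64 packets;
    	u64 bytes;
    };

    /* Retry until a consistent snapshot is read; on 64-bit kernels the
     * begin/retry pair compiles away entirely.
     */
    static void snapshot_stats(struct ring_stats *r, u64 *packets, u64 *bytes)
    {
    	unsigned int start;

    	do {
    		start = u64_stats_fetch_begin(&r->syncp);
    		*packets = r->packets;
    		*bytes = r->bytes;
    	} while (u64_stats_fetch_retry(&r->syncp, start));
    }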
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 9a60d6b207f7..60ce4d15d82a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -33,6 +33,7 @@
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
 #include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
 #include <net/tc_act/tc_gact.h>
 #include <net/tc_act/tc_mirred.h>
 #include <net/udp_tunnel.h>
@@ -176,7 +177,7 @@ enum i40e_interrupt_policy {

 struct i40e_lump_tracking {
 	u16 num_entries;
-	u16 list[0];
+	u16 list[];
 #define I40E_PILE_VALID_BIT  0x8000
 #define I40E_IWARP_IRQ_PILE_ID  (I40E_PILE_VALID_BIT - 2)
 };

@@ -992,6 +993,7 @@ struct i40e_q_vector {
 	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[I40E_INT_NAME_STR_LEN];
 	bool arm_wb_state;
+	int irq_num;		/* IRQ assigned to this q_vector */
 } ____cacheline_internodealigned_in_smp;

 /* lan device */
@@ -1286,9 +1288,9 @@ void i40e_ptp_stop(struct i40e_pf *pf);
 int i40e_ptp_alloc_pins(struct i40e_pf *pf);
 int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
+int i40e_get_partition_bw_setting(struct i40e_pf *pf);
+int i40e_set_partition_bw_setting(struct i40e_pf *pf);
+int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);

 void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 42439f725aa4..86fac8f959bb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -47,9 +47,9 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
  *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
  *  @hw: pointer to the hardware structure
  **/
-static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
 {
-	i40e_status ret_code;
+	int ret_code;

 	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
 					 i40e_mem_atq_ring,
@@ -74,9 +74,9 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
  *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
  *  @hw: pointer to the hardware structure
  **/
-static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
 {
-	i40e_status ret_code;
+	int ret_code;

 	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
 					 i40e_mem_arq_ring,
@@ -115,11 +115,11 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw)
  *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
  *  @hw: pointer to the hardware structure
  **/
-static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
 {
-	i40e_status ret_code;
 	struct i40e_aq_desc *desc;
 	struct i40e_dma_mem *bi;
+	int ret_code;
 	int i;

 	/* We'll be allocating the buffer info memory first, then we can
@@ -182,10 +182,10 @@ unwind_alloc_arq_bufs:
  *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
  *  @hw: pointer to the hardware structure
  **/
-static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
 {
-	i40e_status ret_code;
 	struct i40e_dma_mem *bi;
+	int ret_code;
 	int i;

 	/* No mapped memory needed yet, just the buffer info structures */
@@ -266,9 +266,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
  *
  *  Configure base address and length registers for the transmit queue
  **/
-static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
+static int i40e_config_asq_regs(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	int ret_code = 0;
 	u32 reg = 0;

 	/* Clear Head and Tail */
@@ -295,9 +295,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
  *
  * Configure base address and length registers for the receive (event queue)
  **/
-static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
+static int i40e_config_arq_regs(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	int ret_code = 0;
 	u32 reg = 0;

 	/* Clear Head and Tail */
@@ -334,9 +334,9 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
  *  Do *NOT* hold the lock when calling this as the memory allocation routines
  *  called are not going to be atomic context safe
  **/
-static i40e_status i40e_init_asq(struct i40e_hw *hw)
+static int i40e_init_asq(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	int ret_code = 0;

 	if (hw->aq.asq.count > 0) {
 		/* queue already initialized */
@@ -393,9 +393,9 @@ init_adminq_exit:
  *  Do *NOT* hold the lock when calling this as the memory allocation routines
  *  called are not going to be atomic context safe
  **/
-static i40e_status i40e_init_arq(struct i40e_hw *hw)
+static int i40e_init_arq(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	int ret_code = 0;

 	if (hw->aq.arq.count > 0) {
 		/* queue already initialized */
@@ -445,9 +445,9 @@ init_adminq_exit:
  *
  *  The main shutdown routine for the Admin Send Queue
  **/
-static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+static int i40e_shutdown_asq(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	int ret_code = 0;

 	mutex_lock(&hw->aq.asq_mutex);

@@ -479,9 +479,9 @@ shutdown_asq_out:
  *
  *  The main shutdown routine for the Admin Receive Queue
  **/
-static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+static int i40e_shutdown_arq(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	int ret_code = 0;

 	mutex_lock(&hw->aq.arq_mutex);

@@ -582,12 +582,12 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
  *     - hw->aq.arq_buf_size
  *     - hw->aq.asq_buf_size
  **/
-i40e_status i40e_init_adminq(struct i40e_hw *hw)
+int i40e_init_adminq(struct i40e_hw *hw)
 {
 	u16 cfg_ptr, oem_hi, oem_lo;
 	u16 eetrack_lo, eetrack_hi;
-	i40e_status ret_code;
 	int retry = 0;
+	int ret_code;

 	/* verify input for valid configuration */
 	if ((hw->aq.num_arq_entries == 0) ||
@@ -780,7 +780,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
  *  This is the main send command driver routine for the Admin Queue send
  *  queue.  It runs the queue, cleans the queue, etc
  **/
-static i40e_status
+static int
 i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 				  struct i40e_aq_desc *desc,
 				  void *buff, /* can be NULL */
@@ -788,12 +788,12 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 				  struct i40e_asq_cmd_details *cmd_details,
 				  bool is_atomic_context)
 {
-	i40e_status status = 0;
 	struct i40e_dma_mem *dma_buff = NULL;
 	struct i40e_asq_cmd_details *details;
 	struct i40e_aq_desc *desc_on_ring;
 	bool cmd_completed = false;
 	u16  retval = 0;
+	int status = 0;
 	u32  val = 0;

 	if (hw->aq.asq.count == 0) {
@@ -984,7 +984,7 @@ asq_send_command_error:
  *  Acquires the lock and calls the main send command execution
  *  routine.
  **/
-i40e_status
+int
 i40e_asq_send_command_atomic(struct i40e_hw *hw,
 			     struct i40e_aq_desc *desc,
 			     void *buff, /* can be NULL */
@@ -992,7 +992,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
 			     struct i40e_asq_cmd_details *cmd_details,
 			     bool is_atomic_context)
 {
-	i40e_status status;
+	int status;

 	mutex_lock(&hw->aq.asq_mutex);
 	status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
@@ -1003,7 +1003,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
 	return status;
 }

-i40e_status
+int
 i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
 		      void *buff, /* can be NULL */ u16  buff_size,
 		      struct i40e_asq_cmd_details *cmd_details)
@@ -1026,7 +1026,7 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
  *  routine. Returns the last Admin Queue status in aq_status
  *  to avoid race conditions in access to hw->aq.asq_last_status.
  **/
-i40e_status
+int
 i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
 				struct i40e_aq_desc *desc,
 				void *buff, /* can be NULL */
@@ -1035,7 +1035,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
 				bool is_atomic_context,
 				enum i40e_admin_queue_err *aq_status)
 {
-	i40e_status status;
+	int status;

 	mutex_lock(&hw->aq.asq_mutex);
 	status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
@@ -1048,7 +1048,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
 	return status;
 }

-i40e_status
+int
 i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
 			 void *buff, /* can be NULL */ u16  buff_size,
 			 struct i40e_asq_cmd_details *cmd_details,
@@ -1084,14 +1084,14 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
  *  the contents through e.  It can also return how many events are
  *  left to process through 'pending'
  **/
-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
-					     struct i40e_arq_event_info *e,
-					     u16 *pending)
+int i40e_clean_arq_element(struct i40e_hw *hw,
+			   struct i40e_arq_event_info *e,
+			   u16 *pending)
 {
-	i40e_status ret_code = 0;
 	u16 ntc = hw->aq.arq.next_to_clean;
 	struct i40e_aq_desc *desc;
 	struct i40e_dma_mem *bi;
+	int ret_code = 0;
 	u16 desc_idx;
 	u16 datalen;
 	u16 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 60f9e0a6aaca..3357d65a906b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1795,9 +1795,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
 /* Set Loopback mode (0x0618) */
 struct i40e_aqc_set_lb_mode {
 	__le16	lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL	0x01
-#define I40E_AQ_LB_PHY_REMOTE	0x02
-#define I40E_AQ_LB_MAC_LOCAL	0x04
+#define I40E_LEGACY_LOOPBACK_NVM_VER	0x6000
+#define I40E_AQ_LB_MAC_LOCAL		0x01
+#define I40E_AQ_LB_PHY_LOCAL		0x05
+#define I40E_AQ_LB_PHY_REMOTE		0x06
+#define I40E_AQ_LB_MAC_LOCAL_LEGACY	0x04
 	u8	reserved[14];
 };
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index cb8689222c8b..a6c9a9e343d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -20,16 +20,16 @@ enum i40e_memory_type {
 };

 /* prototype for functions used for dynamic memory allocation */
-i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
-					    struct i40e_dma_mem *mem,
-					    enum i40e_memory_type type,
-					    u64 size, u32 alignment);
-i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
-					struct i40e_dma_mem *mem);
-i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
-					     struct i40e_virt_mem *mem,
-					     u32 size);
-i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
-					 struct i40e_virt_mem *mem);
+int i40e_allocate_dma_mem(struct i40e_hw *hw,
+			  struct i40e_dma_mem *mem,
+			  enum i40e_memory_type type,
+			  u64 size, u32 alignment);
+int i40e_free_dma_mem(struct i40e_hw *hw,
+		      struct i40e_dma_mem *mem);
+int i40e_allocate_virt_mem(struct i40e_hw *hw,
+			   struct i40e_virt_mem *mem,
+			   u32 size);
+int i40e_free_virt_mem(struct i40e_hw *hw,
+		       struct i40e_virt_mem *mem);

 #endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 10d7a982a5b9..639c5a1ca853 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -541,9 +541,9 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
 {
 	struct i40e_pf *pf = ldev->pf;
 	struct i40e_hw *hw = &pf->hw;
-	i40e_status err;
+	int err;

-	err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
+	err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_RDMA,
 				     0, msg, len, NULL);
 	if (err)
 		dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
@@ -674,7 +674,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 	struct i40e_vsi_context ctxt;
 	bool update = true;
-	i40e_status err;
+	int err;

 	/* TODO: for now do not allow setting VF's VSI setting */
 	if (is_vf)
@@ -686,8 +686,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 	if (err) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't get PF vsi config, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, err),
+			 "couldn't get PF vsi config, err %pe aq_err %s\n",
+			 ERR_PTR(err),
 			 i40e_aq_str(&pf->hw,
 				     pf->hw.aq.asq_last_status));
 		return -ENOENT;
@@ -714,8 +714,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
 		err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 		if (err) {
 			dev_info(&pf->pdev->dev,
-				 "update VSI ctxt for PE failed, err %s aq_err %s\n",
-				 i40e_stat_str(&pf->hw, err),
+				 "update VSI ctxt for PE failed, err %pe aq_err %s\n",
+				 ERR_PTR(err),
 				 i40e_aq_str(&pf->hw,
 					     pf->hw.aq.asq_last_status));
 		}
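With i40e_status gone, error messages switch from i40e_stat_str() to the %pe vsprintf extension, which prints an errno-encoded pointer symbolically (e.g. "-EIO"); that is why the plain int is wrapped in ERR_PTR() at the call sites. A minimal sketch (device and error variable are illustrative):

    #include <linux/dev_printk.h>
    #include <linux/err.h>

    static void report_aq_error(struct device *dev, int err)
    {
    	if (err)
    		dev_info(dev, "command failed, err %pe\n", ERR_PTR(err));
    }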
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 4f01e2a6b6bb..ed88e38d488b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -14,9 +14,9 @@
  * This function sets the mac type of the adapter based on the
  * vendor ID and device ID stored in the hw structure.
  **/
-i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+int i40e_set_mac_type(struct i40e_hw *hw)
 {
-	i40e_status status = 0;
+	int status = 0;

 	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
 		switch (hw->device_id) {
@@ -125,154 +125,6 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
 }

 /**
- * i40e_stat_str - convert status err code to a string
- * @hw: pointer to the HW structure
- * @stat_err: the status error code to convert
- **/
-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
-{
-	switch (stat_err) {
-	case 0:
-		return "OK";
-	case I40E_ERR_NVM:
-		return "I40E_ERR_NVM";
-	case I40E_ERR_NVM_CHECKSUM:
-		return "I40E_ERR_NVM_CHECKSUM";
-	case I40E_ERR_PHY:
-		return "I40E_ERR_PHY";
-	case I40E_ERR_CONFIG:
-		return "I40E_ERR_CONFIG";
-	case I40E_ERR_PARAM:
-		return "I40E_ERR_PARAM";
-	case I40E_ERR_MAC_TYPE:
-		return "I40E_ERR_MAC_TYPE";
-	case I40E_ERR_UNKNOWN_PHY:
-		return "I40E_ERR_UNKNOWN_PHY";
-	case I40E_ERR_LINK_SETUP:
-		return "I40E_ERR_LINK_SETUP";
-	case I40E_ERR_ADAPTER_STOPPED:
-		return "I40E_ERR_ADAPTER_STOPPED";
-	case I40E_ERR_INVALID_MAC_ADDR:
-		return "I40E_ERR_INVALID_MAC_ADDR";
-	case I40E_ERR_DEVICE_NOT_SUPPORTED:
-		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
-	case I40E_ERR_PRIMARY_REQUESTS_PENDING:
-		return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
-	case I40E_ERR_INVALID_LINK_SETTINGS:
-		return "I40E_ERR_INVALID_LINK_SETTINGS";
-	case I40E_ERR_AUTONEG_NOT_COMPLETE:
-		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
-	case I40E_ERR_RESET_FAILED:
-		return "I40E_ERR_RESET_FAILED";
-	case I40E_ERR_SWFW_SYNC:
-		return "I40E_ERR_SWFW_SYNC";
-	case I40E_ERR_NO_AVAILABLE_VSI:
-		return "I40E_ERR_NO_AVAILABLE_VSI";
-	case I40E_ERR_NO_MEMORY:
-		return "I40E_ERR_NO_MEMORY";
-	case I40E_ERR_BAD_PTR:
-		return "I40E_ERR_BAD_PTR";
-	case I40E_ERR_RING_FULL:
-		return "I40E_ERR_RING_FULL";
-	case I40E_ERR_INVALID_PD_ID:
-		return "I40E_ERR_INVALID_PD_ID";
-	case I40E_ERR_INVALID_QP_ID:
-		return "I40E_ERR_INVALID_QP_ID";
-	case I40E_ERR_INVALID_CQ_ID:
-		return "I40E_ERR_INVALID_CQ_ID";
-	case I40E_ERR_INVALID_CEQ_ID:
-		return "I40E_ERR_INVALID_CEQ_ID";
-	case I40E_ERR_INVALID_AEQ_ID:
-		return "I40E_ERR_INVALID_AEQ_ID";
-	case I40E_ERR_INVALID_SIZE:
-		return "I40E_ERR_INVALID_SIZE";
-	case I40E_ERR_INVALID_ARP_INDEX:
-		return "I40E_ERR_INVALID_ARP_INDEX";
-	case I40E_ERR_INVALID_FPM_FUNC_ID:
-		return "I40E_ERR_INVALID_FPM_FUNC_ID";
-	case I40E_ERR_QP_INVALID_MSG_SIZE:
-		return "I40E_ERR_QP_INVALID_MSG_SIZE";
-	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
-		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
-	case I40E_ERR_INVALID_FRAG_COUNT:
-		return "I40E_ERR_INVALID_FRAG_COUNT";
-	case I40E_ERR_QUEUE_EMPTY:
-		return "I40E_ERR_QUEUE_EMPTY";
-	case I40E_ERR_INVALID_ALIGNMENT:
-		return "I40E_ERR_INVALID_ALIGNMENT";
-	case I40E_ERR_FLUSHED_QUEUE:
-		return "I40E_ERR_FLUSHED_QUEUE";
-	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
-		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
-	case I40E_ERR_INVALID_IMM_DATA_SIZE:
-		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
-	case I40E_ERR_TIMEOUT:
-		return "I40E_ERR_TIMEOUT";
-	case I40E_ERR_OPCODE_MISMATCH:
-		return "I40E_ERR_OPCODE_MISMATCH";
-	case I40E_ERR_CQP_COMPL_ERROR:
-		return "I40E_ERR_CQP_COMPL_ERROR";
-	case I40E_ERR_INVALID_VF_ID:
-		return "I40E_ERR_INVALID_VF_ID";
-	case I40E_ERR_INVALID_HMCFN_ID:
-		return "I40E_ERR_INVALID_HMCFN_ID";
-	case I40E_ERR_BACKING_PAGE_ERROR:
-		return "I40E_ERR_BACKING_PAGE_ERROR";
-	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
-		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
-	case I40E_ERR_INVALID_PBLE_INDEX:
-		return "I40E_ERR_INVALID_PBLE_INDEX";
-	case I40E_ERR_INVALID_SD_INDEX:
-		return "I40E_ERR_INVALID_SD_INDEX";
-	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
-		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
-	case I40E_ERR_INVALID_SD_TYPE:
-		return "I40E_ERR_INVALID_SD_TYPE";
-	case I40E_ERR_MEMCPY_FAILED:
-		return "I40E_ERR_MEMCPY_FAILED";
-	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
-		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
-	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
-		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
-	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
-		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
-	case I40E_ERR_SRQ_ENABLED:
-		return "I40E_ERR_SRQ_ENABLED";
-	case I40E_ERR_ADMIN_QUEUE_ERROR:
-		return "I40E_ERR_ADMIN_QUEUE_ERROR";
-	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
-		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
-	case I40E_ERR_BUF_TOO_SHORT:
-		return "I40E_ERR_BUF_TOO_SHORT";
-	case I40E_ERR_ADMIN_QUEUE_FULL:
-		return "I40E_ERR_ADMIN_QUEUE_FULL";
-	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
-		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
-	case I40E_ERR_BAD_IWARP_CQE:
-		return "I40E_ERR_BAD_IWARP_CQE";
-	case I40E_ERR_NVM_BLANK_MODE:
-		return "I40E_ERR_NVM_BLANK_MODE";
-	case I40E_ERR_NOT_IMPLEMENTED:
-		return "I40E_ERR_NOT_IMPLEMENTED";
-	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
-		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
-	case I40E_ERR_DIAG_TEST_FAILED:
-		return "I40E_ERR_DIAG_TEST_FAILED";
-	case I40E_ERR_NOT_READY:
-		return "I40E_ERR_NOT_READY";
-	case I40E_NOT_SUPPORTED:
-		return "I40E_NOT_SUPPORTED";
-	case I40E_ERR_FIRMWARE_API_VERSION:
-		return "I40E_ERR_FIRMWARE_API_VERSION";
-	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
-		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
-	}
-
-	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
-	return hw->err_str;
-}
-
-/**
  * i40e_debug_aq
  * @hw: debug mask related to admin queue
  * @mask: debug mask
"I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; -	case I40E_ERR_INVALID_PBLE_INDEX: -		return "I40E_ERR_INVALID_PBLE_INDEX"; -	case I40E_ERR_INVALID_SD_INDEX: -		return "I40E_ERR_INVALID_SD_INDEX"; -	case I40E_ERR_INVALID_PAGE_DESC_INDEX: -		return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; -	case I40E_ERR_INVALID_SD_TYPE: -		return "I40E_ERR_INVALID_SD_TYPE"; -	case I40E_ERR_MEMCPY_FAILED: -		return "I40E_ERR_MEMCPY_FAILED"; -	case I40E_ERR_INVALID_HMC_OBJ_INDEX: -		return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; -	case I40E_ERR_INVALID_HMC_OBJ_COUNT: -		return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; -	case I40E_ERR_INVALID_SRQ_ARM_LIMIT: -		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; -	case I40E_ERR_SRQ_ENABLED: -		return "I40E_ERR_SRQ_ENABLED"; -	case I40E_ERR_ADMIN_QUEUE_ERROR: -		return "I40E_ERR_ADMIN_QUEUE_ERROR"; -	case I40E_ERR_ADMIN_QUEUE_TIMEOUT: -		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; -	case I40E_ERR_BUF_TOO_SHORT: -		return "I40E_ERR_BUF_TOO_SHORT"; -	case I40E_ERR_ADMIN_QUEUE_FULL: -		return "I40E_ERR_ADMIN_QUEUE_FULL"; -	case I40E_ERR_ADMIN_QUEUE_NO_WORK: -		return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; -	case I40E_ERR_BAD_IWARP_CQE: -		return "I40E_ERR_BAD_IWARP_CQE"; -	case I40E_ERR_NVM_BLANK_MODE: -		return "I40E_ERR_NVM_BLANK_MODE"; -	case I40E_ERR_NOT_IMPLEMENTED: -		return "I40E_ERR_NOT_IMPLEMENTED"; -	case I40E_ERR_PE_DOORBELL_NOT_ENABLED: -		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; -	case I40E_ERR_DIAG_TEST_FAILED: -		return "I40E_ERR_DIAG_TEST_FAILED"; -	case I40E_ERR_NOT_READY: -		return "I40E_ERR_NOT_READY"; -	case I40E_NOT_SUPPORTED: -		return "I40E_NOT_SUPPORTED"; -	case I40E_ERR_FIRMWARE_API_VERSION: -		return "I40E_ERR_FIRMWARE_API_VERSION"; -	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: -		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; -	} - -	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); -	return hw->err_str; -} - -/**   * i40e_debug_aq   * @hw: debug mask related to admin queue   * @mask: debug mask @@ -355,13 +207,13 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)   * Tell the Firmware that we're shutting down the AdminQ and whether   * or not the driver is unloading as well.   
@@ -469,16 +321,16 @@ i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
  *
  * get the RSS key per VSI
  **/
-static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
-				      u16 vsi_id,
-				      struct i40e_aqc_get_set_rss_key_data *key,
-				      bool set)
+static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+				   u16 vsi_id,
+				   struct i40e_aqc_get_set_rss_key_data *key,
+				   bool set)
 {
-	i40e_status status;
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_get_set_rss_key *cmd_resp =
 			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
 	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+	int status;

 	if (set)
 		i40e_fill_default_direct_cmd_desc(&desc,
@@ -509,9 +361,9 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
  * @key: pointer to key info struct
  *
  **/
-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
-				u16 vsi_id,
-				struct i40e_aqc_get_set_rss_key_data *key)
+int i40e_aq_get_rss_key(struct i40e_hw *hw,
+			u16 vsi_id,
+			struct i40e_aqc_get_set_rss_key_data *key)
 {
 	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
 }
@@ -524,9 +376,9 @@ i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
  *
  * set the RSS key per VSI
  **/
-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
-				u16 vsi_id,
-				struct i40e_aqc_get_set_rss_key_data *key)
+int i40e_aq_set_rss_key(struct i40e_hw *hw,
+			u16 vsi_id,
+			struct i40e_aqc_get_set_rss_key_data *key)
 {
 	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
 }
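A hypothetical caller of the RSS accessors converted above, programming a VSI's lookup table and hash key in one go (the helper name and error handling are illustrative, not part of the patch):

    static int example_config_rss(struct i40e_hw *hw, u16 vsi_id,
    			      u8 *lut, u16 lut_size,
    			      struct i40e_aqc_get_set_rss_key_data *seed)
    {
    	int status;

    	/* pf_lut == false selects the per-VSI lookup table */
    	status = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
    	if (status)
    		return status;

    	return i40e_aq_set_rss_key(hw, vsi_id, seed);
    }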
@@ -796,10 +648,10 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
  * hw_addr, back, device_id, vendor_id, subsystem_device_id,
  * subsystem_vendor_id, and revision_id
  **/
-i40e_status i40e_init_shared_code(struct i40e_hw *hw)
+int i40e_init_shared_code(struct i40e_hw *hw)
 {
-	i40e_status status = 0;
 	u32 port, ari, func_rid;
+	int status = 0;

 	i40e_set_mac_type(hw);
@@ -836,15 +688,16 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
  * @addrs: the requestor's mac addr store
  * @cmd_details: pointer to command details structure or NULL
  **/
-static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
-				   u16 *flags,
-				   struct i40e_aqc_mac_address_read_data *addrs,
-				   struct i40e_asq_cmd_details *cmd_details)
+static int
+i40e_aq_mac_address_read(struct i40e_hw *hw,
+			 u16 *flags,
+			 struct i40e_aqc_mac_address_read_data *addrs,
+			 struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_mac_address_read *cmd_data =
 		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
-	i40e_status status;
+	int status;

 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
 	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
@@ -863,14 +716,14 @@ static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
  * @mac_addr: address to write
  * @cmd_details: pointer to command details structure or NULL
  **/
-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
-				    u16 flags, u8 *mac_addr,
-				    struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_mac_address_write(struct i40e_hw *hw,
+			      u16 flags, u8 *mac_addr,
+			      struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_mac_address_write *cmd_data =
 		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
-	i40e_status status;
+	int status;

 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_mac_address_write);
@@ -893,11 +746,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
  *
  * Reads the adapter's MAC address from register
  **/
-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 {
 	struct i40e_aqc_mac_address_read_data addrs;
-	i40e_status status;
 	u16 flags = 0;
+	int status;

 	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

@@ -914,11 +767,11 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
  *
  * Reads the adapter's Port MAC address
  **/
-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 {
 	struct i40e_aqc_mac_address_read_data addrs;
-	i40e_status status;
 	u16 flags = 0;
+	int status;

 	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
 	if (status)
@@ -972,13 +825,13 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
  *
  *  Reads the part number string from the EEPROM.
  **/
-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
-				 u32 pba_num_size)
+int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+			 u32 pba_num_size)
 {
-	i40e_status status = 0;
 	u16 pba_word = 0;
 	u16 pba_size = 0;
 	u16 pba_ptr = 0;
+	int status = 0;
 	u16 i = 0;

 	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
@@ -1087,8 +940,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
  * @hw: pointer to the hardware structure
  * @retry_limit: how many times to retry before failure
  **/
-static i40e_status i40e_poll_globr(struct i40e_hw *hw,
-				   u32 retry_limit)
+static int i40e_poll_globr(struct i40e_hw *hw,
+			   u32 retry_limit)
 {
 	u32 cnt, reg = 0;

@@ -1114,7 +967,7 @@ static i40e_status i40e_poll_globr(struct i40e_hw *hw,
  * Assuming someone else has triggered a global reset,
  * assure the global reset is complete and then reset the PF
  **/
-i40e_status i40e_pf_reset(struct i40e_hw *hw)
+int i40e_pf_reset(struct i40e_hw *hw)
 {
 	u32 cnt = 0;
 	u32 cnt1 = 0;
@@ -1453,15 +1306,16 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
  *
  * Returns the various PHY abilities supported on the Port.
  **/
-i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
-			bool qualified_modules, bool report_init,
-			struct i40e_aq_get_phy_abilities_resp *abilities,
-			struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+			     bool qualified_modules, bool report_init,
+			     struct i40e_aq_get_phy_abilities_resp *abilities,
+			     struct i40e_asq_cmd_details *cmd_details)
 {
-	struct i40e_aq_desc desc;
-	i40e_status status;
 	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
 	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
+	struct i40e_aq_desc desc;
+	int status;

 	if (!abilities)
 		return I40E_ERR_PARAM;
@@ -1532,14 +1386,14 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
  * of the PHY Config parameters. This status will be indicated by the
  * command response.
  **/
-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
-				struct i40e_aq_set_phy_config *config,
-				struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_config(struct i40e_hw *hw,
+			   struct i40e_aq_set_phy_config *config,
+			   struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aq_set_phy_config *cmd =
 			(struct i40e_aq_set_phy_config *)&desc.params.raw;
-	enum i40e_status_code status;
+	int status;

 	if (!config)
 		return I40E_ERR_PARAM;
@@ -1554,7 +1408,7 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
 	return status;
 }

-static noinline_for_stack enum i40e_status_code
+static noinline_for_stack int
 i40e_set_fc_status(struct i40e_hw *hw,
 		   struct i40e_aq_get_phy_abilities_resp *abilities,
 		   bool atomic_restart)
@@ -1612,11 +1466,11 @@ i40e_set_fc_status(struct i40e_hw *hw,
  *
  * Set the requested flow control mode using set_phy_config.
  **/
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
-				  bool atomic_restart)
+int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+		bool atomic_restart)
 {
 	struct i40e_aq_get_phy_abilities_resp abilities;
-	enum i40e_status_code status;
+	int status;

 	*aq_failures = 0x0;
@@ -1655,13 +1509,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
  *
  * Tell the firmware that the driver is taking over from PXE
  **/
-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
-				struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+			   struct i40e_asq_cmd_details *cmd_details)
 {
-	i40e_status status;
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_clear_pxe *cmd =
 		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
+	int status;

 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_clear_pxe_mode);
@@ -1683,14 +1537,14 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
  *
  * Sets up the link and restarts the Auto-Negotiation over the link.
  **/
-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
-					bool enable_link,
-					struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+				bool enable_link,
+				struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_link_restart_an *cmd =
 		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
-	i40e_status status;
+	int status;

 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_link_restart_an);
@@ -1715,17 +1569,17 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
  *
  * Returns the link status of the adapter.
  **/
-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
-				bool enable_lse, struct i40e_link_status *link,
-				struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_link_info(struct i40e_hw *hw,
+			  bool enable_lse, struct i40e_link_status *link,
+			  struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_get_link_status *resp =
 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
-	i40e_status status;
 	bool tx_pause, rx_pause;
 	u16 command_flags;
+	int status;

 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
@@ -1811,14 +1665,14 @@ aq_get_link_info_exit:
  *
  * Set link interrupt mask.
  **/
-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
-				     u16 mask,
-				     struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+			     u16 mask,
+			     struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_phy_int_mask *cmd =
 		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
-	i40e_status status;
+	int status;

 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_phy_int_mask);
@@ -1831,6 +1685,32 @@ i40e_status i40e_aq_set_phy_int_mask,
 }

 /**
+ * i40e_aq_set_mac_loopback
+ * @hw: pointer to the HW struct
+ * @ena_lpbk: Enable or Disable loopback
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable/disable loopback on a given port
+ */
+int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
+			     struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_lb_mode *cmd =
+		(struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
+	if (ena_lpbk) {
+		if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
+			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
+		else
+			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
+	}
+
+	return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+}
+
+/**
  * i40e_aq_set_phy_debug
  * @hw: pointer to the hw struct
  * @cmd_flags: debug command flags
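A hypothetical caller of the new i40e_aq_set_mac_loopback() wrapper, bracketing a self-test; note the function itself selects the legacy lb_mode value when the NVM image predates I40E_LEGACY_LOOPBACK_NVM_VER (caller and error handling are illustrative):

    static int example_mac_loopback_test(struct i40e_hw *hw)
    {
    	int status;

    	status = i40e_aq_set_mac_loopback(hw, true, NULL);
    	if (status)
    		return status;

    	/* ... send and receive test frames here ... */

    	return i40e_aq_set_mac_loopback(hw, false, NULL);
    }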
@@ -1838,13 +1718,13 @@ i40e_status i40e_aq_set_phy_int_mask,
  *
  * Reset the external PHY.
  **/
-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
-				  struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+			  struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_phy_debug *cmd =
 		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
-	i40e_status status;
+	int status;

 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_phy_debug);
@@ -1879,9 +1759,9 @@ static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
  *
  * Add a VSI context to the hardware.
  **/
-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
-				struct i40e_vsi_context *vsi_ctx,
-				struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_vsi(struct i40e_hw *hw,
+		    struct i40e_vsi_context *vsi_ctx,
+		    struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_add_get_update_vsi *cmd =
@@ -1889,7 +1769,7 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
 	struct i40e_aqc_add_get_update_vsi_completion *resp =
 		(struct i40e_aqc_add_get_update_vsi_completion *)
 		&desc.params.raw;
-	i40e_status status;
+	int status;

 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_add_vsi);
@@ -1923,15 +1803,15 @@ aq_add_vsi_exit:
  * @seid: vsi number
  * @cmd_details: pointer to command details structure or NULL
  **/
-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
-				    u16 seid,
-				    struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_default_vsi(struct i40e_hw *hw,
+			    u16 seid,
+			    struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
 		(struct i40e_aqc_set_vsi_promiscuous_modes *)
 		&desc.params.raw;
-	i40e_status status;
+	int status;

 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -1951,15 +1831,15 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
  * @seid: vsi number
  * @cmd_details: pointer to command details structure or NULL
  **/
-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
-				      u16 seid,
-				      struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+			      u16 seid,
+			      struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
 		(struct i40e_aqc_set_vsi_promiscuous_modes *)
 		&desc.params.raw;
-	i40e_status status;
+	int status;

 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -1981,16 +1861,16 @@ i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
  * @cmd_details: pointer to command details structure or NULL
  * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
 **/
-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
-				u16 seid, bool set,
-				struct i40e_asq_cmd_details *cmd_details,
-				bool rx_only_promisc)
+int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+					u16 seid, bool set,
+					struct i40e_asq_cmd_details *cmd_details,
+					bool rx_only_promisc)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
-	i40e_status status;
 	u16 flags = 0;
+	int status;

 	i40e_fill_default_direct_cmd_desc(&desc,
 					i40e_aqc_opc_set_vsi_promiscuous_modes);
status;  	i40e_fill_default_direct_cmd_desc(&desc,  					i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2054,16 +1935,16 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,   * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag   * @cmd_details: pointer to command details structure or NULL   **/ -enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, -							 u16 seid, bool enable, -							 u16 vid, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, +				       u16 seid, bool enable, +				       u16 vid, +				       struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =  		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; -	enum i40e_status_code status;  	u16 flags = 0; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2090,16 +1971,16 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,   * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag   * @cmd_details: pointer to command details structure or NULL   **/ -enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, -							 u16 seid, bool enable, -							 u16 vid, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, +				       u16 seid, bool enable, +				       u16 vid, +				       struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =  		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; -	enum i40e_status_code status;  	u16 flags = 0; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2132,15 +2013,15 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,   * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, -				u16 seid, bool enable, u16 vid, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, +				       u16 seid, bool enable, u16 vid, +				       struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =  		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; -	i40e_status status;  	u16 flags = 0; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2167,14 +2048,14 @@ i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,   *   * Set or clear the broadcast promiscuous flag (filter) for a given VSI.   
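 *
 * A minimal caller sketch (hypothetical; hw and seid are assumed to be in
 * scope): after the i40e_status -> int conversion, nonzero still means the
 * AdminQ command failed:
 *
 *	int err = i40e_aq_set_vsi_broadcast(hw, seid, true, NULL);
 *
 *	if (err)
 *		return err;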
**/ -i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, -				u16 seid, bool set_filter, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, +			      u16 seid, bool set_filter, +			      struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =  		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2200,15 +2081,15 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,   * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, -				       u16 seid, bool enable, -				       struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, +				 u16 seid, bool enable, +				 struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =  		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; -	i40e_status status;  	u16 flags = 0; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					i40e_aqc_opc_set_vsi_promiscuous_modes); @@ -2230,9 +2111,9 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,   * @vsi_ctx: pointer to a vsi context struct   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, -				struct i40e_vsi_context *vsi_ctx, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_vsi_params(struct i40e_hw *hw, +			   struct i40e_vsi_context *vsi_ctx, +			   struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_add_get_update_vsi *cmd = @@ -2240,7 +2121,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,  	struct i40e_aqc_add_get_update_vsi_completion *resp =  		(struct i40e_aqc_add_get_update_vsi_completion *)  		&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_get_vsi_parameters); @@ -2272,9 +2153,9 @@ aq_get_vsi_params_exit:   *   * Update a VSI context.   
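 *
 * Hypothetical usage sketch (assumes vsi_ctx was previously filled in by
 * i40e_aq_get_vsi_params(); not taken from this patch):
 *
 *	int err = i40e_aq_update_vsi_params(hw, &vsi_ctx, NULL);
 *
 *	if (err)
 *		return err;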
**/ -i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, -				struct i40e_vsi_context *vsi_ctx, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_update_vsi_params(struct i40e_hw *hw, +			      struct i40e_vsi_context *vsi_ctx, +			      struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_add_get_update_vsi *cmd = @@ -2282,7 +2163,7 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,  	struct i40e_aqc_add_get_update_vsi_completion *resp =  		(struct i40e_aqc_add_get_update_vsi_completion *)  		&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_update_vsi_parameters); @@ -2310,15 +2191,15 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,   *   * Fill the buf with switch configuration returned from AdminQ command   **/ -i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, -				struct i40e_aqc_get_switch_config_resp *buf, -				u16 buf_size, u16 *start_seid, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_switch_config(struct i40e_hw *hw, +			      struct i40e_aqc_get_switch_config_resp *buf, +			      u16 buf_size, u16 *start_seid, +			      struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_switch_seid *scfg =  		(struct i40e_aqc_switch_seid *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_get_switch_config); @@ -2344,15 +2225,15 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,   *   * Set switch configuration bits   **/ -enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, -						u16 flags, -						u16 valid_flags, u8 mode, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_switch_config(struct i40e_hw *hw, +			      u16 flags, +			      u16 valid_flags, u8 mode, +			      struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_set_switch_config *scfg =  		(struct i40e_aqc_set_switch_config *)&desc.params.raw; -	enum i40e_status_code status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_set_switch_config); @@ -2381,16 +2262,16 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,   *   * Get the firmware version from the admin queue commands   **/ -i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, -				u16 *fw_major_version, u16 *fw_minor_version, -				u32 *fw_build, -				u16 *api_major_version, u16 *api_minor_version, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_firmware_version(struct i40e_hw *hw, +				 u16 *fw_major_version, u16 *fw_minor_version, +				 u32 *fw_build, +				 u16 *api_major_version, u16 *api_minor_version, +				 struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_get_version *resp =  		(struct i40e_aqc_get_version *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); @@ -2420,14 +2301,14 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,   *   * Send the driver version to the firmware   **/ -i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, +int i40e_aq_send_driver_version(struct i40e_hw *hw,  				struct i40e_driver_version *dv,  				struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_driver_version *cmd =  		(struct i40e_aqc_driver_version *)&desc.params.raw; -	i40e_status status; +	
int status;  	u16 len;  	if (dv == NULL) @@ -2462,9 +2343,9 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,   *   * Side effect: LinkStatusEvent reporting becomes enabled   **/ -i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) +int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)  { -	i40e_status status = 0; +	int status = 0;  	if (hw->phy.get_link_info) {  		status = i40e_update_link_info(hw); @@ -2483,10 +2364,10 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)   * i40e_update_link_info - update status of the HW network link   * @hw: pointer to the hw struct   **/ -noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) +noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)  {  	struct i40e_aq_get_phy_abilities_resp abilities; -	i40e_status status = 0; +	int status = 0;  	status = i40e_aq_get_link_info(hw, true, NULL, NULL);  	if (status) @@ -2533,19 +2414,19 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)   * This asks the FW to add a VEB between the uplink and downlink   * elements.  If the uplink SEID is 0, this will be a floating VEB.   **/ -i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, -				u16 downlink_seid, u8 enabled_tc, -				bool default_port, u16 *veb_seid, -				bool enable_stats, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, +		    u16 downlink_seid, u8 enabled_tc, +		    bool default_port, u16 *veb_seid, +		    bool enable_stats, +		    struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_add_veb *cmd =  		(struct i40e_aqc_add_veb *)&desc.params.raw;  	struct i40e_aqc_add_veb_completion *resp =  		(struct i40e_aqc_add_veb_completion *)&desc.params.raw; -	i40e_status status;  	u16 veb_flags = 0; +	int status;  	/* SEIDs need to either both be set or both be 0 for floating VEB */  	if (!!uplink_seid != !!downlink_seid) @@ -2591,17 +2472,17 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,   * This retrieves the parameters for a particular VEB, specified by   * uplink_seid, and returns them to the caller.   **/ -i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, -				u16 veb_seid, u16 *switch_id, -				bool *floating, u16 *statistic_index, -				u16 *vebs_used, u16 *vebs_free, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_veb_parameters(struct i40e_hw *hw, +			       u16 veb_seid, u16 *switch_id, +			       bool *floating, u16 *statistic_index, +			       u16 *vebs_used, u16 *vebs_free, +			       struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =  		(struct i40e_aqc_get_veb_parameters_completion *)  		&desc.params.raw; -	i40e_status status; +	int status;  	if (veb_seid == 0)  		return I40E_ERR_PARAM; @@ -2685,7 +2566,7 @@ i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,   *   * Add MAC/VLAN addresses to the HW filtering   **/ -i40e_status +int  i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,  		    struct i40e_aqc_add_macvlan_element_data *mv_list,  		    u16 count, struct i40e_asq_cmd_details *cmd_details) @@ -2717,7 +2598,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,   * It also calls _v2 versions of asq_send_command functions to   * get the aq_status on the stack.   
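 *
 * Illustrative sketch (the aq_status type name is an assumption here):
 * the _v2 variant hands the AdminQ return code back through its last
 * argument, so callers can react to it directly:
 *
 *	enum i40e_admin_queue_err aq_status;
 *	int err = i40e_aq_add_macvlan_v2(hw, seid, list, count, NULL,
 *					 &aq_status);
 *
 *	if (err && aq_status == I40E_AQ_RC_ENOSPC)
 *		; /* MAC/VLAN filter table is full */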
**/ -i40e_status +int  i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,  		       struct i40e_aqc_add_macvlan_element_data *mv_list,  		       u16 count, struct i40e_asq_cmd_details *cmd_details, @@ -2745,15 +2626,16 @@ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,   *   * Remove MAC/VLAN addresses from the HW filtering   **/ -i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, -			struct i40e_aqc_remove_macvlan_element_data *mv_list, -			u16 count, struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, +		       struct i40e_aqc_remove_macvlan_element_data *mv_list, +		       u16 count, struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_macvlan *cmd =  		(struct i40e_aqc_macvlan *)&desc.params.raw; -	i40e_status status;  	u16 buf_size; +	int status;  	if (count == 0 || !mv_list || !hw)  		return I40E_ERR_PARAM; @@ -2792,7 +2674,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,   * It also calls _v2 versions of asq_send_command functions to   * get the aq_status on the stack.   **/ -i40e_status +int  i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,  			  struct i40e_aqc_remove_macvlan_element_data *mv_list,  			  u16 count, struct i40e_asq_cmd_details *cmd_details, @@ -2840,19 +2722,19 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,   * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for   * VEBs/VEPA elements only   **/ -static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, -				u16 opcode, u16 sw_seid, u16 rule_type, u16 id, -				u16 count, __le16 *mr_list, -				struct i40e_asq_cmd_details *cmd_details, -				u16 *rule_id, u16 *rules_used, u16 *rules_free) +static int i40e_mirrorrule_op(struct i40e_hw *hw, +			      u16 opcode, u16 sw_seid, u16 rule_type, u16 id, +			      u16 count, __le16 *mr_list, +			      struct i40e_asq_cmd_details *cmd_details, +			      u16 *rule_id, u16 *rules_used, u16 *rules_free)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_add_delete_mirror_rule *cmd =  		(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;  	struct i40e_aqc_add_delete_mirror_rule_completion *resp =  	(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; -	i40e_status status;  	u16 buf_size; +	int status;  	buf_size = count * sizeof(*mr_list); @@ -2900,10 +2782,11 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,   *   * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only   **/ -i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, -			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, -			struct i40e_asq_cmd_details *cmd_details, -			u16 *rule_id, u16 *rules_used, u16 *rules_free) +int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, +			   u16 rule_type, u16 dest_vsi, u16 count, +			   __le16 *mr_list, +			   struct i40e_asq_cmd_details *cmd_details, +			   u16 *rule_id, u16 *rules_used, u16 *rules_free)  {  	if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||  	    rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { @@ -2931,10 +2814,11 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,   *   * Delete a mirror rule. 
Mirror rules are supported for VEBs/VEPA elements only   **/ -i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, -			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, -			struct i40e_asq_cmd_details *cmd_details, -			u16 *rules_used, u16 *rules_free) +int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, +			      u16 rule_type, u16 rule_id, u16 count, +			      __le16 *mr_list, +			      struct i40e_asq_cmd_details *cmd_details, +			      u16 *rules_used, u16 *rules_free)  {  	/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */  	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { @@ -2963,14 +2847,14 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,   *   * send msg to vf   **/ -i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, -				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, +			   u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, +			   struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_pf_vf_message *cmd =  		(struct i40e_aqc_pf_vf_message *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);  	cmd->id = cpu_to_le32(vfid); @@ -2998,14 +2882,14 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,   *   * Read the register using the admin queue commands   **/ -i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, +int i40e_aq_debug_read_register(struct i40e_hw *hw,  				u32 reg_addr, u64 *reg_val,  				struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_debug_reg_read_write *cmd_resp =  		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; -	i40e_status status; +	int status;  	if (reg_val == NULL)  		return I40E_ERR_PARAM; @@ -3033,14 +2917,14 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,   *   * Write to a register using the admin queue commands   **/ -i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, -					u32 reg_addr, u64 reg_val, -					struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_debug_write_register(struct i40e_hw *hw, +				 u32 reg_addr, u64 reg_val, +				 struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_debug_reg_read_write *cmd =  		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); @@ -3064,16 +2948,16 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,   *   * requests common resource using the admin queue commands   **/ -i40e_status i40e_aq_request_resource(struct i40e_hw *hw, -				enum i40e_aq_resources_ids resource, -				enum i40e_aq_resource_access_type access, -				u8 sdp_number, u64 *timeout, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_request_resource(struct i40e_hw *hw, +			     enum i40e_aq_resources_ids resource, +			     enum i40e_aq_resource_access_type access, +			     u8 sdp_number, u64 *timeout, +			     struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_request_resource *cmd_resp =  		(struct i40e_aqc_request_resource *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); @@ -3103,15 +2987,15 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw, 
  *   * release common resource using the admin queue commands   **/ -i40e_status i40e_aq_release_resource(struct i40e_hw *hw, -				enum i40e_aq_resources_ids resource, -				u8 sdp_number, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_release_resource(struct i40e_hw *hw, +			     enum i40e_aq_resources_ids resource, +			     u8 sdp_number, +			     struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_request_resource *cmd =  		(struct i40e_aqc_request_resource *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); @@ -3135,15 +3019,15 @@ i40e_status i40e_aq_release_resource(struct i40e_hw *hw,   *   * Read the NVM using the admin queue commands   **/ -i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, -				u32 offset, u16 length, void *data, -				bool last_command, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, +		     u32 offset, u16 length, void *data, +		     bool last_command, +		     struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_nvm_update *cmd =  		(struct i40e_aqc_nvm_update *)&desc.params.raw; -	i40e_status status; +	int status;  	/* In offset the highest byte must be zeroed. */  	if (offset & 0xFF000000) { @@ -3181,14 +3065,14 @@ i40e_aq_read_nvm_exit:   *   * Erase the NVM sector using the admin queue commands   **/ -i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, -			      u32 offset, u16 length, bool last_command, -			      struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, +		      u32 offset, u16 length, bool last_command, +		      struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_nvm_update *cmd =  		(struct i40e_aqc_nvm_update *)&desc.params.raw; -	i40e_status status; +	int status;  	/* In offset the highest byte must be zeroed. 
*/  	if (offset & 0xFF000000) { @@ -3229,8 +3113,8 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,  	u32 number, logical_id, phys_id;  	struct i40e_hw_capabilities *p;  	u16 id, ocp_cfg_word0; -	i40e_status status;  	u8 major_rev; +	int status;  	u32 i = 0;  	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; @@ -3471,14 +3355,14 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,   *   * Get the device capabilities descriptions from the firmware   **/ -i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, -				void *buff, u16 buff_size, u16 *data_size, -				enum i40e_admin_queue_opc list_type_opc, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_discover_capabilities(struct i40e_hw *hw, +				  void *buff, u16 buff_size, u16 *data_size, +				  enum i40e_admin_queue_opc list_type_opc, +				  struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aqc_list_capabilites *cmd;  	struct i40e_aq_desc desc; -	i40e_status status = 0; +	int status = 0;  	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; @@ -3520,15 +3404,15 @@ exit:   *   * Update the NVM using the admin queue commands   **/ -i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, -			       u32 offset, u16 length, void *data, -				bool last_command, u8 preservation_flags, -			       struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, +		       u32 offset, u16 length, void *data, +		       bool last_command, u8 preservation_flags, +		       struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_nvm_update *cmd =  		(struct i40e_aqc_nvm_update *)&desc.params.raw; -	i40e_status status; +	int status;  	/* In offset the highest byte must be zeroed. */  	if (offset & 0xFF000000) { @@ -3573,13 +3457,13 @@ i40e_aq_update_nvm_exit:   *   * Rearrange NVM structure, available only for transition FW   **/ -i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, -				  u8 rearrange_nvm, -				  struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_rearrange_nvm(struct i40e_hw *hw, +			  u8 rearrange_nvm, +			  struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aqc_nvm_update *cmd; -	i40e_status status;  	struct i40e_aq_desc desc; +	int status;  	cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; @@ -3613,17 +3497,17 @@ i40e_aq_rearrange_nvm_exit:   *   * Requests the complete LLDP MIB (entire packet).   **/ -i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, -				u8 mib_type, void *buff, u16 buff_size, -				u16 *local_len, u16 *remote_len, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, +			 u8 mib_type, void *buff, u16 buff_size, +			 u16 *local_len, u16 *remote_len, +			 struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_lldp_get_mib *cmd =  		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;  	struct i40e_aqc_lldp_get_mib *resp =  		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw; -	i40e_status status; +	int status;  	if (buff_size == 0 || !buff)  		return I40E_ERR_PARAM; @@ -3663,14 +3547,14 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,   *   * Set the LLDP MIB.   
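 *
 * Hypothetical sketch (lldpmib/miblen are assumed to hold a MIB built by
 * i40e_dcb_config_to_lldp()): a NULL or zero-length buffer is rejected
 * before any AdminQ traffic:
 *
 *	int err = i40e_aq_set_lldp_mib(hw, mib_type, lldpmib, miblen, NULL);
 *
 *	if (err)
 *		return err;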
**/ -enum i40e_status_code +int  i40e_aq_set_lldp_mib(struct i40e_hw *hw,  		     u8 mib_type, void *buff, u16 buff_size,  		     struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aqc_lldp_set_local_mib *cmd; -	enum i40e_status_code status;  	struct i40e_aq_desc desc; +	int status;  	cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;  	if (buff_size == 0 || !buff) @@ -3702,14 +3586,14 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,   * Enable or Disable posting of an event on ARQ when LLDP MIB   * associated with the interface changes   **/ -i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, -				bool enable_update, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, +				      bool enable_update, +				      struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_lldp_update_mib *cmd =  		(struct i40e_aqc_lldp_update_mib *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); @@ -3731,14 +3615,14 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,   * Restore LLDP Agent factory settings if @restore set to True. In other case   * only returns factory setting in AQ response.   **/ -enum i40e_status_code +int  i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,  		     struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_lldp_restore *cmd =  		(struct i40e_aqc_lldp_restore *)&desc.params.raw; -	i40e_status status; +	int status;  	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {  		i40e_debug(hw, I40E_DEBUG_ALL, @@ -3768,14 +3652,14 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,   *   * Stop or Shutdown the embedded LLDP Agent   **/ -i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, -				bool persist, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, +		      bool persist, +		      struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_lldp_stop *cmd =  		(struct i40e_aqc_lldp_stop *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); @@ -3803,13 +3687,13 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,   *   * Start the embedded LLDP Agent on all ports.   
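 *
 * Minimal sketch (hypothetical): restart the agent and ask the firmware to
 * keep the setting persistent:
 *
 *	int err = i40e_aq_start_lldp(hw, true, NULL);
 *
 *	if (err)
 *		return err;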
**/ -i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, -			       struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, +		       struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_lldp_start *cmd =  		(struct i40e_aqc_lldp_start *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); @@ -3835,14 +3719,14 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,   * @dcb_enable: True if DCB configuration needs to be applied   *   **/ -enum i40e_status_code +int  i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,  			   struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_set_dcb_parameters *cmd =  		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; -	i40e_status status; +	int status;  	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))  		return I40E_ERR_DEVICE_NOT_SUPPORTED; @@ -3868,12 +3752,12 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,   *   * Get CEE DCBX mode operational configuration from firmware   **/ -i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, -				       void *buff, u16 buff_size, -				       struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, +			       void *buff, u16 buff_size, +			       struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc; -	i40e_status status; +	int status;  	if (buff_size == 0 || !buff)  		return I40E_ERR_PARAM; @@ -3899,17 +3783,17 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,   * and this function will call cpu_to_le16 to convert from Host byte order to   * Little Endian order.   **/ -i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, -				u16 udp_port, u8 protocol_index, -				u8 *filter_index, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_add_udp_tunnel(struct i40e_hw *hw, +			   u16 udp_port, u8 protocol_index, +			   u8 *filter_index, +			   struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_add_udp_tunnel *cmd =  		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;  	struct i40e_aqc_del_udp_tunnel_completion *resp =  		(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); @@ -3930,13 +3814,13 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,   * @index: filter index   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, +			   struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_remove_udp_tunnel *cmd =  		(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); @@ -3955,13 +3839,13 @@ i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,   *   * This deletes a switch element from the switch.   
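 *
 * Minimal sketch (veb_seid is a placeholder): a zero SEID is rejected with
 * I40E_ERR_PARAM before the command is posted:
 *
 *	int err = i40e_aq_delete_element(hw, veb_seid, NULL);
 *
 *	if (err)
 *		return err;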
**/ -i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, +			   struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_switch_seid *cmd =  		(struct i40e_aqc_switch_seid *)&desc.params.raw; -	i40e_status status; +	int status;  	if (seid == 0)  		return I40E_ERR_PARAM; @@ -3985,11 +3869,11 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,   * recomputed and modified. The retval field in the descriptor   * will be set to 0 when RPB is modified.   **/ -i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_dcb_updated(struct i40e_hw *hw, +			struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); @@ -4009,15 +3893,15 @@ i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,   *   * Generic command handler for Tx scheduler AQ commands   **/ -static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, +static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,  				void *buff, u16 buff_size, -				 enum i40e_admin_queue_opc opcode, +				enum i40e_admin_queue_opc opcode,  				struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_tx_sched_ind *cmd =  		(struct i40e_aqc_tx_sched_ind *)&desc.params.raw; -	i40e_status status; +	int status;  	bool cmd_param_flag = false;  	switch (opcode) { @@ -4067,14 +3951,14 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,   * @max_credit: Max BW limit credits   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, +int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,  				u16 seid, u16 credit, u8 max_credit,  				struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_configure_vsi_bw_limit *cmd =  		(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_configure_vsi_bw_limit); @@ -4095,10 +3979,10 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,   * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, -			u16 seid, -			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, -			struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, +			     u16 seid, +			     struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, +			     struct i40e_asq_cmd_details *cmd_details)  {  	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),  				    i40e_aqc_opc_configure_vsi_tc_bw, @@ -4113,11 +3997,12 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,   * @opcode: Tx scheduler AQ command opcode   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, -		u16 seid, -		struct i40e_aqc_configure_switching_comp_ets_data *ets_data, -		enum i40e_admin_queue_opc opcode, -		struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, +			       u16 seid, +			       struct i40e_aqc_configure_switching_comp_ets_data *ets_data, +			       enum 
i40e_admin_queue_opc opcode, +			       struct i40e_asq_cmd_details *cmd_details)  {  	return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,  				    sizeof(*ets_data), opcode, cmd_details); @@ -4130,7 +4015,8 @@ i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,   * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, +int +i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,  	u16 seid,  	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,  	struct i40e_asq_cmd_details *cmd_details) @@ -4147,10 +4033,11 @@ i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,   * @bw_data: Buffer to hold VSI BW configuration   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, -			u16 seid, -			struct i40e_aqc_query_vsi_bw_config_resp *bw_data, -			struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, +			    u16 seid, +			    struct i40e_aqc_query_vsi_bw_config_resp *bw_data, +			    struct i40e_asq_cmd_details *cmd_details)  {  	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),  				    i40e_aqc_opc_query_vsi_bw_config, @@ -4164,10 +4051,11 @@ i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,   * @bw_data: Buffer to hold VSI BW configuration per TC   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, -			u16 seid, -			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, -			struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, +				 u16 seid, +				 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, +				 struct i40e_asq_cmd_details *cmd_details)  {  	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),  				    i40e_aqc_opc_query_vsi_ets_sla_config, @@ -4181,10 +4069,11 @@ i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,   * @bw_data: Buffer to hold switching component's per TC BW config   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, -		u16 seid, -		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, -		struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, +				     u16 seid, +				     struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, +				     struct i40e_asq_cmd_details *cmd_details)  {  	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),  				   i40e_aqc_opc_query_switching_comp_ets_config, @@ -4198,10 +4087,11 @@ i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,   * @bw_data: Buffer to hold current ETS configuration for the Physical Port   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, -			u16 seid, -			struct i40e_aqc_query_port_ets_config_resp *bw_data, -			struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_query_port_ets_config(struct i40e_hw *hw, +			      u16 seid, +			      struct i40e_aqc_query_port_ets_config_resp *bw_data, +			      struct i40e_asq_cmd_details *cmd_details)  {  	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),  				    
i40e_aqc_opc_query_port_ets_config, @@ -4215,10 +4105,11 @@ i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,   * @bw_data: Buffer to hold switching component's BW configuration   * @cmd_details: pointer to command details structure or NULL   **/ -i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, -		u16 seid, -		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, -		struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, +				    u16 seid, +				    struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, +				    struct i40e_asq_cmd_details *cmd_details)  {  	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),  				    i40e_aqc_opc_query_switching_comp_bw_config, @@ -4237,8 +4128,9 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,   * Returns 0 if the values passed are valid and within   * range else returns an error.   **/ -static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, -				struct i40e_filter_control_settings *settings) +static int +i40e_validate_filter_settings(struct i40e_hw *hw, +			      struct i40e_filter_control_settings *settings)  {  	u32 fcoe_cntx_size, fcoe_filt_size;  	u32 fcoe_fmax; @@ -4324,11 +4216,11 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,   * for a single PF. It is expected that these settings are programmed   * at the driver initialization time.   **/ -i40e_status i40e_set_filter_control(struct i40e_hw *hw, -				struct i40e_filter_control_settings *settings) +int i40e_set_filter_control(struct i40e_hw *hw, +			    struct i40e_filter_control_settings *settings)  { -	i40e_status ret = 0;  	u32 hash_lut_size = 0; +	int ret = 0;  	u32 val;  	if (!settings) @@ -4398,11 +4290,11 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,   * In return it will update the total number of perfect filter count in   * the stats member.   **/ -i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, -				u8 *mac_addr, u16 ethtype, u16 flags, -				u16 vsi_seid, u16 queue, bool is_add, -				struct i40e_control_filter_stats *stats, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, +					  u8 *mac_addr, u16 ethtype, u16 flags, +					  u16 vsi_seid, u16 queue, bool is_add, +					  struct i40e_control_filter_stats *stats, +					  struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_add_remove_control_packet_filter *cmd = @@ -4411,7 +4303,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,  	struct i40e_aqc_add_remove_control_packet_filter_completion *resp =  		(struct i40e_aqc_add_remove_control_packet_filter_completion *)  		&desc.params.raw; -	i40e_status status; +	int status;  	if (vsi_seid == 0)  		return I40E_ERR_PARAM; @@ -4457,7 +4349,7 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,  		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |  		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;  	u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; -	i40e_status status; +	int status;  	status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,  						       seid, 0, true, NULL, @@ -4479,14 +4371,14 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,   * is not passed then only register at 'reg_addr0' is read.   
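 *
 * Illustrative sketch (the two register offsets are placeholders): both
 * alternate-structure words can be fetched with a single command:
 *
 *	u32 max_bw, min_bw;
 *	int err = i40e_aq_alternate_read(hw, max_bw_addr, &max_bw,
 *					 min_bw_addr, &min_bw);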
*   **/ -static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, -					  u32 reg_addr0, u32 *reg_val0, -					  u32 reg_addr1, u32 *reg_val1) +static int i40e_aq_alternate_read(struct i40e_hw *hw, +				  u32 reg_addr0, u32 *reg_val0, +				  u32 reg_addr1, u32 *reg_val1)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_alternate_write *cmd_resp =  		(struct i40e_aqc_alternate_write *)&desc.params.raw; -	i40e_status status; +	int status;  	if (!reg_val0)  		return I40E_ERR_PARAM; @@ -4515,12 +4407,12 @@ static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,   *   * Suspend port's Tx traffic   **/ -i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, -				    struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid, +			    struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aqc_tx_sched_ind *cmd;  	struct i40e_aq_desc desc; -	i40e_status status; +	int status;  	cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx); @@ -4537,11 +4429,11 @@ i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,   *   * Resume port's Tx traffic   **/ -i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, -				   struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_resume_port_tx(struct i40e_hw *hw, +			   struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); @@ -4611,18 +4503,18 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)   * Dump internal FW/HW data for debug purposes.   *   **/ -i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, -			       u8 table_id, u32 start_index, u16 buff_size, -			       void *buff, u16 *ret_buff_size, -			       u8 *ret_next_table, u32 *ret_next_index, -			       struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, +		       u8 table_id, u32 start_index, u16 buff_size, +		       void *buff, u16 *ret_buff_size, +		       u8 *ret_next_table, u32 *ret_next_index, +		       struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_debug_dump_internals *cmd =  		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;  	struct i40e_aqc_debug_dump_internals *resp =  		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw; -	i40e_status status; +	int status;  	if (buff_size == 0 || !buff)  		return I40E_ERR_PARAM; @@ -4663,12 +4555,12 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,   *   * Read bw from the alternate ram for the given pf   **/ -i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, -				      u32 *max_bw, u32 *min_bw, -				      bool *min_valid, bool *max_valid) +int i40e_read_bw_from_alt_ram(struct i40e_hw *hw, +			      u32 *max_bw, u32 *min_bw, +			      bool *min_valid, bool *max_valid)  { -	i40e_status status;  	u32 max_bw_addr, min_bw_addr; +	int status;  	/* Calculate the address of the min/max bw registers */  	max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + @@ -4703,13 +4595,14 @@ i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,   *   * Configure partitions guaranteed/max bw   **/ -i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, -			struct i40e_aqc_configure_partition_bw_data *bw_data, -			struct i40e_asq_cmd_details *cmd_details) +int +i40e_aq_configure_partition_bw(struct i40e_hw *hw, +			       struct 
i40e_aqc_configure_partition_bw_data *bw_data, +			       struct i40e_asq_cmd_details *cmd_details)  { -	i40e_status status; -	struct i40e_aq_desc desc;  	u16 bwd_size = sizeof(*bw_data); +	struct i40e_aq_desc desc; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_configure_partition_bw); @@ -4738,11 +4631,11 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,   *   * Reads specified PHY register value   **/ -i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, -					    u16 reg, u8 phy_addr, u16 *value) +int i40e_read_phy_register_clause22(struct i40e_hw *hw, +				    u16 reg, u8 phy_addr, u16 *value)  { -	i40e_status status = I40E_ERR_TIMEOUT;  	u8 port_num = (u8)hw->func_caps.mdio_port_num; +	int status = I40E_ERR_TIMEOUT;  	u32 command = 0;  	u16 retry = 1000; @@ -4783,11 +4676,11 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,   *   * Writes specified PHY register value   **/ -i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, -					     u16 reg, u8 phy_addr, u16 value) +int i40e_write_phy_register_clause22(struct i40e_hw *hw, +				     u16 reg, u8 phy_addr, u16 value)  { -	i40e_status status = I40E_ERR_TIMEOUT;  	u8 port_num = (u8)hw->func_caps.mdio_port_num; +	int status = I40E_ERR_TIMEOUT;  	u32 command  = 0;  	u16 retry = 1000; @@ -4824,13 +4717,13 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,   *   * Reads specified PHY register value   **/ -i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, -				u8 page, u16 reg, u8 phy_addr, u16 *value) +int i40e_read_phy_register_clause45(struct i40e_hw *hw, +				    u8 page, u16 reg, u8 phy_addr, u16 *value)  { -	i40e_status status = I40E_ERR_TIMEOUT; +	u8 port_num = hw->func_caps.mdio_port_num; +	int status = I40E_ERR_TIMEOUT;  	u32 command = 0;  	u16 retry = 1000; -	u8 port_num = hw->func_caps.mdio_port_num;  	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |  		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | @@ -4898,13 +4791,13 @@ phy_read_end:   *   * Writes value to specified PHY register   **/ -i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, -				u8 page, u16 reg, u8 phy_addr, u16 value) +int i40e_write_phy_register_clause45(struct i40e_hw *hw, +				     u8 page, u16 reg, u8 phy_addr, u16 value)  { -	i40e_status status = I40E_ERR_TIMEOUT; -	u32 command = 0; -	u16 retry = 1000;  	u8 port_num = hw->func_caps.mdio_port_num; +	int status = I40E_ERR_TIMEOUT; +	u16 retry = 1000; +	u32 command = 0;  	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |  		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | @@ -4965,10 +4858,10 @@ phy_write_end:   *   * Writes value to specified PHY register   **/ -i40e_status i40e_write_phy_register(struct i40e_hw *hw, -				    u8 page, u16 reg, u8 phy_addr, u16 value) +int i40e_write_phy_register(struct i40e_hw *hw, +			    u8 page, u16 reg, u8 phy_addr, u16 value)  { -	i40e_status status; +	int status;  	switch (hw->device_id) {  	case I40E_DEV_ID_1G_BASE_T_X722: @@ -5004,10 +4897,10 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,   *   * Reads specified PHY register value   **/ -i40e_status i40e_read_phy_register(struct i40e_hw *hw, -				   u8 page, u16 reg, u8 phy_addr, u16 *value) +int i40e_read_phy_register(struct i40e_hw *hw, +			   u8 page, u16 reg, u8 phy_addr, u16 *value)  { -	i40e_status status; +	int status;  	switch (hw->device_id) {  	case I40E_DEV_ID_1G_BASE_T_X722: @@ -5056,17 +4949,17 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)   *   * Blinks PHY 
link LED   **/ -i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, -				    u32 time, u32 interval) +int i40e_blink_phy_link_led(struct i40e_hw *hw, +			    u32 time, u32 interval)  { -	i40e_status status = 0; -	u32 i; -	u16 led_ctl; -	u16 gpio_led_port; -	u16 led_reg;  	u16 led_addr = I40E_PHY_LED_PROV_REG_1; +	u16 gpio_led_port;  	u8 phy_addr = 0; +	int status = 0; +	u16 led_ctl;  	u8 port_num; +	u16 led_reg; +	u32 i;  	i = rd32(hw, I40E_PFGEN_PORTNUM);  	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); @@ -5128,12 +5021,12 @@ phy_blinking_end:   * @led_addr: LED register address   * @reg_val: read register value   **/ -static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, -					      u32 *reg_val) +static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, +			    u32 *reg_val)  { -	enum i40e_status_code status;  	u8 phy_addr = 0;  	u8 port_num; +	int status;  	u32 i;  	*reg_val = 0; @@ -5162,12 +5055,12 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,   * @led_addr: LED register address   * @reg_val: register value to write   **/ -static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, -					      u32 reg_val) +static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, +			    u32 reg_val)  { -	enum i40e_status_code status;  	u8 phy_addr = 0;  	u8 port_num; +	int status;  	u32 i;  	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { @@ -5197,17 +5090,17 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,   * @val: original value of register to use   *   **/ -i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, -			     u16 *val) +int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, +		     u16 *val)  { -	i40e_status status = 0;  	u16 gpio_led_port;  	u8 phy_addr = 0; -	u16 reg_val; +	u32 reg_val_aq; +	int status = 0;  	u16 temp_addr; +	u16 reg_val;  	u8 port_num;  	u32 i; -	u32 reg_val_aq;  	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {  		status = @@ -5252,12 +5145,12 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,   * Set led's on or off when controlled by the PHY   *   **/ -i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, -			     u16 led_addr, u32 mode) +int i40e_led_set_phy(struct i40e_hw *hw, bool on, +		     u16 led_addr, u32 mode)  { -	i40e_status status = 0;  	u32 led_ctl = 0;  	u32 led_reg = 0; +	int status = 0;  	status = i40e_led_get_reg(hw, led_addr, &led_reg);  	if (status) @@ -5301,14 +5194,14 @@ restore_config:   * Use the firmware to read the Rx control register,   * especially useful if the Rx unit is under heavy pressure   **/ -i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, -				u32 reg_addr, u32 *reg_val, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, +				 u32 reg_addr, u32 *reg_val, +				 struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =  		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; -	i40e_status status; +	int status;  	if (!reg_val)  		return I40E_ERR_PARAM; @@ -5332,8 +5225,8 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,   **/  u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)  { -	i40e_status status = 0;  	bool use_register; +	int status = 0;  	int retry = 5;  	u32 val = 0; @@ -5367,14 +5260,14 @@ do_retry:   * Use the firmware to write to an Rx control register,   * especially useful if the Rx unit is 
under heavy pressure   **/ -i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, -				u32 reg_addr, u32 reg_val, -				struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, +				  u32 reg_addr, u32 reg_val, +				  struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_rx_ctl_reg_read_write *cmd =  		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); @@ -5394,8 +5287,8 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,   **/  void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)  { -	i40e_status status = 0;  	bool use_register; +	int status = 0;  	int retry = 5;  	use_register = (((hw->aq.api_maj_ver == 1) && @@ -5457,16 +5350,16 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,   * NOTE: In common cases MDIO I/F number should not be changed, thats why you   * may use simple wrapper i40e_aq_set_phy_register.   **/ -enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw, -			     u8 phy_select, u8 dev_addr, bool page_change, -			     bool set_mdio, u8 mdio_num, -			     u32 reg_addr, u32 reg_val, -			     struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_set_phy_register_ext(struct i40e_hw *hw, +				 u8 phy_select, u8 dev_addr, bool page_change, +				 bool set_mdio, u8 mdio_num, +				 u32 reg_addr, u32 reg_val, +				 struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_phy_register_access *cmd =  		(struct i40e_aqc_phy_register_access *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_set_phy_register); @@ -5502,16 +5395,16 @@ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,   * NOTE: In common cases MDIO I/F number should not be changed, thats why you   * may use simple wrapper i40e_aq_get_phy_register.   
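 *
 * Hypothetical sketch (phy_select/dev_addr/reg_addr are placeholders):
 * passing set_mdio == false with mdio_num == 0 leaves the MDIO I/F number
 * untouched, which is the common case the simple wrapper covers:
 *
 *	u32 val;
 *	int err = i40e_aq_get_phy_register_ext(hw, phy_select, dev_addr,
 *					       false, false, 0,
 *					       reg_addr, &val, NULL);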
**/ -enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw, -			     u8 phy_select, u8 dev_addr, bool page_change, -			     bool set_mdio, u8 mdio_num, -			     u32 reg_addr, u32 *reg_val, -			     struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_phy_register_ext(struct i40e_hw *hw, +				 u8 phy_select, u8 dev_addr, bool page_change, +				 bool set_mdio, u8 mdio_num, +				 u32 reg_addr, u32 *reg_val, +				 struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_phy_register_access *cmd =  		(struct i40e_aqc_phy_register_access *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_get_phy_register); @@ -5542,18 +5435,17 @@ enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,   * @error_info: returns error information   * @cmd_details: pointer to command details structure or NULL   **/ -enum -i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, -				   u16 buff_size, u32 track_id, -				   u32 *error_offset, u32 *error_info, -				   struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, +		      u16 buff_size, u32 track_id, +		      u32 *error_offset, u32 *error_info, +		      struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_write_personalization_profile *cmd =  		(struct i40e_aqc_write_personalization_profile *)  		&desc.params.raw;  	struct i40e_aqc_write_ddp_resp *resp; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_write_personalization_profile); @@ -5586,15 +5478,14 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,   * @flags: AdminQ command flags   * @cmd_details: pointer to command details structure or NULL   **/ -enum -i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, -				      u16 buff_size, u8 flags, -				      struct i40e_asq_cmd_details *cmd_details) +int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, +			 u16 buff_size, u8 flags, +			 struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_get_applied_profiles *cmd =  		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw; -	i40e_status status; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_get_personalization_profile_list); @@ -5693,14 +5584,13 @@ i40e_find_section_in_profile(u32 section_type,   * @hw: pointer to the hw struct   * @aq: command buffer containing all data to execute AQ   **/ -static enum -i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw, -					  struct i40e_profile_aq_section *aq) +static int i40e_ddp_exec_aq_section(struct i40e_hw *hw, +				    struct i40e_profile_aq_section *aq)  { -	i40e_status status;  	struct i40e_aq_desc desc;  	u8 *msg = NULL;  	u16 msglen; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);  	desc.flags |= cpu_to_le16(aq->flags); @@ -5740,14 +5630,14 @@ i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,   *   * Validates supported devices and profile's sections.   
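 *
 * Hypothetical download flow (profile_seg and track_id are placeholders):
 * validation runs before any section is applied, and a failed write can be
 * unwound with the rollback helper defined below:
 *
 *	int err = i40e_write_profile(hw, profile_seg, track_id);
 *
 *	if (err)
 *		err = i40e_rollback_profile(hw, profile_seg, track_id);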
*/ -static enum i40e_status_code +static int  i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,  		      u32 track_id, bool rollback)  {  	struct i40e_profile_section_header *sec = NULL; -	i40e_status status = 0;  	struct i40e_section_table *sec_tbl;  	u32 vendor_dev_id; +	int status = 0;  	u32 dev_cnt;  	u32 sec_off;  	u32 i; @@ -5805,16 +5695,16 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,   *   * Handles the download of a complete package.   */ -enum i40e_status_code +int  i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,  		   u32 track_id)  { -	i40e_status status = 0; -	struct i40e_section_table *sec_tbl;  	struct i40e_profile_section_header *sec = NULL;  	struct i40e_profile_aq_section *ddp_aq; -	u32 section_size = 0; +	struct i40e_section_table *sec_tbl;  	u32 offset = 0, info = 0; +	u32 section_size = 0; +	int status = 0;  	u32 sec_off;  	u32 i; @@ -5868,15 +5758,15 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,   *   * Rolls back previously loaded package.   */ -enum i40e_status_code +int  i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,  		      u32 track_id)  {  	struct i40e_profile_section_header *sec = NULL; -	i40e_status status = 0;  	struct i40e_section_table *sec_tbl;  	u32 offset = 0, info = 0;  	u32 section_size = 0; +	int status = 0;  	u32 sec_off;  	int i; @@ -5920,15 +5810,15 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,   *   * Register a profile to the list of loaded profiles.   */ -enum i40e_status_code +int  i40e_add_pinfo_to_list(struct i40e_hw *hw,  		       struct i40e_profile_segment *profile,  		       u8 *profile_info_sec, u32 track_id)  { -	i40e_status status = 0;  	struct i40e_profile_section_header *sec = NULL;  	struct i40e_profile_info *pinfo;  	u32 offset = 0, info = 0; +	int status = 0;  	sec = (struct i40e_profile_section_header *)profile_info_sec;  	sec->tbl_size = 1; @@ -5962,7 +5852,7 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,   * of the function.   *   **/ -enum i40e_status_code +int  i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,  			  struct i40e_aqc_cloud_filters_element_data *filters,  			  u8 filter_count) @@ -5970,8 +5860,8 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,  	struct i40e_aq_desc desc;  	struct i40e_aqc_add_remove_cloud_filters *cmd =  	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; -	enum i40e_status_code status;  	u16 buff_len; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_add_cloud_filters); @@ -5999,7 +5889,7 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,   * function.   *   **/ -enum i40e_status_code +int  i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,  			     struct i40e_aqc_cloud_filters_element_bb *filters,  			     u8 filter_count) @@ -6007,8 +5897,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,  	struct i40e_aq_desc desc;  	struct i40e_aqc_add_remove_cloud_filters *cmd =  	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; -	i40e_status status;  	u16 buff_len; +	int status;  	int i;  	i40e_fill_default_direct_cmd_desc(&desc, @@ -6056,7 +5946,7 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,   * of the function.   
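 *
 * Minimal sketch (filters/num_filters are placeholders naming the same
 * elements that were added earlier): nonzero indicates the removal
 * command failed:
 *
 *	int err = i40e_aq_rem_cloud_filters(hw, seid, filters, num_filters);
 *
 *	if (err)
 *		return err;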
*   **/ -enum i40e_status_code +int  i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,  			  struct i40e_aqc_cloud_filters_element_data *filters,  			  u8 filter_count) @@ -6064,8 +5954,8 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,  	struct i40e_aq_desc desc;  	struct i40e_aqc_add_remove_cloud_filters *cmd =  	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; -	enum i40e_status_code status;  	u16 buff_len; +	int status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_remove_cloud_filters); @@ -6093,7 +5983,7 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,   * function.   *   **/ -enum i40e_status_code +int  i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,  			     struct i40e_aqc_cloud_filters_element_bb *filters,  			     u8 filter_count) @@ -6101,8 +5991,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,  	struct i40e_aq_desc desc;  	struct i40e_aqc_add_remove_cloud_filters *cmd =  	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; -	i40e_status status;  	u16 buff_len; +	int status;  	int i;  	i40e_fill_default_direct_cmd_desc(&desc, diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 673f341f4c0c..90638b67f8dc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -12,7 +12,7 @@   *   * Get the DCBX status from the Firmware   **/ -i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) +int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)  {  	u32 reg; @@ -497,15 +497,15 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,   *   * Parse DCB configuration from the LLDPDU   **/ -i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, -				    struct i40e_dcbx_config *dcbcfg) +int i40e_lldp_to_dcb_config(u8 *lldpmib, +			    struct i40e_dcbx_config *dcbcfg)  { -	i40e_status ret = 0;  	struct i40e_lldp_org_tlv *tlv; -	u16 type; -	u16 length;  	u16 typelength;  	u16 offset = 0; +	int ret = 0; +	u16 length; +	u16 type;  	if (!lldpmib || !dcbcfg)  		return I40E_ERR_PARAM; @@ -551,12 +551,12 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,   *   * Query DCB configuration from the Firmware   **/ -i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, -				   u8 bridgetype, -				   struct i40e_dcbx_config *dcbcfg) +int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, +			   u8 bridgetype, +			   struct i40e_dcbx_config *dcbcfg)  { -	i40e_status ret = 0;  	struct i40e_virt_mem mem; +	int ret = 0;  	u8 *lldpmib;  	/* Allocate the LLDPDU */ @@ -767,9 +767,9 @@ static void i40e_cee_to_dcb_config(   *   * Get IEEE mode DCB configuration from the Firmware   **/ -static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw) +static int i40e_get_ieee_dcb_config(struct i40e_hw *hw)  { -	i40e_status ret = 0; +	int ret = 0;  	/* IEEE mode */  	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; @@ -797,11 +797,11 @@ out:   *   * Get DCB configuration from the Firmware   **/ -i40e_status i40e_get_dcb_config(struct i40e_hw *hw) +int i40e_get_dcb_config(struct i40e_hw *hw)  { -	i40e_status ret = 0; -	struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;  	struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; +	struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; +	int ret = 0;  	/* If Firmware version < v4.33 on X710/XL710, IEEE only */  	if ((hw->mac.type == I40E_MAC_XL710) && @@ -867,11 +867,11 @@ out:   *   * Update DCB configuration from the Firmware   **/ -i40e_status 
i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) +int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)  { -	i40e_status ret = 0;  	struct i40e_lldp_variables lldp_cfg;  	u8 adminstatus = 0; +	int ret = 0;  	if (!hw->func_caps.dcb)  		return I40E_NOT_SUPPORTED; @@ -940,13 +940,13 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)   * Get status of FW Link Layer Discovery Protocol (LLDP) Agent.   * Status of agent is reported via @lldp_status parameter.   **/ -enum i40e_status_code +int  i40e_get_fw_lldp_status(struct i40e_hw *hw,  			enum i40e_get_fw_lldp_status_resp *lldp_status)  {  	struct i40e_virt_mem mem; -	i40e_status ret;  	u8 *lldpmib; +	int ret;  	if (!lldp_status)  		return I40E_ERR_PARAM; @@ -1238,13 +1238,13 @@ static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,   *   * Set DCB configuration to the Firmware   **/ -i40e_status i40e_set_dcb_config(struct i40e_hw *hw) +int i40e_set_dcb_config(struct i40e_hw *hw)  {  	struct i40e_dcbx_config *dcbcfg;  	struct i40e_virt_mem mem;  	u8 mib_type, *lldpmib; -	i40e_status ret;  	u16 miblen; +	int ret;  	/* update the hw local config */  	dcbcfg = &hw->local_dcbx_config; @@ -1274,8 +1274,8 @@ i40e_status i40e_set_dcb_config(struct i40e_hw *hw)   *   * send DCB configuration to FW   **/ -i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, -				    struct i40e_dcbx_config *dcbcfg) +int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, +			    struct i40e_dcbx_config *dcbcfg)  {  	u16 length, offset = 0, tlvid, typelength;  	struct i40e_lldp_org_tlv *tlv; @@ -1888,13 +1888,13 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,   *   * Reads the LLDP configuration data from NVM using passed addresses   **/ -static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw, -				       struct i40e_lldp_variables *lldp_cfg, -				       u8 module, u32 word_offset) +static int _i40e_read_lldp_cfg(struct i40e_hw *hw, +			       struct i40e_lldp_variables *lldp_cfg, +			       u8 module, u32 word_offset)  {  	u32 address, offset = (2 * word_offset); -	i40e_status ret;  	__le16 raw_mem; +	int ret;  	u16 mem;  	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); @@ -1950,10 +1950,10 @@ err_lldp_cfg:   *   * Reads the LLDP configuration data from NVM   **/ -i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, -			       struct i40e_lldp_variables *lldp_cfg) +int i40e_read_lldp_cfg(struct i40e_hw *hw, +		       struct i40e_lldp_variables *lldp_cfg)  { -	i40e_status ret = 0; +	int ret = 0;  	u32 mem;  	if (!lldp_cfg) diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 2370ceecb061..6b60dc9b7736 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -264,20 +264,20 @@ void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,  void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,  			      struct i40e_rx_pb_config *old_pb_cfg,  			      struct i40e_rx_pb_config *new_pb_cfg); -i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, -				 u16 *status); -i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, -				    struct i40e_dcbx_config *dcbcfg); -i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, -				   u8 bridgetype, -				   struct i40e_dcbx_config *dcbcfg); -i40e_status i40e_get_dcb_config(struct i40e_hw *hw); -i40e_status i40e_init_dcb(struct i40e_hw *hw, -			  bool enable_mib_change); -enum i40e_status_code +int i40e_get_dcbx_status(struct i40e_hw *hw, +			 u16 *status); +int 
i40e_lldp_to_dcb_config(u8 *lldpmib, +			    struct i40e_dcbx_config *dcbcfg); +int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, +			   u8 bridgetype, +			   struct i40e_dcbx_config *dcbcfg); +int i40e_get_dcb_config(struct i40e_hw *hw); +int i40e_init_dcb(struct i40e_hw *hw, +		  bool enable_mib_change); +int  i40e_get_fw_lldp_status(struct i40e_hw *hw,  			enum i40e_get_fw_lldp_status_resp *lldp_status); -i40e_status i40e_set_dcb_config(struct i40e_hw *hw); -i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, -				    struct i40e_dcbx_config *dcbcfg); +int i40e_set_dcb_config(struct i40e_hw *hw); +int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, +			    struct i40e_dcbx_config *dcbcfg);  #endif /* _I40E_DCB_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index e32c61909b31..195421d863ab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -135,8 +135,8 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,  	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Failed setting DCB ETS configuration err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "Failed setting DCB ETS configuration err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		return -EINVAL;  	} @@ -174,8 +174,8 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,  	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Failed setting DCB PFC configuration err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "Failed setting DCB PFC configuration err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		return -EINVAL;  	} @@ -225,8 +225,8 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,  	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Failed setting DCB configuration err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "Failed setting DCB configuration err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		return -EINVAL;  	} @@ -290,8 +290,8 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,  	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Failed setting DCB configuration err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "Failed setting DCB configuration err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		return -EINVAL;  	} diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c index e1069ae658ad..7e8183762fd9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c @@ -36,7 +36,7 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,  {  	struct i40e_ddp_profile_list *profile_list;  	u8 buff[I40E_PROFILE_LIST_SIZE]; -	i40e_status status; +	int status;  	int i;  	status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0, @@ -91,7 +91,7 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,  {  	struct i40e_ddp_profile_list *profile_list;  	u8 buff[I40E_PROFILE_LIST_SIZE]; -	i40e_status status; +	int status;  	int i;  	status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0, @@ -117,14 +117,14 @@ static int 
i40e_ddp_does_profile_overlap(struct i40e_hw *hw,   *   * Register a profile to the list of loaded profiles.   */ -static enum i40e_status_code +static int  i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,  	       u8 *profile_info_sec, u32 track_id)  {  	struct i40e_profile_section_header *sec;  	struct i40e_profile_info *pinfo; -	i40e_status status;  	u32 offset = 0, info = 0; +	int status;  	sec = (struct i40e_profile_section_header *)profile_info_sec;  	sec->tbl_size = 1; @@ -157,14 +157,14 @@ i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,   *   * Removes DDP profile from the NIC.   **/ -static enum i40e_status_code +static int  i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,  	       u8 *profile_info_sec, u32 track_id)  {  	struct i40e_profile_section_header *sec;  	struct i40e_profile_info *pinfo; -	i40e_status status;  	u32 offset = 0, info = 0; +	int status;  	sec = (struct i40e_profile_section_header *)profile_info_sec;  	sec->tbl_size = 1; @@ -270,12 +270,12 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,  	struct i40e_profile_segment *profile_hdr;  	struct i40e_profile_info pinfo;  	struct i40e_package_header *pkg_hdr; -	i40e_status status;  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_vsi *vsi = np->vsi;  	struct i40e_pf *pf = vsi->back;  	u32 track_id;  	int istatus; +	int status;  	pkg_hdr = (struct i40e_package_header *)data;  	if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index c9dcd6d92c83..9954493cd448 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -918,9 +918,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  		dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);  		i40e_veb_release(pf->veb[i]);  	} else if (strncmp(cmd_buf, "add pvid", 8) == 0) { -		i40e_status ret; -		u16 vid;  		unsigned int v; +		int ret; +		u16 vid;  		cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);  		if (cnt != 2) { @@ -1284,7 +1284,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  		}  	} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {  		struct i40e_aq_desc *desc; -		i40e_status ret; +		int ret;  		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);  		if (!desc) @@ -1330,9 +1330,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  		desc = NULL;  	} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {  		struct i40e_aq_desc *desc; -		i40e_status ret;  		u16 buffer_len;  		u8 *buff; +		int ret;  		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);  		if (!desc) diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index ef4d3762bf37..5b3519c6e362 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -10,8 +10,8 @@   * @reg: reg to be tested   * @mask: bits to be touched   **/ -static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw, -							u32 reg, u32 mask) +static int i40e_diag_reg_pattern_test(struct i40e_hw *hw, +				      u32 reg, u32 mask)  {  	static const u32 patterns[] = {  		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF @@ -74,9 +74,9 @@ struct i40e_diag_reg_test_info i40e_reg_list[] = {   *   * Perform registers diagnostic test   **/ -i40e_status i40e_diag_reg_test(struct i40e_hw *hw) +int 
i40e_diag_reg_test(struct i40e_hw *hw)  { -	i40e_status ret_code = 0; +	int ret_code = 0;  	u32 reg, mask;  	u32 i, j; @@ -114,9 +114,9 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)   *   * Perform EEPROM diagnostic test   **/ -i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw) +int i40e_diag_eeprom_test(struct i40e_hw *hw)  { -	i40e_status ret_code; +	int ret_code;  	u16 reg_val;  	/* read NVM control word and if NVM valid, validate EEPROM checksum*/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h index c3340f320a18..e641035c7297 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -22,7 +22,7 @@ struct i40e_diag_reg_test_info {  extern struct i40e_diag_reg_test_info i40e_reg_list[]; -i40e_status i40e_diag_reg_test(struct i40e_hw *hw); -i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw); +int i40e_diag_reg_test(struct i40e_hw *hw); +int i40e_diag_eeprom_test(struct i40e_hw *hw);  #endif /* _I40E_DIAG_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 4a6a6e48c615..4934ff58332c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -154,7 +154,7 @@ __i40e_add_ethtool_stats(u64 **data, void *pointer,   * @ring: the ring to copy   *   * Queue statistics must be copied while protected by - * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats. + * u64_stats_fetch_begin, so we can't directly use i40e_add_ethtool_stats.   * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the   * ring pointer is null, zero out the queue stat values and update the data   * pointer. Otherwise safely copy the stats from the ring into the supplied @@ -172,16 +172,16 @@ i40e_add_queue_stats(u64 **data, struct i40e_ring *ring)  	/* To avoid invalid statistics values, ensure that we keep retrying  	 * the copy until we get a consistent value according to -	 * u64_stats_fetch_retry_irq. But first, make sure our ring is +	 * u64_stats_fetch_retry. But first, make sure our ring is  	 * non-null before attempting to access its syncp.  	 */  	do { -		start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); +		start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);  		for (i = 0; i < size; i++) {  			i40e_add_one_ethtool_stat(&(*data)[i], ring,  						  &stats[i]);  		} -	} while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); +	} while (ring && u64_stats_fetch_retry(&ring->syncp, start));  	/* Once we successfully copy the stats in, update the data pointer */  	*data += size; @@ -1226,8 +1226,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,  	struct i40e_vsi *vsi = np->vsi;  	struct i40e_hw *hw = &pf->hw;  	bool autoneg_changed = false; -	i40e_status status = 0;  	int timeout = 50; +	int status = 0;  	int err = 0;  	__u32 speed;  	u8 autoneg; @@ -1287,8 +1287,10 @@ static int i40e_set_link_ksettings(struct net_device *netdev,  	 * trying to set something that we do not support.  	 
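 *
 * A minimal sketch of the u64_stats retry loop that i40e_add_queue_stats()
 * above now uses, assuming a valid struct i40e_ring *ring:
 *
 *	unsigned int start;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&ring->syncp);
 *		packets = ring->stats.packets;
 *		bytes   = ring->stats.bytes;
 *	} while (u64_stats_fetch_retry(&ring->syncp, start));
 *
 * The reader simply retries until the seqcount is stable (a no-op on 64-bit),
 * which is why the former _irq variants are no longer needed.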
*/  	if (memcmp(&copy_ks.base, &safe_ks.base, -		   sizeof(struct ethtool_link_settings))) +		   sizeof(struct ethtool_link_settings))) { +		netdev_err(netdev, "Only speed and autoneg are supported.\n");  		return -EOPNOTSUPP; +	}  	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {  		timeout--; @@ -1453,8 +1455,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,  		status = i40e_aq_set_phy_config(hw, &config, NULL);  		if (status) {  			netdev_info(netdev, -				    "Set phy config failed, err %s aq_err %s\n", -				    i40e_stat_str(hw, status), +				    "Set phy config failed, err %pe aq_err %s\n", +				    ERR_PTR(status),  				    i40e_aq_str(hw, hw->aq.asq_last_status));  			err = -EAGAIN;  			goto done; @@ -1463,8 +1465,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,  		status = i40e_update_link_info(hw);  		if (status)  			netdev_dbg(netdev, -				   "Updating link info failed with err %s aq_err %s\n", -				   i40e_stat_str(hw, status), +				   "Updating link info failed with err %pe aq_err %s\n", +				   ERR_PTR(status),  				   i40e_aq_str(hw, hw->aq.asq_last_status));  	} else { @@ -1483,7 +1485,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)  	struct i40e_aq_get_phy_abilities_resp abilities;  	struct i40e_pf *pf = np->vsi->back;  	struct i40e_hw *hw = &pf->hw; -	i40e_status status = 0; +	int status = 0;  	u32 flags = 0;  	int err = 0; @@ -1515,8 +1517,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)  		status = i40e_aq_set_phy_config(hw, &config, NULL);  		if (status) {  			netdev_info(netdev, -				    "Set phy config failed, err %s aq_err %s\n", -				    i40e_stat_str(hw, status), +				    "Set phy config failed, err %pe aq_err %s\n", +				    ERR_PTR(status),  				    i40e_aq_str(hw, hw->aq.asq_last_status));  			err = -EAGAIN;  			goto done;  		} @@ -1529,8 +1531,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)  			 * (e.g. no physical connection etc.)  			 
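 *
 * The logging changes above follow one pattern: the status is wrapped in
 * ERR_PTR() and printed with the %pe printk format, which renders a symbolic
 * errno name (falling back to a raw value for non-errno codes). A sketch:
 *
 *	status = i40e_aq_set_phy_config(hw, &config, NULL);
 *	if (status)
 *		netdev_info(netdev, "Set phy config failed, err %pe aq_err %s\n",
 *			    ERR_PTR(status),
 *			    i40e_aq_str(hw, hw->aq.asq_last_status));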
*/  			netdev_dbg(netdev, -				   "Updating link info failed with err %s aq_err %s\n", -				   i40e_stat_str(hw, status), +				   "Updating link info failed with err %pe aq_err %s\n", +				   ERR_PTR(status),  				   i40e_aq_str(hw, hw->aq.asq_last_status));  	} @@ -1545,7 +1547,7 @@ static int i40e_get_fec_param(struct net_device *netdev,  	struct i40e_aq_get_phy_abilities_resp abilities;  	struct i40e_pf *pf = np->vsi->back;  	struct i40e_hw *hw = &pf->hw; -	i40e_status status = 0; +	int status = 0;  	int err = 0;  	u8 fec_cfg; @@ -1632,12 +1634,12 @@ static int i40e_nway_reset(struct net_device *netdev)  	struct i40e_pf *pf = np->vsi->back;  	struct i40e_hw *hw = &pf->hw;  	bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; -	i40e_status ret = 0; +	int ret = 0;  	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);  	if (ret) { -		netdev_info(netdev, "link restart failed, err %s aq_err %s\n", -			    i40e_stat_str(hw, ret), +		netdev_info(netdev, "link restart failed, err %pe aq_err %s\n", +			    ERR_PTR(ret),  			    i40e_aq_str(hw, hw->aq.asq_last_status));  		return -EIO;  	} @@ -1697,9 +1699,9 @@ static int i40e_set_pauseparam(struct net_device *netdev,  	struct i40e_link_status *hw_link_info = &hw->phy.link_info;  	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;  	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; -	i40e_status status;  	u8 aq_failures;  	int err = 0; +	int status;  	u32 is_an;  	/* Changing the port's flow control is not supported if this isn't the @@ -1753,20 +1755,20 @@ static int i40e_set_pauseparam(struct net_device *netdev,  	status = i40e_set_fc(hw, &aq_failures, link_up);  	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) { -		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n", -			    i40e_stat_str(hw, status), +		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n", +			    ERR_PTR(status),  			    i40e_aq_str(hw, hw->aq.asq_last_status));  		err = -EAGAIN;  	}  	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) { -		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n", -			    i40e_stat_str(hw, status), +		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n", +			    ERR_PTR(status),  			    i40e_aq_str(hw, hw->aq.asq_last_status));  		err = -EAGAIN;  	}  	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { -		netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n", -			    i40e_stat_str(hw, status), +		netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n", +			    ERR_PTR(status),  			    i40e_aq_str(hw, hw->aq.asq_last_status));  		err = -EAGAIN;  	} @@ -2581,8 +2583,8 @@ static u64 i40e_link_test(struct net_device *netdev, u64 *data)  {  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_pf *pf = np->vsi->back; -	i40e_status status;  	bool link_up = false; +	int status;  	netif_info(pf, hw, netdev, "link test\n");  	status = i40e_get_link_status(&pf->hw, &link_up); @@ -2805,11 +2807,11 @@ static int i40e_set_phys_id(struct net_device *netdev,  			    enum ethtool_phys_id_state state)  {  	struct i40e_netdev_priv *np = netdev_priv(netdev); -	i40e_status ret = 0;  	struct i40e_pf *pf = np->vsi->back;  	struct i40e_hw *hw = &pf->hw;  	int blink_freq = 2;  	u16 temp_status; +	int ret = 0;  	switch (state) {  	case ETHTOOL_ID_ACTIVE: @@ -4464,11 +4466,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,  	
		return -EOPNOTSUPP;  		/* First 4 bytes of L4 header */ -		if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF)) -			new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK; -		else if (!usr_ip4_spec->l4_4_bytes) -			new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK); -		else +		if (usr_ip4_spec->l4_4_bytes)  			return -EOPNOTSUPP;  		/* Filtering on Type of Service is not supported. */ @@ -4507,11 +4505,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,  		else  			return -EOPNOTSUPP; -		if (usr_ip6_spec->l4_4_bytes == htonl(0xFFFFFFFF)) -			new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK; -		else if (!usr_ip6_spec->l4_4_bytes) -			new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK); -		else +		if (usr_ip6_spec->l4_4_bytes)  			return -EOPNOTSUPP;  		/* Filtering on Traffic class is not supported. */ @@ -5253,7 +5247,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)  	struct i40e_vsi *vsi = np->vsi;  	struct i40e_pf *pf = vsi->back;  	u32 reset_needed = 0; -	i40e_status status; +	int status;  	u32 i, j;  	orig_flags = READ_ONCE(pf->flags); @@ -5368,8 +5362,8 @@ flags_complete:  						0, NULL);  		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {  			dev_info(&pf->pdev->dev, -				 "couldn't set switch config bits, err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, ret), +				 "couldn't set switch config bits, err %pe aq_err %s\n", +				 ERR_PTR(ret),  				 i40e_aq_str(&pf->hw,  					     pf->hw.aq.asq_last_status));  			/* not a fatal problem, just keep going */ @@ -5441,9 +5435,8 @@ flags_complete:  					return -EBUSY;  				default:  					dev_warn(&pf->pdev->dev, -						 "Starting FW LLDP agent failed: error: %s, %s\n", -						 i40e_stat_str(&pf->hw, -							       status), +						 "Starting FW LLDP agent failed: error: %pe, %s\n", +						 ERR_PTR(status),  						 i40e_aq_str(&pf->hw,  							     adq_err));  					return -EINVAL; @@ -5483,8 +5476,8 @@ static int i40e_get_module_info(struct net_device *netdev,  	u32 sff8472_comp = 0;  	u32 sff8472_swap = 0;  	u32 sff8636_rev = 0; -	i40e_status status;  	u32 type = 0; +	int status;  	/* Check if firmware supports reading module EEPROM. 
*/  	if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) { @@ -5588,8 +5581,8 @@ static int i40e_get_module_eeprom(struct net_device *netdev,  	struct i40e_pf *pf = vsi->back;  	struct i40e_hw *hw = &pf->hw;  	bool is_sfp = false; -	i40e_status status;  	u32 value = 0; +	int status;  	int i;  	if (!ee || !ee->len || !data) @@ -5630,10 +5623,10 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)  {  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_aq_get_phy_abilities_resp phy_cfg; -	enum i40e_status_code status = 0;  	struct i40e_vsi *vsi = np->vsi;  	struct i40e_pf *pf = vsi->back;  	struct i40e_hw *hw = &pf->hw; +	int status = 0;  	/* Get initial PHY capabilities */  	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL); @@ -5695,11 +5688,11 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)  {  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_aq_get_phy_abilities_resp abilities; -	enum i40e_status_code status = I40E_SUCCESS;  	struct i40e_aq_set_phy_config config;  	struct i40e_vsi *vsi = np->vsi;  	struct i40e_pf *pf = vsi->back;  	struct i40e_hw *hw = &pf->hw; +	int status = I40E_SUCCESS;  	__le16 eee_capability;  	/* Deny parameters we don't support */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 163ee8c6311c..46f7950a0049 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -17,17 +17,17 @@   * @type: what type of segment descriptor we're manipulating   * @direct_mode_sz: size to alloc in direct mode   **/ -i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, -					      struct i40e_hmc_info *hmc_info, -					      u32 sd_index, -					      enum i40e_sd_entry_type type, -					      u64 direct_mode_sz) +int i40e_add_sd_table_entry(struct i40e_hw *hw, +			    struct i40e_hmc_info *hmc_info, +			    u32 sd_index, +			    enum i40e_sd_entry_type type, +			    u64 direct_mode_sz)  {  	enum i40e_memory_type mem_type __attribute__((unused));  	struct i40e_hmc_sd_entry *sd_entry;  	bool dma_mem_alloc_done = false; +	int ret_code = I40E_SUCCESS;  	struct i40e_dma_mem mem; -	i40e_status ret_code = I40E_SUCCESS;  	u64 alloc_len;  	if (NULL == hmc_info->sd_table.sd_entry) { @@ -106,19 +106,19 @@ exit:   *	   aligned on 4K boundary and zeroed memory.   *	2. It should be 4K in size.   **/ -i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, -					      struct i40e_hmc_info *hmc_info, -					      u32 pd_index, -					      struct i40e_dma_mem *rsrc_pg) +int i40e_add_pd_table_entry(struct i40e_hw *hw, +			    struct i40e_hmc_info *hmc_info, +			    u32 pd_index, +			    struct i40e_dma_mem *rsrc_pg)  { -	i40e_status ret_code = 0;  	struct i40e_hmc_pd_table *pd_table;  	struct i40e_hmc_pd_entry *pd_entry;  	struct i40e_dma_mem mem;  	struct i40e_dma_mem *page = &mem;  	u32 sd_idx, rel_pd_idx; -	u64 *pd_addr; +	int ret_code = 0;  	u64 page_desc; +	u64 *pd_addr;  	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {  		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; @@ -185,15 +185,15 @@ exit:   *	1. Caller can deallocate the memory used by backing storage after this   *	   function returns.   
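 *
 * A caller-side sketch of the converted HMC API, assuming hmc_info and idx
 * come from an earlier i40e_init_lan_hmc() setup:
 *
 *	int ret_code = i40e_remove_pd_bp(hw, hmc_info, idx);
 *	if (ret_code)
 *		hw_dbg(hw, "remove PD backing page failed, error %d\n", ret_code);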
**/ -i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, -					struct i40e_hmc_info *hmc_info, -					u32 idx) +int i40e_remove_pd_bp(struct i40e_hw *hw, +		      struct i40e_hmc_info *hmc_info, +		      u32 idx)  { -	i40e_status ret_code = 0;  	struct i40e_hmc_pd_entry *pd_entry;  	struct i40e_hmc_pd_table *pd_table;  	struct i40e_hmc_sd_entry *sd_entry;  	u32 sd_idx, rel_pd_idx; +	int ret_code = 0;  	u64 *pd_addr;  	/* calculate index */ @@ -241,11 +241,11 @@ exit:   * @hmc_info: pointer to the HMC configuration information structure   * @idx: the page index   **/ -i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, -					     u32 idx) +int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, +			   u32 idx)  { -	i40e_status ret_code = 0;  	struct i40e_hmc_sd_entry *sd_entry; +	int ret_code = 0;  	/* get the entry and decrease its ref counter */  	sd_entry = &hmc_info->sd_table.sd_entry[idx]; @@ -269,9 +269,9 @@ exit:   * @idx: the page index   * @is_pf: used to distinguish between VF and PF   **/ -i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, -					    struct i40e_hmc_info *hmc_info, -					    u32 idx, bool is_pf) +int i40e_remove_sd_bp_new(struct i40e_hw *hw, +			  struct i40e_hmc_info *hmc_info, +			  u32 idx, bool is_pf)  {  	struct i40e_hmc_sd_entry *sd_entry; @@ -290,11 +290,11 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,   * @hmc_info: pointer to the HMC configuration information structure   * @idx: segment descriptor index to find the relevant page descriptor   **/ -i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, -					       u32 idx) +int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, +			     u32 idx)  { -	i40e_status ret_code = 0;  	struct i40e_hmc_sd_entry *sd_entry; +	int ret_code = 0;  	sd_entry = &hmc_info->sd_table.sd_entry[idx]; @@ -318,9 +318,9 @@ exit:   * @idx: segment descriptor index to find the relevant page descriptor   * @is_pf: used to distinguish between VF and PF   **/ -i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, -					      struct i40e_hmc_info *hmc_info, -					      u32 idx, bool is_pf) +int i40e_remove_pd_page_new(struct i40e_hw *hw, +			    struct i40e_hmc_info *hmc_info, +			    u32 idx, bool is_pf)  {  	struct i40e_hmc_sd_entry *sd_entry; diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 3113792afaff..9960da07a573 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -187,28 +187,28 @@ struct i40e_hmc_info {  	/* add one more to the limit to correct our range */		\  	*(pd_limit) += 1;						\  } -i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, -					      struct i40e_hmc_info *hmc_info, -					      u32 sd_index, -					      enum i40e_sd_entry_type type, -					      u64 direct_mode_sz); - -i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, -					      struct i40e_hmc_info *hmc_info, -					      u32 pd_index, -					      struct i40e_dma_mem *rsrc_pg); -i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, -					struct i40e_hmc_info *hmc_info, -					u32 idx); -i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, -					     u32 idx); -i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, -					    struct i40e_hmc_info *hmc_info, -					    u32 idx, bool is_pf); -i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, -					       u32 idx); -i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, -					      struct i40e_hmc_info *hmc_info, -	
				      u32 idx, bool is_pf); + +int i40e_add_sd_table_entry(struct i40e_hw *hw, +			    struct i40e_hmc_info *hmc_info, +			    u32 sd_index, +			    enum i40e_sd_entry_type type, +			    u64 direct_mode_sz); +int i40e_add_pd_table_entry(struct i40e_hw *hw, +			    struct i40e_hmc_info *hmc_info, +			    u32 pd_index, +			    struct i40e_dma_mem *rsrc_pg); +int i40e_remove_pd_bp(struct i40e_hw *hw, +		      struct i40e_hmc_info *hmc_info, +		      u32 idx); +int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, +			   u32 idx); +int i40e_remove_sd_bp_new(struct i40e_hw *hw, +			  struct i40e_hmc_info *hmc_info, +			  u32 idx, bool is_pf); +int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, +			     u32 idx); +int i40e_remove_pd_page_new(struct i40e_hw *hw, +			    struct i40e_hmc_info *hmc_info, +			    u32 idx, bool is_pf);  #endif /* _I40E_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index d6e92ecddfbd..40c101f286d1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -74,12 +74,12 @@ static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,   * Assumptions:   *   - HMC Resource Profile has been selected before calling this function.   **/ -i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, -					u32 rxq_num, u32 fcoe_cntx_num, -					u32 fcoe_filt_num) +int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, +		      u32 rxq_num, u32 fcoe_cntx_num, +		      u32 fcoe_filt_num)  {  	struct i40e_hmc_obj_info *obj, *full_obj; -	i40e_status ret_code = 0; +	int ret_code = 0;  	u64 l2fpm_size;  	u32 size_exp; @@ -229,11 +229,11 @@ init_lan_hmc_out:   *	1. caller can deallocate the memory used by pd after this function   *	   returns.   **/ -static i40e_status i40e_remove_pd_page(struct i40e_hw *hw, -						 struct i40e_hmc_info *hmc_info, -						 u32 idx) +static int i40e_remove_pd_page(struct i40e_hw *hw, +			       struct i40e_hmc_info *hmc_info, +			       u32 idx)  { -	i40e_status ret_code = 0; +	int ret_code = 0;  	if (!i40e_prep_remove_pd_page(hmc_info, idx))  		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true); @@ -256,11 +256,11 @@ static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,   *	1. caller can deallocate the memory used by backing storage after this   *	   function returns.   **/ -static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw, -					       struct i40e_hmc_info *hmc_info, -					       u32 idx) +static int i40e_remove_sd_bp(struct i40e_hw *hw, +			     struct i40e_hmc_info *hmc_info, +			     u32 idx)  { -	i40e_status ret_code = 0; +	int ret_code = 0;  	if (!i40e_prep_remove_sd_bp(hmc_info, idx))  		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true); @@ -276,15 +276,15 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,   * This will allocate memory for PDs and backing pages and populate   * the sd and pd entries.   
**/ -static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw, -				struct i40e_hmc_lan_create_obj_info *info) +static int i40e_create_lan_hmc_object(struct i40e_hw *hw, +				      struct i40e_hmc_lan_create_obj_info *info)  { -	i40e_status ret_code = 0;  	struct i40e_hmc_sd_entry *sd_entry;  	u32 pd_idx1 = 0, pd_lmt1 = 0;  	u32 pd_idx = 0, pd_lmt = 0;  	bool pd_error = false;  	u32 sd_idx, sd_lmt; +	int ret_code = 0;  	u64 sd_size;  	u32 i, j; @@ -435,13 +435,13 @@ exit:   * - This function will be called after i40e_init_lan_hmc() and before   *   any LAN/FCoE HMC objects can be created.   **/ -i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, -					     enum i40e_hmc_model model) +int i40e_configure_lan_hmc(struct i40e_hw *hw, +			   enum i40e_hmc_model model)  {  	struct i40e_hmc_lan_create_obj_info info; -	i40e_status ret_code = 0;  	u8 hmc_fn_id = hw->hmc.hmc_fn_id;  	struct i40e_hmc_obj_info *obj; +	int ret_code = 0;  	/* Initialize part of the create object info struct */  	info.hmc_info = &hw->hmc; @@ -520,13 +520,13 @@ configure_lan_hmc_out:   * caller should deallocate memory allocated previously for   * book-keeping information about PDs and backing storage.   **/ -static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw, -				struct i40e_hmc_lan_delete_obj_info *info) +static int i40e_delete_lan_hmc_object(struct i40e_hw *hw, +				      struct i40e_hmc_lan_delete_obj_info *info)  { -	i40e_status ret_code = 0;  	struct i40e_hmc_pd_table *pd_table;  	u32 pd_idx, pd_lmt, rel_pd_idx;  	u32 sd_idx, sd_lmt; +	int ret_code = 0;  	u32 i, j;  	if (NULL == info) { @@ -632,10 +632,10 @@ exit:   * This must be called by drivers as they are shutting down and being   * removed from the OS.   **/ -i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw) +int i40e_shutdown_lan_hmc(struct i40e_hw *hw)  {  	struct i40e_hmc_lan_delete_obj_info info; -	i40e_status ret_code; +	int ret_code;  	info.hmc_info = &hw->hmc;  	info.rsrc_type = I40E_HMC_LAN_FULL; @@ -915,9 +915,9 @@ static void i40e_write_qword(u8 *hmc_bits,   * @context_bytes: pointer to the context bit array (DMA memory)   * @hmc_type: the type of HMC resource   **/ -static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw, -					u8 *context_bytes, -					enum i40e_hmc_lan_rsrc_type hmc_type) +static int i40e_clear_hmc_context(struct i40e_hw *hw, +				  u8 *context_bytes, +				  enum i40e_hmc_lan_rsrc_type hmc_type)  {  	/* clean the bit array */  	memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size); @@ -931,9 +931,9 @@ static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,   * @ce_info:  a description of the struct to be filled   * @dest:     the struct to be filled   **/ -static i40e_status i40e_set_hmc_context(u8 *context_bytes, -					struct i40e_context_ele *ce_info, -					u8 *dest) +static int i40e_set_hmc_context(u8 *context_bytes, +				struct i40e_context_ele *ce_info, +				u8 *dest)  {  	int f; @@ -973,18 +973,18 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,   * base pointer.  This function is used for LAN Queue contexts.   
**/  static -i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base, -				   enum i40e_hmc_lan_rsrc_type rsrc_type, -				   u32 obj_idx) +int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base, +			   enum i40e_hmc_lan_rsrc_type rsrc_type, +			   u32 obj_idx)  {  	struct i40e_hmc_info *hmc_info = &hw->hmc;  	u32 obj_offset_in_sd, obj_offset_in_pd;  	struct i40e_hmc_sd_entry *sd_entry;  	struct i40e_hmc_pd_entry *pd_entry;  	u32 pd_idx, pd_lmt, rel_pd_idx; -	i40e_status ret_code = 0;  	u64 obj_offset_in_fpm;  	u32 sd_idx, sd_lmt; +	int ret_code = 0;  	if (NULL == hmc_info) {  		ret_code = I40E_ERR_BAD_PTR; @@ -1042,11 +1042,11 @@ exit:   * @hw:    the hardware struct   * @queue: the queue we care about   **/ -i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, -						      u16 queue) +int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, +				    u16 queue)  { -	i40e_status err;  	u8 *context_bytes; +	int err;  	err = i40e_hmc_get_object_va(hw, &context_bytes,  				     I40E_HMC_LAN_TX, queue); @@ -1062,12 +1062,12 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,   * @queue: the queue we care about   * @s:     the struct to be filled   **/ -i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, -						    u16 queue, -						    struct i40e_hmc_obj_txq *s) +int i40e_set_lan_tx_queue_context(struct i40e_hw *hw, +				  u16 queue, +				  struct i40e_hmc_obj_txq *s)  { -	i40e_status err;  	u8 *context_bytes; +	int err;  	err = i40e_hmc_get_object_va(hw, &context_bytes,  				     I40E_HMC_LAN_TX, queue); @@ -1083,11 +1083,11 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,   * @hw:    the hardware struct   * @queue: the queue we care about   **/ -i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, -						      u16 queue) +int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, +				    u16 queue)  { -	i40e_status err;  	u8 *context_bytes; +	int err;  	err = i40e_hmc_get_object_va(hw, &context_bytes,  				     I40E_HMC_LAN_RX, queue); @@ -1103,12 +1103,12 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,   * @queue: the queue we care about   * @s:     the struct to be filled   **/ -i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, -						    u16 queue, -						    struct i40e_hmc_obj_rxq *s) +int i40e_set_lan_rx_queue_context(struct i40e_hw *hw, +				  u16 queue, +				  struct i40e_hmc_obj_rxq *s)  { -	i40e_status err;  	u8 *context_bytes; +	int err;  	err = i40e_hmc_get_object_va(hw, &context_bytes,  				     I40E_HMC_LAN_RX, queue); diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h index c46a2c449e60..9f960404c2b3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h @@ -137,22 +137,22 @@ struct i40e_hmc_lan_delete_obj_info {  	u32 count;  }; -i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, -					u32 rxq_num, u32 fcoe_cntx_num, -					u32 fcoe_filt_num); -i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, -					     enum i40e_hmc_model model); -i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw); - -i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, -						      u16 queue); -i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, -						    u16 queue, -						    struct i40e_hmc_obj_txq *s); -i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, -						      u16 queue); -i40e_status 
i40e_set_lan_rx_queue_context(struct i40e_hw *hw, -						    u16 queue, -						    struct i40e_hmc_obj_rxq *s); +int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, +		      u32 rxq_num, u32 fcoe_cntx_num, +		      u32 fcoe_filt_num); +int i40e_configure_lan_hmc(struct i40e_hw *hw, +			   enum i40e_hmc_model model); +int i40e_shutdown_lan_hmc(struct i40e_hw *hw); + +int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, +				    u16 queue); +int i40e_set_lan_tx_queue_context(struct i40e_hw *hw, +				  u16 queue, +				  struct i40e_hmc_obj_txq *s); +int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, +				    u16 queue); +int i40e_set_lan_rx_queue_context(struct i40e_hw *hw, +				  u16 queue, +				  struct i40e_hmc_obj_rxq *s);  #endif /* _I40E_LAN_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b3cb587a2032..467001db5070 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -419,10 +419,10 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,  	unsigned int start;  	do { -		start = u64_stats_fetch_begin_irq(&ring->syncp); +		start = u64_stats_fetch_begin(&ring->syncp);  		packets = ring->stats.packets;  		bytes   = ring->stats.bytes; -	} while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +	} while (u64_stats_fetch_retry(&ring->syncp, start));  	stats->tx_packets += packets;  	stats->tx_bytes   += bytes; @@ -472,10 +472,10 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,  		if (!ring)  			continue;  		do { -			start   = u64_stats_fetch_begin_irq(&ring->syncp); +			start   = u64_stats_fetch_begin(&ring->syncp);  			packets = ring->stats.packets;  			bytes   = ring->stats.bytes; -		} while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +		} while (u64_stats_fetch_retry(&ring->syncp, start));  		stats->rx_packets += packets;  		stats->rx_bytes   += bytes; @@ -897,10 +897,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)  			continue;  		do { -			start = u64_stats_fetch_begin_irq(&p->syncp); +			start = u64_stats_fetch_begin(&p->syncp);  			packets = p->stats.packets;  			bytes = p->stats.bytes; -		} while (u64_stats_fetch_retry_irq(&p->syncp, start)); +		} while (u64_stats_fetch_retry(&p->syncp, start));  		tx_b += bytes;  		tx_p += packets;  		tx_restart += p->tx_stats.restart_queue; @@ -915,10 +915,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)  			continue;  		do { -			start = u64_stats_fetch_begin_irq(&p->syncp); +			start = u64_stats_fetch_begin(&p->syncp);  			packets = p->stats.packets;  			bytes = p->stats.bytes; -		} while (u64_stats_fetch_retry_irq(&p->syncp, start)); +		} while (u64_stats_fetch_retry(&p->syncp, start));  		rx_b += bytes;  		rx_p += packets;  		rx_buf += p->rx_stats.alloc_buff_failed; @@ -935,10 +935,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)  				continue;  			do { -				start = u64_stats_fetch_begin_irq(&p->syncp); +				start = u64_stats_fetch_begin(&p->syncp);  				packets = p->stats.packets;  				bytes = p->stats.bytes; -			} while (u64_stats_fetch_retry_irq(&p->syncp, start)); +			} while (u64_stats_fetch_retry(&p->syncp, start));  			tx_b += bytes;  			tx_p += packets;  			tx_restart += p->tx_stats.restart_queue; @@ -1817,13 +1817,13 @@ static int i40e_set_mac(struct net_device *netdev, void *p)  	spin_unlock_bh(&vsi->mac_filter_hash_lock);  	if (vsi->type == I40E_VSI_MAIN) { -		i40e_status ret; +		int ret;  		ret = 
i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,  						addr->sa_data, NULL);  		if (ret) -			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n", -				    i40e_stat_str(hw, ret), +			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n", +				    ERR_PTR(ret),  				    i40e_aq_str(hw, hw->aq.asq_last_status));  	} @@ -1854,8 +1854,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,  		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);  		if (ret) {  			dev_info(&pf->pdev->dev, -				 "Cannot set RSS key, err %s aq_err %s\n", -				 i40e_stat_str(hw, ret), +				 "Cannot set RSS key, err %pe aq_err %s\n", +				 ERR_PTR(ret),  				 i40e_aq_str(hw, hw->aq.asq_last_status));  			return ret;  		} @@ -1866,8 +1866,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,  		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);  		if (ret) {  			dev_info(&pf->pdev->dev, -				 "Cannot set RSS lut, err %s aq_err %s\n", -				 i40e_stat_str(hw, ret), +				 "Cannot set RSS lut, err %pe aq_err %s\n", +				 ERR_PTR(ret),  				 i40e_aq_str(hw, hw->aq.asq_last_status));  			return ret;  		} @@ -2349,7 +2349,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,  {  	struct i40e_hw *hw = &vsi->back->hw;  	enum i40e_admin_queue_err aq_status; -	i40e_status aq_ret; +	int aq_ret;  	aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,  					   &aq_status); @@ -2358,8 +2358,8 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,  	if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {  		*retval = -EIO;  		dev_info(&vsi->back->pdev->dev, -			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n", -			 vsi_name, i40e_stat_str(hw, aq_ret), +			 "ignoring delete macvlan error on %s, err %pe, aq_err %s\n", +			 vsi_name, ERR_PTR(aq_ret),  			 i40e_aq_str(hw, aq_status));  	}  } @@ -2423,13 +2423,13 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,   *   * Returns status indicating success or failure;   **/ -static i40e_status +static int  i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,  			  struct i40e_mac_filter *f)  {  	bool enable = f->state == I40E_FILTER_NEW;  	struct i40e_hw *hw = &vsi->back->hw; -	i40e_status aq_ret; +	int aq_ret;  	if (f->vlan == I40E_VLAN_ANY) {  		aq_ret = i40e_aq_set_vsi_broadcast(hw, @@ -2468,7 +2468,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)  {  	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];  	struct i40e_hw *hw = &pf->hw; -	i40e_status aq_ret; +	int aq_ret;  	if (vsi->type == I40E_VSI_MAIN &&  	    pf->lan_veb != I40E_NO_VEB && @@ -2488,8 +2488,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)  							   NULL);  		if (aq_ret) {  			dev_info(&pf->pdev->dev, -				 "Set default VSI failed, err %s, aq_err %s\n", -				 i40e_stat_str(hw, aq_ret), +				 "Set default VSI failed, err %pe, aq_err %s\n", +				 ERR_PTR(aq_ret),  				 i40e_aq_str(hw, hw->aq.asq_last_status));  		}  	} else { @@ -2500,8 +2500,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)  						  true);  		if (aq_ret) {  			dev_info(&pf->pdev->dev, -				 "set unicast promisc failed, err %s, aq_err %s\n", -				 i40e_stat_str(hw, aq_ret), +				 "set unicast promisc failed, err %pe, aq_err %s\n", +				 ERR_PTR(aq_ret),  				 i40e_aq_str(hw, hw->aq.asq_last_status));  		}  		aq_ret = i40e_aq_set_vsi_multicast_promiscuous( @@ -2510,8 +2510,8 
@@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)  						  promisc, NULL);  		if (aq_ret) {  			dev_info(&pf->pdev->dev, -				 "set multicast promisc failed, err %s, aq_err %s\n", -				 i40e_stat_str(hw, aq_ret), +				 "set multicast promisc failed, err %pe, aq_err %s\n", +				 ERR_PTR(aq_ret),  				 i40e_aq_str(hw, hw->aq.asq_last_status));  		}  	} @@ -2541,12 +2541,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  	unsigned int vlan_filters = 0;  	char vsi_name[16] = "PF";  	int filter_list_len = 0; -	i40e_status aq_ret = 0;  	u32 changed_flags = 0;  	struct hlist_node *h;  	struct i40e_pf *pf;  	int num_add = 0;  	int num_del = 0; +	int aq_ret = 0;  	int retval = 0;  	u16 cmd_flags;  	int list_size; @@ -2814,9 +2814,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  			retval = i40e_aq_rc_to_posix(aq_ret,  						     hw->aq.asq_last_status);  			dev_info(&pf->pdev->dev, -				 "set multi promisc failed on %s, err %s aq_err %s\n", +				 "set multi promisc failed on %s, err %pe aq_err %s\n",  				 vsi_name, -				 i40e_stat_str(hw, aq_ret), +				 ERR_PTR(aq_ret),  				 i40e_aq_str(hw, hw->aq.asq_last_status));  		} else {  			dev_info(&pf->pdev->dev, "%s allmulti mode.\n", @@ -2834,10 +2834,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  			retval = i40e_aq_rc_to_posix(aq_ret,  						     hw->aq.asq_last_status);  			dev_info(&pf->pdev->dev, -				 "Setting promiscuous %s failed on %s, err %s aq_err %s\n", +				 "Setting promiscuous %s failed on %s, err %pe aq_err %s\n",  				 cur_promisc ? "on" : "off",  				 vsi_name, -				 i40e_stat_str(hw, aq_ret), +				 ERR_PTR(aq_ret),  				 i40e_aq_str(hw, hw->aq.asq_last_status));  		}  	} @@ -2921,7 +2921,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)  	struct i40e_pf *pf = vsi->back;  	if (i40e_enabled_xdp_vsi(vsi)) { -		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; +		int frame_size = new_mtu + I40E_PACKET_HDR_PAD;  		if (frame_size > i40e_max_xdp_frame_size(vsi))  			return -EINVAL; @@ -2965,7 +2965,7 @@ int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)  void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)  {  	struct i40e_vsi_context ctxt; -	i40e_status ret; +	int ret;  	/* Don't modify stripping options if a port VLAN is active */  	if (vsi->info.pvid) @@ -2985,8 +2985,8 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)  	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);  	if (ret) {  		dev_info(&vsi->back->pdev->dev, -			 "update vlan stripping failed, err %s aq_err %s\n", -			 i40e_stat_str(&vsi->back->hw, ret), +			 "update vlan stripping failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&vsi->back->hw,  				     vsi->back->hw.aq.asq_last_status));  	} @@ -2999,7 +2999,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)  void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)  {  	struct i40e_vsi_context ctxt; -	i40e_status ret; +	int ret;  	/* Don't modify stripping options if a port VLAN is active */  	if (vsi->info.pvid) @@ -3020,8 +3020,8 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)  	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);  	if (ret) {  		dev_info(&vsi->back->pdev->dev, -			 "update vlan stripping failed, err %s aq_err %s\n", -			 i40e_stat_str(&vsi->back->hw, ret), +			 "update vlan stripping failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&vsi->back->hw,  				     vsi->back->hw.aq.asq_last_status));  	} @@ -3252,7 +3252,7 @@ static void 
i40e_restore_vlan(struct i40e_vsi *vsi)  int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)  {  	struct i40e_vsi_context ctxt; -	i40e_status ret; +	int ret;  	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);  	vsi->info.pvid = cpu_to_le16(vid); @@ -3265,8 +3265,8 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)  	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);  	if (ret) {  		dev_info(&vsi->back->pdev->dev, -			 "add pvid failed, err %s aq_err %s\n", -			 i40e_stat_str(&vsi->back->hw, ret), +			 "add pvid failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&vsi->back->hw,  				     vsi->back->hw.aq.asq_last_status));  		return -ENOENT; @@ -3429,8 +3429,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)  	u16 pf_q = vsi->base_queue + ring->queue_index;  	struct i40e_hw *hw = &vsi->back->hw;  	struct i40e_hmc_obj_txq tx_ctx; -	i40e_status err = 0;  	u32 qtx_ctl = 0; +	int err = 0;  	if (ring_is_xdp(ring))  		ring->xsk_pool = i40e_xsk_pool(ring); @@ -3554,7 +3554,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)  	u16 pf_q = vsi->base_queue + ring->queue_index;  	struct i40e_hw *hw = &vsi->back->hw;  	struct i40e_hmc_obj_rxq rx_ctx; -	i40e_status err = 0; +	int err = 0;  	bool ok;  	int ret; @@ -3694,6 +3694,24 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)  }  /** + * i40e_calculate_vsi_rx_buf_len - Calculates buffer length + * + * @vsi: VSI to calculate rx_buf_len from + */ +static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi) +{ +	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) +		return I40E_RXBUFFER_2048; + +#if (PAGE_SIZE < 8192) +	if (!I40E_2K_TOO_SMALL_WITH_PADDING && vsi->netdev->mtu <= ETH_DATA_LEN) +		return I40E_RXBUFFER_1536 - NET_IP_ALIGN; +#endif + +	return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048; +} + +/**   * i40e_vsi_configure_rx - Configure the VSI for Rx   * @vsi: the VSI being configured   * @@ -3704,20 +3722,14 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)  	int err = 0;  	u16 i; -	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) { -		vsi->max_frame = I40E_MAX_RXBUFFER; -		vsi->rx_buf_len = I40E_RXBUFFER_2048; +	vsi->max_frame = I40E_MAX_RXBUFFER; +	vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi); +  #if (PAGE_SIZE < 8192) -	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING && -		   (vsi->netdev->mtu <= ETH_DATA_LEN)) { +	if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING && +	    vsi->netdev->mtu <= ETH_DATA_LEN)  		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; -		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;  #endif -	} else { -		vsi->max_frame = I40E_MAX_RXBUFFER; -		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? 
I40E_RXBUFFER_3072 : -						       I40E_RXBUFFER_2048; -	}  	/* set up individual rings */  	for (i = 0; i < vsi->num_queue_pairs && !err; i++) @@ -4123,6 +4135,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)  		}  		/* register for affinity change notifications */ +		q_vector->irq_num = irq_num;  		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;  		q_vector->affinity_notify.release = i40e_irq_affinity_release;  		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); @@ -5512,16 +5525,16 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)  	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};  	struct i40e_pf *pf = vsi->back;  	struct i40e_hw *hw = &pf->hw; -	i40e_status ret;  	u32 tc_bw_max; +	int ret;  	int i;  	/* Get the VSI level BW configuration */  	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "couldn't get PF vsi bw config, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "couldn't get PF vsi bw config, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		return -EINVAL;  	} @@ -5531,8 +5544,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)  					       NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "couldn't get PF vsi ets bw config, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		return -EINVAL;  	} @@ -5573,7 +5586,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,  {  	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;  	struct i40e_pf *pf = vsi->back; -	i40e_status ret; +	int ret;  	int i;  	/* There is no need to reset BW when mqprio mode is on.  
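 *
 * For reference, the Rx buffer length now chosen in one place by the new
 * i40e_calculate_vsi_rx_buf_len() helper above:
 *
 *	no netdev, or I40E_FLAG_LEGACY_RX set        -> I40E_RXBUFFER_2048
 *	PAGE_SIZE < 8192, mtu <= ETH_DATA_LEN and
 *	  !I40E_2K_TOO_SMALL_WITH_PADDING            -> I40E_RXBUFFER_1536 - NET_IP_ALIGN
 *	any other PAGE_SIZE < 8192 configuration     -> I40E_RXBUFFER_3072
 *	PAGE_SIZE >= 8192                            -> I40E_RXBUFFER_2048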
*/ @@ -5721,8 +5734,8 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)  	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);  	if (ret) { -		dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n", -			 i40e_stat_str(hw, ret), +		dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(hw, hw->aq.asq_last_status));  		return ret;  	} @@ -5777,8 +5790,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)  						  &bw_config, NULL);  		if (ret) {  			dev_info(&pf->pdev->dev, -				 "Failed querying vsi bw info, err %s aq_err %s\n", -				 i40e_stat_str(hw, ret), +				 "Failed querying vsi bw info, err %pe aq_err %s\n", +				 ERR_PTR(ret),  				 i40e_aq_str(hw, hw->aq.asq_last_status));  			goto out;  		} @@ -5844,8 +5857,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)  	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Update vsi tc config failed, err %s aq_err %s\n", -			 i40e_stat_str(hw, ret), +			 "Update vsi tc config failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(hw, hw->aq.asq_last_status));  		goto out;  	} @@ -5857,8 +5870,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)  	ret = i40e_vsi_get_bw_info(vsi);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Failed updating vsi bw info, err %s aq_err %s\n", -			 i40e_stat_str(hw, ret), +			 "Failed updating vsi bw info, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(hw, hw->aq.asq_last_status));  		goto out;  	} @@ -5949,8 +5962,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)  					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);  	if (ret)  		dev_err(&pf->pdev->dev, -			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n", -			max_tx_rate, seid, i40e_stat_str(&pf->hw, ret), +			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n", +			max_tx_rate, seid, ERR_PTR(ret),  			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  	return ret;  } @@ -6025,8 +6038,8 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)  			last_aq_status = pf->hw.aq.asq_last_status;  			if (ret)  				dev_info(&pf->pdev->dev, -					 "Failed to delete cloud filter, err %s aq_err %s\n", -					 i40e_stat_str(&pf->hw, ret), +					 "Failed to delete cloud filter, err %pe aq_err %s\n", +					 ERR_PTR(ret),  					 i40e_aq_str(&pf->hw, last_aq_status));  			kfree(cfilter);  		} @@ -6160,8 +6173,8 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)  	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Cannot set RSS lut, err %s aq_err %s\n", -			 i40e_stat_str(hw, ret), +			 "Cannot set RSS lut, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(hw, hw->aq.asq_last_status));  		kfree(lut);  		return ret; @@ -6259,8 +6272,8 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,  	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "add new vsi failed, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "add new vsi failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw,  				     pf->hw.aq.asq_last_status));  		return -ENOENT; @@ -6291,7 +6304,7 @@ static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,  				  u8 *bw_share)  {  	struct i40e_aqc_configure_vsi_tc_bw_data bw_data; -	i40e_status ret; +	int ret;  	int 
@@ -6327,9 +6340,9 @@ static int i40e_channel_config_tx_ring(struct i40e_pf *pf,  				       struct i40e_vsi *vsi,  				       struct i40e_channel *ch)  { -	i40e_status ret; -	int i;  	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; +	int ret; +	int i;  	/* Enable ETS TCs with equal BW Share for now across all VSIs */  	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -6505,8 +6518,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)  					mode, NULL);  	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)  		dev_err(&pf->pdev->dev, -			"couldn't set switch config bits, err %s aq_err %s\n", -			i40e_stat_str(hw, ret), +			"couldn't set switch config bits, err %pe aq_err %s\n", +			ERR_PTR(ret),  			i40e_aq_str(hw,  				    hw->aq.asq_last_status)); @@ -6706,8 +6719,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)  						   &bw_data, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "VEB bw config failed, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "VEB bw config failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		goto out;  	} @@ -6716,8 +6729,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)  	ret = i40e_veb_get_bw_info(veb);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Failed getting veb bw config, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "Failed getting veb bw config, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  	} @@ -6800,8 +6813,8 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)  	ret = i40e_aq_resume_port_tx(hw, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Resume Port Tx failed, err %s aq_err %s\n", -			  i40e_stat_str(&pf->hw, ret), +			 "Resume Port Tx failed, err %pe aq_err %s\n", +			  ERR_PTR(ret),  			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		/* Schedule PF reset to recover */  		set_bit(__I40E_PF_RESET_REQUESTED, pf->state); @@ -6825,8 +6838,8 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)  	ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Suspend Port Tx failed, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "Suspend Port Tx failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		/* Schedule PF reset to recover */  		set_bit(__I40E_PF_RESET_REQUESTED, pf->state); @@ -6865,8 +6878,8 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,  	ret = i40e_set_dcb_config(&pf->hw);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Set DCB Config failed, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "Set DCB Config failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		goto out;  	} @@ -6982,8 +6995,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)  		 i40e_aqc_opc_modify_switching_comp_ets, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Modify Port ETS failed, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "Modify Port ETS failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		goto out;  	} @@ -7020,8 +7033,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)  	ret = i40e_aq_dcb_updated(&pf->hw, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "DCB Updated failed, err %s aq_err %s\n", -			 
i40e_stat_str(&pf->hw, ret), +			 "DCB Updated failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		goto out;  	} @@ -7104,8 +7117,8 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)  		 i40e_aqc_opc_enable_switching_comp_ets, NULL);  	if (err) {  		dev_info(&pf->pdev->dev, -			 "Enable Port ETS failed, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, err), +			 "Enable Port ETS failed, err %pe aq_err %s\n", +			 ERR_PTR(err),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		err = -ENOENT;  		goto out; @@ -7184,8 +7197,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)  		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;  	} else {  		dev_info(&pf->pdev->dev, -			 "Query for DCB configuration failed, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, err), +			 "Query for DCB configuration failed, err %pe aq_err %s\n", +			 ERR_PTR(err),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  	} @@ -7403,15 +7416,15 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)   * @pf: board private structure   * @is_up: whether the link state should be forced up or down   **/ -static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) +static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)  {  	struct i40e_aq_get_phy_abilities_resp abilities;  	struct i40e_aq_set_phy_config config = {0};  	bool non_zero_phy_type = is_up;  	struct i40e_hw *hw = &pf->hw; -	i40e_status err;  	u64 mask;  	u8 speed; +	int err;  	/* Card might've been put in an unstable state by other drivers  	 * and applications, which causes incorrect speed values being @@ -7423,8 +7436,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)  					   NULL);  	if (err) {  		dev_err(&pf->pdev->dev, -			"failed to get phy cap., ret =  %s last_status =  %s\n", -			i40e_stat_str(hw, err), +			"failed to get phy cap., ret =  %pe last_status =  %s\n", +			ERR_PTR(err),  			i40e_aq_str(hw, hw->aq.asq_last_status));  		return err;  	} @@ -7435,8 +7448,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)  					   NULL);  	if (err) {  		dev_err(&pf->pdev->dev, -			"failed to get phy cap., ret =  %s last_status =  %s\n", -			i40e_stat_str(hw, err), +			"failed to get phy cap., ret =  %pe last_status =  %s\n", +			ERR_PTR(err),  			i40e_aq_str(hw, hw->aq.asq_last_status));  		return err;  	} @@ -7480,8 +7493,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)  	if (err) {  		dev_err(&pf->pdev->dev, -			"set phy config ret =  %s last_status =  %s\n", -			i40e_stat_str(&pf->hw, err), +			"set phy config ret =  %pe last_status =  %s\n", +			ERR_PTR(err),  			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		return err;  	} @@ -7644,11 +7657,11 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)   * This function deletes a mac filter on the channel VSI which serves as the   * macvlan. Returns 0 on success.   
**/ -static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid, -					   const u8 *macaddr, int *aq_err) +static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid, +				   const u8 *macaddr, int *aq_err)  {  	struct i40e_aqc_remove_macvlan_element_data element; -	i40e_status status; +	int status;  	memset(&element, 0, sizeof(element));  	ether_addr_copy(element.mac_addr, macaddr); @@ -7670,12 +7683,12 @@ static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,   * This function adds a mac filter on the channel VSI which serves as the   * macvlan. Returns 0 on success.   **/ -static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid, -					   const u8 *macaddr, int *aq_err) +static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid, +				   const u8 *macaddr, int *aq_err)  {  	struct i40e_aqc_add_macvlan_element_data element; -	i40e_status status;  	u16 cmd_flags = 0; +	int status;  	ether_addr_copy(element.mac_addr, macaddr);  	element.vlan_tag = 0; @@ -7821,8 +7834,8 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,  			rx_ring->netdev = NULL;  		}  		dev_info(&pf->pdev->dev, -			 "Error adding mac filter on macvlan err %s, aq_err %s\n", -			  i40e_stat_str(hw, ret), +			 "Error adding mac filter on macvlan err %pe, aq_err %s\n", +			  ERR_PTR(ret),  			  i40e_aq_str(hw, aq_err));  		netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");  	} @@ -7894,8 +7907,8 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,  	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Update vsi tc config failed, err %s aq_err %s\n", -			 i40e_stat_str(hw, ret), +			 "Update vsi tc config failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(hw, hw->aq.asq_last_status));  		return ret;  	} @@ -8110,8 +8123,8 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)  				ch->fwd = NULL;  			} else {  				dev_info(&pf->pdev->dev, -					 "Error deleting mac filter on macvlan err %s, aq_err %s\n", -					  i40e_stat_str(hw, ret), +					 "Error deleting mac filter on macvlan err %pe, aq_err %s\n", +					  ERR_PTR(ret),  					  i40e_aq_str(hw, aq_err));  			}  			break; @@ -8862,8 +8875,8 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,  	kfree(filter);  	if (err) {  		dev_err(&pf->pdev->dev, -			"Failed to delete cloud filter, err %s\n", -			i40e_stat_str(&pf->hw, err)); +			"Failed to delete cloud filter, err %pe\n", +			ERR_PTR(err));  		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);  	} @@ -9425,8 +9438,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,  			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;  		} else {  			dev_info(&pf->pdev->dev, -				 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, ret), +				 "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n", +				 ERR_PTR(ret),  				 i40e_aq_str(&pf->hw,  					     pf->hw.aq.asq_last_status));  		} @@ -9874,8 +9887,8 @@ static void i40e_link_event(struct i40e_pf *pf)  {  	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];  	u8 new_link_speed, old_link_speed; -	i40e_status status;  	bool new_link, old_link; +	int status;  #ifdef CONFIG_I40E_DCB  	int err;  #endif /* CONFIG_I40E_DCB */ @@ -10086,9 +10099,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)  	struct i40e_arq_event_info event;  	struct i40e_hw *hw = &pf->hw;  	u16 pending, i = 0; -	i40e_status ret;  	u16 
opcode;  	u32 oldval; +	int ret;  	u32 val;  	/* Do not run clean AQ when PF reset fails */ @@ -10252,8 +10265,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)  	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "couldn't get PF vsi config, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "couldn't get PF vsi config, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		return;  	} @@ -10264,8 +10277,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)  	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "update vsi switch failed, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "update vsi switch failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  	}  } @@ -10288,8 +10301,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)  	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "couldn't get PF vsi config, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "couldn't get PF vsi config, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		return;  	} @@ -10300,8 +10313,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)  	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "update vsi switch failed, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "update vsi switch failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  	}  } @@ -10445,8 +10458,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf,  			buf_len = data_size;  		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {  			dev_info(&pf->pdev->dev, -				 "capability discovery failed, err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, err), +				 "capability discovery failed, err %pe aq_err %s\n", +				 ERR_PTR(err),  				 i40e_aq_str(&pf->hw,  					     pf->hw.aq.asq_last_status));  			return -ENODEV; @@ -10567,7 +10580,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)  	struct i40e_cloud_filter *cfilter;  	struct i40e_pf *pf = vsi->back;  	struct hlist_node *node; -	i40e_status ret; +	int ret;  	/* Add cloud filters back if they exist */  	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, @@ -10583,8 +10596,8 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)  		if (ret) {  			dev_dbg(&pf->pdev->dev, -				"Failed to rebuild cloud filter, err %s aq_err %s\n", -				i40e_stat_str(&pf->hw, ret), +				"Failed to rebuild cloud filter, err %pe aq_err %s\n", +				ERR_PTR(ret),  				i40e_aq_str(&pf->hw,  					    pf->hw.aq.asq_last_status));  			return ret; @@ -10602,7 +10615,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)  static int i40e_rebuild_channels(struct i40e_vsi *vsi)  {  	struct i40e_channel *ch, *ch_tmp; -	i40e_status ret; +	int ret;  	if (list_empty(&vsi->ch_list))  		return 0; @@ -10655,6 +10668,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)  }  /** + * i40e_clean_xps_state - clean xps state for every tx_ring + * @vsi: ptr to the VSI + **/ +static void i40e_clean_xps_state(struct i40e_vsi *vsi) +{ +	int i; + +	if (vsi->tx_rings) +		for (i = 0; i < vsi->num_queue_pairs; i++) +			if (vsi->tx_rings[i]) +				
clear_bit(__I40E_TX_XPS_INIT_DONE, +					  vsi->tx_rings[i]->state); +} + +/**   * i40e_prep_for_reset - prep for the core to reset   * @pf: board private structure   * @@ -10663,7 +10691,7 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)  static void i40e_prep_for_reset(struct i40e_pf *pf)  {  	struct i40e_hw *hw = &pf->hw; -	i40e_status ret = 0; +	int ret = 0;  	u32 v;  	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state); @@ -10678,8 +10706,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)  	i40e_pf_quiesce_all_vsi(pf);  	for (v = 0; v < pf->num_alloc_vsi; v++) { -		if (pf->vsi[v]) +		if (pf->vsi[v]) { +			i40e_clean_xps_state(pf->vsi[v]);  			pf->vsi[v]->seid = 0; +		}  	}  	i40e_shutdown_adminq(&pf->hw); @@ -10766,7 +10796,7 @@ static void i40e_get_oem_version(struct i40e_hw *hw)  static int i40e_reset(struct i40e_pf *pf)  {  	struct i40e_hw *hw = &pf->hw; -	i40e_status ret; +	int ret;  	ret = i40e_pf_reset(hw);  	if (ret) { @@ -10791,7 +10821,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)  	const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);  	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];  	struct i40e_hw *hw = &pf->hw; -	i40e_status ret; +	int ret;  	u32 val;  	int v; @@ -10807,8 +10837,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)  	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */  	ret = i40e_init_adminq(&pf->hw);  	if (ret) { -		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		goto clear_recovery;  	} @@ -10919,8 +10949,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)  					 I40E_AQ_EVENT_MEDIA_NA |  					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);  	if (ret) -		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +		dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  	/* Rebuild the VSIs and VEBs that existed before reset. @@ -11023,8 +11053,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)  		msleep(75);  		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);  		if (ret) -			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, ret), +			dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", +				 ERR_PTR(ret),  				 i40e_aq_str(&pf->hw,  					     pf->hw.aq.asq_last_status));  	} @@ -11052,9 +11082,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)  	ret = i40e_set_promiscuous(pf, pf->cur_promisc);  	if (ret)  		dev_warn(&pf->pdev->dev, -			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n", +			 "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",  			 pf->cur_promisc ? 
"on" : "off", -			 i40e_stat_str(&pf->hw, ret), +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  	i40e_reset_all_vfs(pf, true); @@ -12188,8 +12218,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,  			(struct i40e_aqc_get_set_rss_key_data *)seed);  		if (ret) {  			dev_info(&pf->pdev->dev, -				 "Cannot get RSS key, err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, ret), +				 "Cannot get RSS key, err %pe aq_err %s\n", +				 ERR_PTR(ret),  				 i40e_aq_str(&pf->hw,  					     pf->hw.aq.asq_last_status));  			return ret; @@ -12202,8 +12232,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,  		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);  		if (ret) {  			dev_info(&pf->pdev->dev, -				 "Cannot get RSS lut, err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, ret), +				 "Cannot get RSS lut, err %pe aq_err %s\n", +				 ERR_PTR(ret),  				 i40e_aq_str(&pf->hw,  					     pf->hw.aq.asq_last_status));  			return ret; @@ -12478,11 +12508,11 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)   * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition   * @pf: board private structure   **/ -i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf) +int i40e_get_partition_bw_setting(struct i40e_pf *pf)  { -	i40e_status status;  	bool min_valid, max_valid;  	u32 max_bw, min_bw; +	int status;  	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,  					   &min_valid, &max_valid); @@ -12501,10 +12531,10 @@ i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)   * i40e_set_partition_bw_setting - Set BW settings for this PF partition   * @pf: board private structure   **/ -i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf) +int i40e_set_partition_bw_setting(struct i40e_pf *pf)  {  	struct i40e_aqc_configure_partition_bw_data bw_data; -	i40e_status status; +	int status;  	memset(&bw_data, 0, sizeof(bw_data)); @@ -12523,12 +12553,12 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)   * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition   * @pf: board private structure   **/ -i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf) +int i40e_commit_partition_bw_setting(struct i40e_pf *pf)  {  	/* Commit temporary BW setting to permanent NVM image */  	enum i40e_admin_queue_err last_aq_status; -	i40e_status ret;  	u16 nvm_word; +	int ret;  	if (pf->hw.partition_id != 1) {  		dev_info(&pf->pdev->dev, @@ -12543,8 +12573,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)  	last_aq_status = pf->hw.aq.asq_last_status;  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "Cannot acquire NVM for read access, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "Cannot acquire NVM for read access, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, last_aq_status));  		goto bw_commit_out;  	} @@ -12560,8 +12590,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)  	last_aq_status = pf->hw.aq.asq_last_status;  	i40e_release_nvm(&pf->hw);  	if (ret) { -		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +		dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, last_aq_status));  		goto bw_commit_out;  	} @@ -12574,8 +12604,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)  	last_aq_status = pf->hw.aq.asq_last_status;  	if (ret) {  		
dev_info(&pf->pdev->dev, -			 "Cannot acquire NVM for write access, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "Cannot acquire NVM for write access, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, last_aq_status));  		goto bw_commit_out;  	} @@ -12594,8 +12624,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)  	i40e_release_nvm(&pf->hw);  	if (ret)  		dev_info(&pf->pdev->dev, -			 "BW settings NOT SAVED, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "BW settings NOT SAVED, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, last_aq_status));  bw_commit_out: @@ -12616,7 +12646,7 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)  #define I40E_LINK_BEHAVIOR_WORD_LENGTH		0x1  #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED	BIT(0)  #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH	4 -	i40e_status read_status = I40E_SUCCESS; +	int read_status = I40E_SUCCESS;  	u16 sr_emp_sr_settings_ptr = 0;  	u16 features_enable = 0;  	u16 link_behavior = 0; @@ -12649,8 +12679,8 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)  err_nvm:  	dev_warn(&pf->pdev->dev, -		 "total-port-shutdown feature is off due to read nvm error: %s\n", -		 i40e_stat_str(&pf->hw, read_status)); +		 "total-port-shutdown feature is off due to read nvm error: %pe\n", +		 ERR_PTR(read_status));  	return ret;  } @@ -12920,6 +12950,29 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi)  }  /** + * i40e_set_loopback - turn on/off loopback mode on underlying PF + * @vsi: ptr to VSI + * @ena: flag to indicate the on/off setting + */ +static int i40e_set_loopback(struct i40e_vsi *vsi, bool ena) +{ +	bool if_running = netif_running(vsi->netdev) && +			  !test_and_set_bit(__I40E_VSI_DOWN, vsi->state); +	int ret; + +	if (if_running) +		i40e_down(vsi); + +	ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); +	if (ret) +		netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); +	if (if_running) +		i40e_up(vsi); + +	return ret; +} + +/**   * i40e_set_features - set the netdev feature flags   * @netdev: ptr to the netdev being adjusted   * @features: the feature set that the stack is suggesting @@ -12959,6 +13012,9 @@ static int i40e_set_features(struct net_device *netdev,  	if (need_reset)  		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); +	if ((features ^ netdev->features) & NETIF_F_LOOPBACK) +		return i40e_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); +  	return 0;  } @@ -12969,7 +13025,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_hw *hw = &np->vsi->back->hw;  	u8 type, filter_index; -	i40e_status ret; +	int ret;  	type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? 
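I40E_AQC_TUNNEL_TYPE_VXLAN :  						   I40E_AQC_TUNNEL_TYPE_NGE;

The loopback hunks above give i40e a user-togglable MAC loopback: NETIF_F_LOOPBACK is advertised in hw_features, and i40e_set_features() hands a change of that single bit to the new i40e_set_loopback(). A hedged sketch of the pattern (the my_* names are placeholders, not driver API): XOR-ing the requested feature set against netdev->features isolates the bits that actually changed, so the hardware is only touched on a real transition.

#include <linux/netdevice.h>

static int my_hw_set_loopback(struct net_device *netdev, bool ena);

/* Sketch of an ndo_set_features handler reacting to NETIF_F_LOOPBACK;
 * my_hw_set_loopback() stands in for a device-specific command such as
 * i40e_aq_set_mac_loopback().
 */
static int my_set_features(struct net_device *netdev,
			   netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_LOOPBACK)
		return my_hw_set_loopback(netdev,
					  !!(features & NETIF_F_LOOPBACK));

	return 0;
}

Note how i40e_set_loopback() above additionally quiesces a running VSI with i40e_down()/i40e_up() around the admin queue call.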
@@ -12977,8 +13033,8 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,  	ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,  				     NULL);  	if (ret) { -		netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n", -			    i40e_stat_str(hw, ret), +		netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n", +			    ERR_PTR(ret),  			    i40e_aq_str(hw, hw->aq.asq_last_status));  		return -EIO;  	} @@ -12993,12 +13049,12 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,  {  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_hw *hw = &np->vsi->back->hw; -	i40e_status ret; +	int ret;  	ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);  	if (ret) { -		netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n", -			    i40e_stat_str(hw, ret), +		netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n", +			    ERR_PTR(ret),  			    i40e_aq_str(hw, hw->aq.asq_last_status));  		return -EIO;  	} @@ -13111,6 +13167,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,  	}  	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); +	if (!br_spec) +		return -EINVAL;  	nla_for_each_nested(attr, br_spec, rem) {  		__u16 mode; @@ -13265,7 +13323,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,  	int i;  	/* Don't allow frames that span over multiple buffers */ -	if (frame_size > vsi->rx_buf_len) { +	if (frame_size > i40e_calculate_vsi_rx_buf_len(vsi)) {  		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");  		return -EINVAL;  	} @@ -13283,9 +13341,11 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,  	old_prog = xchg(&vsi->xdp_prog, prog);  	if (need_reset) { -		if (!prog) +		if (!prog) { +			xdp_features_clear_redirect_target(vsi->netdev);  			/* Wait until ndo_xsk_wakeup completes. */  			synchronize_rcu(); +		}  		i40e_reset_and_rebuild(pf, true, true);  	} @@ -13306,11 +13366,13 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,  	/* Kick start the NAPI context if there is an AF_XDP socket open  	 * on that queue id. This so that receiving will start.  	 */ -	if (need_reset && prog) +	if (need_reset && prog) {  		for (i = 0; i < vsi->num_queue_pairs; i++)  			if (vsi->xdp_rings[i]->xsk_pool)  				(void)i40e_xsk_wakeup(vsi->netdev, i,  						      XDP_WAKEUP_RX); +		xdp_features_set_redirect_target(vsi->netdev, true); +	}  	return 0;  } @@ -13721,7 +13783,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)  	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))  		hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; -	netdev->hw_features |= hw_features; +	netdev->hw_features |= hw_features | NETIF_F_LOOPBACK;  	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;  	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; @@ -13745,6 +13807,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)  		spin_lock_bh(&vsi->mac_filter_hash_lock);  		i40e_add_mac_filter(vsi, mac_addr);  		spin_unlock_bh(&vsi->mac_filter_hash_lock); + +		netdev->xdp_features = NETDEV_XDP_ACT_BASIC | +				       NETDEV_XDP_ACT_REDIRECT | +				       NETDEV_XDP_ACT_XSK_ZEROCOPY;  	} else {  		/* Relate the VSI_VMDQ name to the VSI_MAIN name. 
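Note that we  		 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to

The XDP hunks above adopt the 6.3-era xdp_features interface: i40e_config_netdev() now advertises the driver's capabilities on netdev->xdp_features, and i40e_xdp_setup() sets or clears the redirect-target bit as a program is attached or torn down. A sketch of that handshake (my_setup_xdp() is illustrative; the xdp_features_* helpers and NETDEV_XDP_ACT_* flags are the real interface used above):

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Illustrative: advertise capabilities once, then expose the device as
 * an XDP_REDIRECT target only while a program is attached.  The second
 * argument of xdp_features_set_redirect_target() declares whether
 * multi-buffer (S/G) frames may be redirected to this device.
 */
static void my_setup_xdp(struct net_device *netdev, bool prog_attached)
{
	netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
			       NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_XSK_ZEROCOPY;

	if (prog_attached)
		xdp_features_set_redirect_target(netdev, true);
	else
		xdp_features_clear_redirect_target(netdev);
}

Clearing the bit before the reset/rebuild and re-setting it only after the queues are kicked keeps peers from redirecting into rings that are down.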
@@ -13885,8 +13951,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)  		ctxt.flags = I40E_AQ_VSI_TYPE_PF;  		if (ret) {  			dev_info(&pf->pdev->dev, -				 "couldn't get PF vsi config, err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, ret), +				 "couldn't get PF vsi config, err %pe aq_err %s\n", +				 ERR_PTR(ret),  				 i40e_aq_str(&pf->hw,  					     pf->hw.aq.asq_last_status));  			return -ENOENT; @@ -13915,8 +13981,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)  			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);  			if (ret) {  				dev_info(&pf->pdev->dev, -					 "update vsi failed, err %s aq_err %s\n", -					 i40e_stat_str(&pf->hw, ret), +					 "update vsi failed, err %d aq_err %s\n", +					 ret,  					 i40e_aq_str(&pf->hw,  						     pf->hw.aq.asq_last_status));  				ret = -ENOENT; @@ -13935,8 +14001,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)  			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);  			if (ret) {  				dev_info(&pf->pdev->dev, -					 "update vsi failed, err %s aq_err %s\n", -					 i40e_stat_str(&pf->hw, ret), +					 "update vsi failed, err %pe aq_err %s\n", +					 ERR_PTR(ret),  					 i40e_aq_str(&pf->hw,  						    pf->hw.aq.asq_last_status));  				ret = -ENOENT; @@ -13958,9 +14024,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)  				 * message and continue  				 */  				dev_info(&pf->pdev->dev, -					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", +					 "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",  					 enabled_tc, -					 i40e_stat_str(&pf->hw, ret), +					 ERR_PTR(ret),  					 i40e_aq_str(&pf->hw,  						    pf->hw.aq.asq_last_status));  			} @@ -14054,8 +14120,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)  		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);  		if (ret) {  			dev_info(&vsi->back->pdev->dev, -				 "add vsi failed, err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, ret), +				 "add vsi failed, err %pe aq_err %s\n", +				 ERR_PTR(ret),  				 i40e_aq_str(&pf->hw,  					     pf->hw.aq.asq_last_status));  			ret = -ENOENT; @@ -14086,8 +14152,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)  	ret = i40e_vsi_get_bw_info(vsi);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "couldn't get vsi bw info, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "couldn't get vsi bw info, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		/* VSI is already added so not tearing that up */  		ret = 0; @@ -14533,8 +14599,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)  						  &bw_data, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "query veb bw config failed, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "query veb bw config failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));  		goto out;  	} @@ -14543,8 +14609,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)  						   &ets_data, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "query veb bw ets config failed, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "query veb bw ets config failed, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));  		goto out;  	} @@ -14740,8 +14806,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)  	/* get a VEB from the hardware */  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "couldn't add VEB, err %s aq_err %s\n", -			 
i40e_stat_str(&pf->hw, ret), +			 "couldn't add VEB, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		return -EPERM;  	} @@ -14751,16 +14817,16 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)  					 &veb->stats_idx, NULL, NULL, NULL);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "couldn't get VEB statistics idx, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "couldn't get VEB statistics idx, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		return -EPERM;  	}  	ret = i40e_veb_get_bw_info(veb);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "couldn't get VEB bw info, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "couldn't get VEB bw info, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);  		return -ENOENT; @@ -14970,8 +15036,8 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)  						&next_seid, NULL);  		if (ret) {  			dev_info(&pf->pdev->dev, -				 "get switch config failed err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, ret), +				 "get switch config failed err %d aq_err %s\n", +				 ret,  				 i40e_aq_str(&pf->hw,  					     pf->hw.aq.asq_last_status));  			kfree(aq_buf); @@ -15016,8 +15082,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui  	ret = i40e_fetch_switch_configuration(pf, false);  	if (ret) {  		dev_info(&pf->pdev->dev, -			 "couldn't fetch switch config, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, ret), +			 "couldn't fetch switch config, err %pe aq_err %s\n", +			 ERR_PTR(ret),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		return ret;  	} @@ -15043,8 +15109,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui  						NULL);  		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {  			dev_info(&pf->pdev->dev, -				 "couldn't set switch config bits, err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, ret), +				 "couldn't set switch config bits, err %pe aq_err %s\n", +				 ERR_PTR(ret),  				 i40e_aq_str(&pf->hw,  					     pf->hw.aq.asq_last_status));  			/* not a fatal problem, just keep going */ @@ -15381,13 +15447,12 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)   *   * Return 0 on success, negative on failure.   
**/ -static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf) +static int i40e_pf_loop_reset(struct i40e_pf *pf)  {  	/* wait max 10 seconds for PF reset to succeed */  	const unsigned long time_end = jiffies + 10 * HZ; -  	struct i40e_hw *hw = &pf->hw; -	i40e_status ret; +	int ret;  	ret = i40e_pf_reset(hw);  	while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) { @@ -15433,9 +15498,9 @@ static bool i40e_check_fw_empr(struct i40e_pf *pf)   * Return 0 if NIC is healthy or negative value when there are issues   * with resets   **/ -static i40e_status i40e_handle_resets(struct i40e_pf *pf) +static int i40e_handle_resets(struct i40e_pf *pf)  { -	const i40e_status pfr = i40e_pf_loop_reset(pf); +	const int pfr = i40e_pf_loop_reset(pf);  	const bool is_empr = i40e_check_fw_empr(pf);  	if (is_empr || pfr != I40E_SUCCESS) @@ -15530,10 +15595,9 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)  err_switch_setup:  	i40e_reset_interrupt_capability(pf); -	del_timer_sync(&pf->service_timer); +	timer_shutdown_sync(&pf->service_timer);  	i40e_shutdown_adminq(hw);  	iounmap(hw->hw_addr); -	pci_disable_pcie_error_reporting(pf->pdev);  	pci_release_mem_regions(pf->pdev);  	pci_disable_device(pf->pdev);  	kfree(pf); @@ -15573,13 +15637,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	struct i40e_aq_get_phy_abilities_resp abilities;  #ifdef CONFIG_I40E_DCB  	enum i40e_get_fw_lldp_status_resp lldp_status; -	i40e_status status;  #endif /* CONFIG_I40E_DCB */  	struct i40e_pf *pf;  	struct i40e_hw *hw;  	static u16 pfs_found;  	u16 wol_nvm_bits;  	u16 link_status; +#ifdef CONFIG_I40E_DCB +	int status; +#endif /* CONFIG_I40E_DCB */  	int err;  	u32 val;  	u32 i; @@ -15604,7 +15670,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  		goto err_pci_reg;  	} -	pci_enable_pcie_error_reporting(pdev);  	pci_set_master(pdev);  	/* Now that we have a PCI connection, we need to do the @@ -15948,8 +16013,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  					 I40E_AQ_EVENT_MEDIA_NA |  					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);  	if (err) -		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", -			 i40e_stat_str(&pf->hw, err), +		dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", +			 ERR_PTR(err),  			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  	/* Reconfigure hardware for allowing smaller MSS in the case @@ -15967,8 +16032,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  		msleep(75);  		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);  		if (err) -			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, err), +			dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", +				 ERR_PTR(err),  				 i40e_aq_str(&pf->hw,  					     pf->hw.aq.asq_last_status));  	} @@ -16100,8 +16165,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	/* get the requested speeds from the fw */  	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);  	if (err) -		dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n", -			i40e_stat_str(&pf->hw, err), +		dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %pe last_status =  %s\n", +			ERR_PTR(err),  			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  	pf->hw.phy.link_info.requested_speeds = abilities.link_speed; @@ -16111,8 +16176,8 @@ static int 
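i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	/* get the supported phy types from the fw */  	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);  	if (err) -		dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n", -			i40e_stat_str(&pf->hw, err), +		dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %pe last_status =  %s\n", +			ERR_PTR(err),  			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  	/* make sure the MFS hasn't been set lower than the default */

The teardown hunks that follow (and the recovery-mode error path above) swap del_timer_sync() for timer_shutdown_sync(), added in v6.2. Both wait for a running callback to finish, but timer_shutdown_sync() also marks the timer as shut down so nothing can re-arm it afterwards, which is what a probe-failure or remove path wants. A hedged sketch (struct my_dev is illustrative, not driver code):

#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_dev {
	struct timer_list service_timer;
	struct work_struct service_task;
};

/* After timer_shutdown_sync() returns, the timer can neither fire nor
 * be re-armed, so the work it used to schedule can be cancelled
 * without racing against a fresh mod_timer().
 */
static void my_dev_teardown(struct my_dev *dev)
{
	timer_shutdown_sync(&dev->service_timer);
	cancel_work_sync(&dev->service_task);
}

The dropped pci_enable_pcie_error_reporting()/pci_disable_pcie_error_reporting() calls in the same paths reflect that the PCI core now manages AER reporting itself, so drivers no longer opt in or out.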
@@ -16149,7 +16214,7 @@ err_vsis:  	kfree(pf->vsi);  err_switch_setup:  	i40e_reset_interrupt_capability(pf); -	del_timer_sync(&pf->service_timer); +	timer_shutdown_sync(&pf->service_timer);  err_mac_addr: err_configure_lan_hmc:  	(void)i40e_shutdown_lan_hmc(hw); @@ -16162,7 +16227,6 @@ err_pf_reset:  err_ioremap:  	kfree(pf);  err_pf_alloc: -	pci_disable_pcie_error_reporting(pdev);  	pci_release_mem_regions(pdev);  err_pci_reg: err_dma: @@ -16183,7 +16247,7 @@ static void i40e_remove(struct pci_dev *pdev)  {  	struct i40e_pf *pf = pci_get_drvdata(pdev);  	struct i40e_hw *hw = &pf->hw; -	i40e_status ret_code; +	int ret_code;  	int i;  	i40e_dbg_pf_exit(pf); @@ -16211,7 +16275,7 @@ static void i40e_remove(struct pci_dev *pdev)  	set_bit(__I40E_SUSPENDED, pf->state);  	set_bit(__I40E_DOWN, pf->state);  	if (pf->service_timer.function) -		del_timer_sync(&pf->service_timer); +		timer_shutdown_sync(&pf->service_timer);  	if (pf->service_task.func)  		cancel_work_sync(&pf->service_task); @@ -16310,7 +16374,6 @@ unmap:  	kfree(pf);  	pci_release_mem_regions(pdev); -	pci_disable_pcie_error_reporting(pdev);  	pci_disable_device(pdev);  } @@ -16431,9 +16494,9 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)  static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)  {  	struct i40e_hw *hw = &pf->hw; -	i40e_status ret;  	u8 mac_addr[6];  	u16 flags = 0; +	int ret;  	/* Get current MAC address in case it's an LAA */  	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 3a38bf8bcde7..9da0c87f0328 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -13,10 +13,10 @@   * in this file) as an equivalent of the FLASH part mapped into the SR.   * We are accessing FLASH always thru the Shadow RAM.   **/ -i40e_status i40e_init_nvm(struct i40e_hw *hw) +int i40e_init_nvm(struct i40e_hw *hw)  {  	struct i40e_nvm_info *nvm = &hw->nvm; -	i40e_status ret_code = 0; +	int ret_code = 0;  	u32 fla, gens;  	u8 sr_size; @@ -52,12 +52,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)   * This function will request NVM ownership for reading   * via the proper Admin Command.   **/ -i40e_status i40e_acquire_nvm(struct i40e_hw *hw, -				       enum i40e_aq_resource_access_type access) +int i40e_acquire_nvm(struct i40e_hw *hw, +		     enum i40e_aq_resource_access_type access)  { -	i40e_status ret_code = 0;  	u64 gtime, timeout;  	u64 time_left = 0; +	int ret_code = 0;  	if (hw->nvm.blank_nvm_mode)  		goto i40e_i40e_acquire_nvm_exit; @@ -111,7 +111,7 @@ i40e_i40e_acquire_nvm_exit:   **/  void i40e_release_nvm(struct i40e_hw *hw)  { -	i40e_status ret_code = I40E_SUCCESS; +	int ret_code = I40E_SUCCESS;  	u32 total_delay = 0;  	if (hw->nvm.blank_nvm_mode) @@ -138,9 +138,9 @@ void i40e_release_nvm(struct i40e_hw *hw)   *   * Polls the SRCTL Shadow RAM register done bit.   
**/ -static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) +static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)  { -	i40e_status ret_code = I40E_ERR_TIMEOUT; +	int ret_code = I40E_ERR_TIMEOUT;  	u32 srctl, wait_cnt;  	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */ @@ -165,10 +165,10 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)   *   * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.   **/ -static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, -					    u16 *data) +static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, +				    u16 *data)  { -	i40e_status ret_code = I40E_ERR_TIMEOUT; +	int ret_code = I40E_ERR_TIMEOUT;  	u32 sr_reg;  	if (offset >= hw->nvm.sr_size) { @@ -216,13 +216,13 @@ read_nvm_exit:   *   * Writes a 16 bit words buffer to the Shadow RAM using the admin command.   **/ -static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, -				    u8 module_pointer, u32 offset, -				    u16 words, void *data, -				    bool last_command) +static int i40e_read_nvm_aq(struct i40e_hw *hw, +			    u8 module_pointer, u32 offset, +			    u16 words, void *data, +			    bool last_command)  { -	i40e_status ret_code = I40E_ERR_NVM;  	struct i40e_asq_cmd_details cmd_details; +	int ret_code = I40E_ERR_NVM;  	memset(&cmd_details, 0, sizeof(cmd_details));  	cmd_details.wb_desc = &hw->nvm_wb_desc; @@ -264,10 +264,10 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,   *   * Reads one 16 bit word from the Shadow RAM using the AdminQ   **/ -static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, -					 u16 *data) +static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, +				 u16 *data)  { -	i40e_status ret_code = I40E_ERR_TIMEOUT; +	int ret_code = I40E_ERR_TIMEOUT;  	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);  	*data = le16_to_cpu(*(__le16 *)data); @@ -286,8 +286,8 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,   * Do not use this function except in cases where the nvm lock is already   * taken via i40e_acquire_nvm().   **/ -static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, -					u16 offset, u16 *data) +static int __i40e_read_nvm_word(struct i40e_hw *hw, +				u16 offset, u16 *data)  {  	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)  		return i40e_read_nvm_word_aq(hw, offset, data); @@ -303,10 +303,10 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,   *   * Reads one 16 bit word from the Shadow RAM.   
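**/ -i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, -			       u16 *data) +int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, +		       u16 *data)  { -	i40e_status ret_code = 0; +	int ret_code = 0;  	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)  		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);

i40e_read_nvm_word() above shows the locking convention the rest of i40e_nvm.c follows: the NVM semaphore is taken only when the hardware flag demands it, and every acquire is paired with a release once the shadow-RAM access finishes. A condensed sketch of that flow (my_read_word_locked() is a made-up name; the i40e_* calls are the real ones from this file):

/* Sketch: conditional NVM ownership around a shadow-RAM word read. */
static int my_read_word_locked(struct i40e_hw *hw, u16 offset, u16 *data)
{
	int ret = 0;

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret)
		return ret;

	/* the lock (if any) is held: safe to touch the shadow RAM */
	ret = __i40e_read_nvm_word(hw, offset, data);

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		i40e_release_nvm(hw);

	return ret;
}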
@@ -330,17 +330,17 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,   * @words_data_size: Words to read from NVM   * @data_ptr: Pointer to memory location where resulting buffer will be stored   **/ -enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw, -						u8 module_ptr, -						u16 module_offset, -						u16 data_offset, -						u16 words_data_size, -						u16 *data_ptr) +int i40e_read_nvm_module_data(struct i40e_hw *hw, +			      u8 module_ptr, +			      u16 module_offset, +			      u16 data_offset, +			      u16 words_data_size, +			      u16 *data_ptr)  { -	i40e_status status;  	u16 specific_ptr = 0;  	u16 ptr_value = 0;  	u32 offset = 0; +	int status;  	if (module_ptr != 0) {  		status = i40e_read_nvm_word(hw, module_ptr, &ptr_value); @@ -406,10 +406,10 @@ enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,   * method. The buffer read is preceded by the NVM ownership take   * and followed by the release.   **/ -static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, -					      u16 *words, u16 *data) +static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, +				      u16 *words, u16 *data)  { -	i40e_status ret_code = 0; +	int ret_code = 0;  	u16 index, word;  	/* Loop thru the selected region */ @@ -437,13 +437,13 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,   * method. The buffer read is preceded by the NVM ownership take   * and followed by the release.   **/ -static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, -					   u16 *words, u16 *data) +static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, +				   u16 *words, u16 *data)  { -	i40e_status ret_code; -	u16 read_size;  	bool last_cmd = false;  	u16 words_read = 0; +	u16 read_size; +	int ret_code;  	u16 i = 0;  	do { @@ -493,9 +493,9 @@ read_nvm_buffer_aq_exit:   * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()   * method.   **/ -static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, -					  u16 offset, u16 *words, -					  u16 *data) +static int __i40e_read_nvm_buffer(struct i40e_hw *hw, +				  u16 offset, u16 *words, +				  u16 *data)  {  	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)  		return i40e_read_nvm_buffer_aq(hw, offset, words, data); @@ -514,10 +514,10 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,   * method. The buffer read is preceded by the NVM ownership take   * and followed by the release.   **/ -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, -				 u16 *words, u16 *data) +int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, +			 u16 *words, u16 *data)  { -	i40e_status ret_code = 0; +	int ret_code = 0;  	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {  		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); @@ -544,12 +544,12 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,   *   * Writes a 16 bit words buffer to the Shadow RAM using the admin command.   
**/ -static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, -				     u32 offset, u16 words, void *data, -				     bool last_command) +static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, +			     u32 offset, u16 words, void *data, +			     bool last_command)  { -	i40e_status ret_code = I40E_ERR_NVM;  	struct i40e_asq_cmd_details cmd_details; +	int ret_code = I40E_ERR_NVM;  	memset(&cmd_details, 0, sizeof(cmd_details));  	cmd_details.wb_desc = &hw->nvm_wb_desc; @@ -594,14 +594,14 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,   * is customer specific and unknown. Therefore, this function skips all maximum   * possible size of VPD (1kB).   **/ -static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, -						    u16 *checksum) +static int i40e_calc_nvm_checksum(struct i40e_hw *hw, +				  u16 *checksum)  { -	i40e_status ret_code;  	struct i40e_virt_mem vmem;  	u16 pcie_alt_module = 0;  	u16 checksum_local = 0;  	u16 vpd_module = 0; +	int ret_code;  	u16 *data;  	u16 i = 0; @@ -675,11 +675,11 @@ i40e_calc_nvm_checksum_exit:   * on ARQ completion event reception by caller.   * This function will commit SR to NVM.   **/ -i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw) +int i40e_update_nvm_checksum(struct i40e_hw *hw)  { -	i40e_status ret_code; -	u16 checksum;  	__le16 le_sum; +	int ret_code; +	u16 checksum;  	ret_code = i40e_calc_nvm_checksum(hw, &checksum);  	if (!ret_code) { @@ -699,12 +699,12 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)   * Performs checksum calculation and validates the NVM SW checksum. If the   * caller does not need checksum, the value can be NULL.   **/ -i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, -						 u16 *checksum) +int i40e_validate_nvm_checksum(struct i40e_hw *hw, +			       u16 *checksum)  { -	i40e_status ret_code = 0; -	u16 checksum_sr = 0;  	u16 checksum_local = 0; +	u16 checksum_sr = 0; +	int ret_code = 0;  	/* We must acquire the NVM lock in order to correctly synchronize the  	 * NVM accesses across multiple PFs. 
Without doing so it is possible @@ -733,36 +733,36 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,  	return ret_code;  } -static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, -					  struct i40e_nvm_access *cmd, -					  u8 *bytes, int *perrno); -static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, -					     struct i40e_nvm_access *cmd, -					     u8 *bytes, int *perrno); -static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, -					     struct i40e_nvm_access *cmd, -					     u8 *bytes, int *errno); +static int i40e_nvmupd_state_init(struct i40e_hw *hw, +				  struct i40e_nvm_access *cmd, +				  u8 *bytes, int *perrno); +static int i40e_nvmupd_state_reading(struct i40e_hw *hw, +				     struct i40e_nvm_access *cmd, +				     u8 *bytes, int *perrno); +static int i40e_nvmupd_state_writing(struct i40e_hw *hw, +				     struct i40e_nvm_access *cmd, +				     u8 *bytes, int *errno);  static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,  						struct i40e_nvm_access *cmd,  						int *perrno); -static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, -					 struct i40e_nvm_access *cmd, -					 int *perrno); -static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, -					 struct i40e_nvm_access *cmd, -					 u8 *bytes, int *perrno); -static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, -					struct i40e_nvm_access *cmd, -					u8 *bytes, int *perrno); -static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, -				       struct i40e_nvm_access *cmd, -				       u8 *bytes, int *perrno); -static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, -					     struct i40e_nvm_access *cmd, -					     u8 *bytes, int *perrno); -static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw, -					    struct i40e_nvm_access *cmd, -					    u8 *bytes, int *perrno); +static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw, +				 struct i40e_nvm_access *cmd, +				 int *perrno); +static int i40e_nvmupd_nvm_write(struct i40e_hw *hw, +				 struct i40e_nvm_access *cmd, +				 u8 *bytes, int *perrno); +static int i40e_nvmupd_nvm_read(struct i40e_hw *hw, +				struct i40e_nvm_access *cmd, +				u8 *bytes, int *perrno); +static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, +			       struct i40e_nvm_access *cmd, +			       u8 *bytes, int *perrno); +static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, +				     struct i40e_nvm_access *cmd, +				     u8 *bytes, int *perrno); +static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw, +				    struct i40e_nvm_access *cmd, +				    u8 *bytes, int *perrno);  static inline u8 i40e_nvmupd_get_module(u32 val)  {  	return (u8)(val & I40E_NVM_MOD_PNT_MASK); @@ -807,12 +807,12 @@ static const char * const i40e_nvm_update_state_str[] = {   *   * Dispatches command depending on what update state is current   **/ -i40e_status i40e_nvmupd_command(struct i40e_hw *hw, -				struct i40e_nvm_access *cmd, -				u8 *bytes, int *perrno) +int i40e_nvmupd_command(struct i40e_hw *hw, +			struct i40e_nvm_access *cmd, +			u8 *bytes, int *perrno)  { -	i40e_status status;  	enum i40e_nvmupd_cmd upd_cmd; +	int status;  	/* assume success */  	*perrno = 0; @@ -923,12 +923,12 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,   * Process legitimate commands of the Init state and conditionally set next   * state. Reject all other commands.   
**/ -static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, -					  struct i40e_nvm_access *cmd, -					  u8 *bytes, int *perrno) +static int i40e_nvmupd_state_init(struct i40e_hw *hw, +				  struct i40e_nvm_access *cmd, +				  u8 *bytes, int *perrno)  { -	i40e_status status = 0;  	enum i40e_nvmupd_cmd upd_cmd; +	int status = 0;  	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); @@ -1062,12 +1062,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,   * NVM ownership is already held.  Process legitimate commands and set any   * change in state; reject all other commands.   **/ -static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, -					     struct i40e_nvm_access *cmd, -					     u8 *bytes, int *perrno) +static int i40e_nvmupd_state_reading(struct i40e_hw *hw, +				     struct i40e_nvm_access *cmd, +				     u8 *bytes, int *perrno)  { -	i40e_status status = 0;  	enum i40e_nvmupd_cmd upd_cmd; +	int status = 0;  	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); @@ -1104,13 +1104,13 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,   * NVM ownership is already held.  Process legitimate commands and set any   * change in state; reject all other commands   **/ -static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, -					     struct i40e_nvm_access *cmd, -					     u8 *bytes, int *perrno) +static int i40e_nvmupd_state_writing(struct i40e_hw *hw, +				     struct i40e_nvm_access *cmd, +				     u8 *bytes, int *perrno)  { -	i40e_status status = 0;  	enum i40e_nvmupd_cmd upd_cmd;  	bool retry_attempt = false; +	int status = 0;  	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); @@ -1187,8 +1187,8 @@ retry:  	 */  	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&  	    !retry_attempt) { -		i40e_status old_status = status;  		u32 old_asq_status = hw->aq.asq_last_status; +		int old_status = status;  		u32 gtime;  		gtime = rd32(hw, I40E_GLVFGEN_TIMER); @@ -1370,17 +1370,17 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,   *   * cmd structure contains identifiers and data buffer   **/ -static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, -				       struct i40e_nvm_access *cmd, -				       u8 *bytes, int *perrno) +static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, +			       struct i40e_nvm_access *cmd, +			       u8 *bytes, int *perrno)  {  	struct i40e_asq_cmd_details cmd_details; -	i40e_status status;  	struct i40e_aq_desc *aq_desc;  	u32 buff_size = 0;  	u8 *buff = NULL;  	u32 aq_desc_len;  	u32 aq_data_len; +	int status;  	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);  	if (cmd->offset == 0xffff) @@ -1429,8 +1429,8 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,  				       buff_size, &cmd_details);  	if (status) {  		i40e_debug(hw, I40E_DEBUG_NVM, -			   "i40e_nvmupd_exec_aq err %s aq_err %s\n", -			   i40e_stat_str(hw, status), +			   "%s err %pe aq_err %s\n", +			   __func__, ERR_PTR(status),  			   i40e_aq_str(hw, hw->aq.asq_last_status));  		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);  		return status; @@ -1454,9 +1454,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,   *   * cmd structure contains identifiers and data buffer   **/ -static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw, -					     struct i40e_nvm_access *cmd, -					     u8 *bytes, int *perrno) +static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, +				     struct i40e_nvm_access *cmd, +				     u8 *bytes, int *perrno)  {  
	u32 aq_total_len;  	u32 aq_desc_len; @@ -1523,9 +1523,9 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,   *   * cmd structure contains identifiers and data buffer   **/ -static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw, -					    struct i40e_nvm_access *cmd, -					    u8 *bytes, int *perrno) +static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw, +				    struct i40e_nvm_access *cmd, +				    u8 *bytes, int *perrno)  {  	u32 aq_total_len;  	u32 aq_desc_len; @@ -1557,13 +1557,13 @@ static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,   *   * cmd structure contains identifiers and data buffer   **/ -static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, -					struct i40e_nvm_access *cmd, -					u8 *bytes, int *perrno) +static int i40e_nvmupd_nvm_read(struct i40e_hw *hw, +				struct i40e_nvm_access *cmd, +				u8 *bytes, int *perrno)  {  	struct i40e_asq_cmd_details cmd_details; -	i40e_status status;  	u8 module, transaction; +	int status;  	bool last;  	transaction = i40e_nvmupd_get_transaction(cmd->config); @@ -1596,13 +1596,13 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,   *   * module, offset, data_size and data are in cmd structure   **/ -static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, -					 struct i40e_nvm_access *cmd, -					 int *perrno) +static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw, +				 struct i40e_nvm_access *cmd, +				 int *perrno)  { -	i40e_status status = 0;  	struct i40e_asq_cmd_details cmd_details;  	u8 module, transaction; +	int status = 0;  	bool last;  	transaction = i40e_nvmupd_get_transaction(cmd->config); @@ -1636,14 +1636,14 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,   *   * module, offset, data_size and data are in cmd structure   **/ -static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, -					 struct i40e_nvm_access *cmd, -					 u8 *bytes, int *perrno) +static int i40e_nvmupd_nvm_write(struct i40e_hw *hw, +				 struct i40e_nvm_access *cmd, +				 u8 *bytes, int *perrno)  { -	i40e_status status = 0;  	struct i40e_asq_cmd_details cmd_details;  	u8 module, transaction;  	u8 preservation_flags; +	int status = 0;  	bool last;  	transaction = i40e_nvmupd_get_transaction(cmd->config); diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index 2f6815b2f8df..2bd4de03dafa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -56,5 +56,4 @@ do {								\  			(h)->bus.func, ##__VA_ARGS__);		\  } while (0) -typedef enum i40e_status_code i40e_status;  #endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index ebdcde6f1aeb..fe845987d99a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -16,29 +16,29 @@   */  /* adminq functions */ -i40e_status i40e_init_adminq(struct i40e_hw *hw); +int i40e_init_adminq(struct i40e_hw *hw);  void i40e_shutdown_adminq(struct i40e_hw *hw);  void i40e_adminq_init_ring_data(struct i40e_hw *hw); -i40e_status i40e_clean_arq_element(struct i40e_hw *hw, -					     struct i40e_arq_event_info *e, -					     u16 *events_pending); -i40e_status +int i40e_clean_arq_element(struct i40e_hw *hw, +			   struct i40e_arq_event_info *e, +			   u16 *events_pending); +int  i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,  		      void *buff, /* can be NULL */ u16  buff_size,  
		      struct i40e_asq_cmd_details *cmd_details); -i40e_status +int  i40e_asq_send_command_v2(struct i40e_hw *hw,  			 struct i40e_aq_desc *desc,  			 void *buff, /* can be NULL */  			 u16  buff_size,  			 struct i40e_asq_cmd_details *cmd_details,  			 enum i40e_admin_queue_err *aq_status); -i40e_status +int  i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,  			     void *buff, /* can be NULL */ u16  buff_size,  			     struct i40e_asq_cmd_details *cmd_details,  			     bool is_atomic_context); -i40e_status +int  i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,  				struct i40e_aq_desc *desc,  				void *buff, /* can be NULL */ @@ -53,324 +53,332 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,  void i40e_idle_aq(struct i40e_hw *hw);  bool i40e_check_asq_alive(struct i40e_hw *hw); -i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); +int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);  const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); -const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err); -i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, -				bool pf_lut, u8 *lut, u16 lut_size); -i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, -				bool pf_lut, u8 *lut, u16 lut_size); -i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, -				u16 seid, -				struct i40e_aqc_get_set_rss_key_data *key); -i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, -				u16 seid, -				struct i40e_aqc_get_set_rss_key_data *key); +int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, +			bool pf_lut, u8 *lut, u16 lut_size); +int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, +			bool pf_lut, u8 *lut, u16 lut_size); +int i40e_aq_get_rss_key(struct i40e_hw *hw, +			u16 seid, +			struct i40e_aqc_get_set_rss_key_data *key); +int i40e_aq_set_rss_key(struct i40e_hw *hw, +			u16 seid, +			struct i40e_aqc_get_set_rss_key_data *key);  u32 i40e_led_get(struct i40e_hw *hw);  void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); -i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, -			     u16 led_addr, u32 mode); -i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, -			     u16 *val); -i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, -				    u32 time, u32 interval); +int i40e_led_set_phy(struct i40e_hw *hw, bool on, +		     u16 led_addr, u32 mode); +int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, +		     u16 *val); +int i40e_blink_phy_link_led(struct i40e_hw *hw, +			    u32 time, u32 interval);  /* admin send queue commands */ -i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, -				u16 *fw_major_version, u16 *fw_minor_version, -				u32 *fw_build, -				u16 *api_major_version, u16 *api_minor_version, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, -					u32 reg_addr, u64 reg_val, -					struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, +int i40e_aq_get_firmware_version(struct i40e_hw *hw, +				 u16 *fw_major_version, u16 *fw_minor_version, +				 u32 *fw_build, +				 u16 *api_major_version, u16 *api_minor_version, +				 struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_debug_write_register(struct i40e_hw *hw, +				 u32 reg_addr, u64 reg_val, +				 struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_debug_read_register(struct i40e_hw *hw,  				u32  reg_addr, u64 *reg_val,  				struct i40e_asq_cmd_details 
*cmd_details); -i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id, -				      struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, -			bool qualified_modules, bool report_init, -			struct i40e_aq_get_phy_abilities_resp *abilities, -			struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, -				struct i40e_aq_set_phy_config *config, -				struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, -				  bool atomic_reset); -i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, -				     struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, -					bool enable_link, -					struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, -				bool enable_lse, struct i40e_link_status *link, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw, -				u64 advt_reg, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, +int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, +			  struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, +			    struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id, +			      struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_phy_capabilities(struct i40e_hw *hw, +				 bool qualified_modules, bool report_init, +				 struct i40e_aq_get_phy_abilities_resp *abilities, +				 struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_phy_config(struct i40e_hw *hw, +			   struct i40e_aq_set_phy_config *config, +			   struct i40e_asq_cmd_details *cmd_details); +int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, +		bool atomic_reset); +int i40e_aq_set_mac_loopback(struct i40e_hw *hw, +			     bool ena_lpbk, +			     struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, +			     struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_clear_pxe_mode(struct i40e_hw *hw, +			   struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_link_restart_an(struct i40e_hw *hw, +				bool enable_link, +				struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_link_info(struct i40e_hw *hw, +			  bool enable_lse, struct i40e_link_status *link, +			  struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_local_advt_reg(struct i40e_hw *hw, +			       u64 advt_reg, +			       struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_send_driver_version(struct i40e_hw *hw,  				struct i40e_driver_version *dv,  				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, -				struct i40e_vsi_context *vsi_ctx, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, -				u16 vsi_id, bool set_filter, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, -		u16 
vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details, -		bool rx_only_promisc); -i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, -		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, -							 u16 seid, bool enable, -							 u16 vid, -				struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, -							 u16 seid, bool enable, -							 u16 vid, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, -				u16 seid, bool enable, u16 vid, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, -				u16 seid, bool enable, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, -				struct i40e_vsi_context *vsi_ctx, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, -				struct i40e_vsi_context *vsi_ctx, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, -				u16 downlink_seid, u8 enabled_tc, -				bool default_port, u16 *pveb_seid, -				bool enable_stats, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, -				u16 veb_seid, u16 *switch_id, bool *floating, -				u16 *statistic_index, u16 *vebs_used, -				u16 *vebs_free, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id, +int i40e_aq_add_vsi(struct i40e_hw *hw, +		    struct i40e_vsi_context *vsi_ctx, +		    struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, +			      u16 vsi_id, bool set_filter, +			      struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, +					struct i40e_asq_cmd_details *cmd_details, +					bool rx_only_promisc); +int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, +					  struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, +				       u16 seid, bool enable, +				       u16 vid, +				       struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, +				       u16 seid, bool enable, +				       u16 vid, +				       struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, +				       u16 seid, bool enable, u16 vid, +				       struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, +				 u16 seid, bool enable, +				 struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_vsi_params(struct i40e_hw *hw, +			   struct i40e_vsi_context *vsi_ctx, +			   struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_update_vsi_params(struct i40e_hw *hw, +			      struct i40e_vsi_context *vsi_ctx, +			      struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, +		    u16 downlink_seid, u8 enabled_tc, +		    bool default_port, u16 *pveb_seid, +		    bool enable_stats, +		    struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_veb_parameters(struct i40e_hw *hw, +			       u16 veb_seid, u16 *switch_id, bool *floating, +			       u16 *statistic_index, u16 *vebs_used, +			       u16 *vebs_free, 
+			       struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,  			struct i40e_aqc_add_macvlan_element_data *mv_list,  			u16 count, struct i40e_asq_cmd_details *cmd_details); -i40e_status +int  i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,  		       struct i40e_aqc_add_macvlan_element_data *mv_list,  		       u16 count, struct i40e_asq_cmd_details *cmd_details,  		       enum i40e_admin_queue_err *aq_status); -i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, -			struct i40e_aqc_remove_macvlan_element_data *mv_list, -			u16 count, struct i40e_asq_cmd_details *cmd_details); -i40e_status +int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, +			   struct i40e_aqc_remove_macvlan_element_data *mv_list, +			   u16 count, struct i40e_asq_cmd_details *cmd_details); +int  i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,  			  struct i40e_aqc_remove_macvlan_element_data *mv_list,  			  u16 count, struct i40e_asq_cmd_details *cmd_details,  			  enum i40e_admin_queue_err *aq_status); -i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, -			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, -			struct i40e_asq_cmd_details *cmd_details, -			u16 *rule_id, u16 *rules_used, u16 *rules_free); -i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, -			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, -			struct i40e_asq_cmd_details *cmd_details, -			u16 *rules_used, u16 *rules_free); +int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, +			   u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, +			   struct i40e_asq_cmd_details *cmd_details, +			   u16 *rule_id, u16 *rules_used, u16 *rules_free); +int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, +			      u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, +			      struct i40e_asq_cmd_details *cmd_details, +			      u16 *rules_used, u16 *rules_free); -i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, -				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, -				struct i40e_aqc_get_switch_config_resp *buf, -				u16 buf_size, u16 *start_seid, -				struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, -						u16 flags, -						u16 valid_flags, u8 mode, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_request_resource(struct i40e_hw *hw, -				enum i40e_aq_resources_ids resource, -				enum i40e_aq_resource_access_type access, -				u8 sdp_number, u64 *timeout, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_release_resource(struct i40e_hw *hw, -				enum i40e_aq_resources_ids resource, -				u8 sdp_number, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, -				u32 offset, u16 length, void *data, -				bool last_command, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, -			      u32 offset, u16 length, bool last_command, +int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, +			   u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, +			   struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_switch_config(struct i40e_hw *hw, +			      struct i40e_aqc_get_switch_config_resp *buf, +			      u16 buf_size, u16 *start_seid,  			      struct 
i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, -				void *buff, u16 buff_size, u16 *data_size, -				enum i40e_admin_queue_opc list_type_opc, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, -				u32 offset, u16 length, void *data, -				bool last_command, u8 preservation_flags, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, -				  u8 rearrange_nvm, +int i40e_aq_set_switch_config(struct i40e_hw *hw, +			      u16 flags, +			      u16 valid_flags, u8 mode, +			      struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_request_resource(struct i40e_hw *hw, +			     enum i40e_aq_resources_ids resource, +			     enum i40e_aq_resource_access_type access, +			     u8 sdp_number, u64 *timeout, +			     struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_release_resource(struct i40e_hw *hw, +			     enum i40e_aq_resources_ids resource, +			     u8 sdp_number, +			     struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, +		     u32 offset, u16 length, void *data, +		     bool last_command, +		     struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, +		      u32 offset, u16 length, bool last_command, +		      struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_discover_capabilities(struct i40e_hw *hw, +				  void *buff, u16 buff_size, u16 *data_size, +				  enum i40e_admin_queue_opc list_type_opc,  				  struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, -				u8 mib_type, void *buff, u16 buff_size, -				u16 *local_len, u16 *remote_len, -				struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code +int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, +		       u32 offset, u16 length, void *data, +		       bool last_command, u8 preservation_flags, +		       struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_rearrange_nvm(struct i40e_hw *hw, +			  u8 rearrange_nvm, +			  struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, +			 u8 mib_type, void *buff, u16 buff_size, +			 u16 *local_len, u16 *remote_len, +			 struct i40e_asq_cmd_details *cmd_details); +int  i40e_aq_set_lldp_mib(struct i40e_hw *hw,  		     u8 mib_type, void *buff, u16 buff_size,  		     struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, -				bool enable_update, -				struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code +int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, +				      bool enable_update, +				      struct i40e_asq_cmd_details *cmd_details); +int  i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,  		     struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, -			      bool persist, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw, -				       bool dcb_enable, -				       struct i40e_asq_cmd_details -				       *cmd_details); -i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, +int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, +		      bool persist, +		      struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_set_dcb_parameters(struct i40e_hw *hw, +			       bool 
dcb_enable, +			       struct i40e_asq_cmd_details +			       *cmd_details); +int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, +		       struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, +			       void *buff, u16 buff_size,  			       struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, -				       void *buff, u16 buff_size, -				       struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, -				u16 udp_port, u8 protocol_index, -				u8 *filter_index, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, -				    u16 flags, u8 *mac_addr, -				    struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, +int i40e_aq_add_udp_tunnel(struct i40e_hw *hw, +			   u16 udp_port, u8 protocol_index, +			   u8 *filter_index, +			   struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, +			   struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, +			   struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_mac_address_write(struct i40e_hw *hw, +			      u16 flags, u8 *mac_addr, +			      struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,  				u16 seid, u16 credit, u8 max_credit,  				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, -				u16 seid, u16 credit, u8 max_bw, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, -			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, +int i40e_aq_dcb_updated(struct i40e_hw *hw,  			struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, -		u16 seid, -		struct i40e_aqc_configure_switching_comp_ets_data *ets_data, -		enum i40e_admin_queue_opc opcode, -		struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, +int i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, +					u16 seid, u16 credit, u8 max_bw, +					struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, +			     struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, +			     struct i40e_asq_cmd_details *cmd_details); +int +i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, +			       u16 seid, +			       struct i40e_aqc_configure_switching_comp_ets_data *ets_data, +			       enum i40e_admin_queue_opc opcode, +			       struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,  	u16 seid,  	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,  	struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, -			u16 seid, -			struct i40e_aqc_query_vsi_bw_config_resp *bw_data, -			struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, -			u16 seid, -			struct 
i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, -			struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, -		u16 seid, -		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, -		struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, -		u16 seid, -		struct i40e_aqc_query_port_ets_config_resp *bw_data, -		struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, -		u16 seid, -		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, -		struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, -				   struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code +int i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, +				u16 seid, +				struct i40e_aqc_query_vsi_bw_config_resp *bw_data, +				struct i40e_asq_cmd_details *cmd_details); +int +i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, +				 u16 seid, +				 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, +				 struct i40e_asq_cmd_details *cmd_details); +int +i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, +				     u16 seid, +				     struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, +				     struct i40e_asq_cmd_details *cmd_details); +int +i40e_aq_query_port_ets_config(struct i40e_hw *hw, +			      u16 seid, +			      struct i40e_aqc_query_port_ets_config_resp *bw_data, +			      struct i40e_asq_cmd_details *cmd_details); +int +i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, +				    u16 seid, +				    struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, +				    struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_resume_port_tx(struct i40e_hw *hw, +			   struct i40e_asq_cmd_details *cmd_details); +int  i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,  			     struct i40e_aqc_cloud_filters_element_bb *filters,  			     u8 filter_count); -enum i40e_status_code +int  i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,  			  struct i40e_aqc_cloud_filters_element_data *filters,  			  u8 filter_count); -enum i40e_status_code +int  i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,  			  struct i40e_aqc_cloud_filters_element_data *filters,  			  u8 filter_count); -enum i40e_status_code +int  i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,  			     struct i40e_aqc_cloud_filters_element_bb *filters,  			     u8 filter_count); -i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, -			       struct i40e_lldp_variables *lldp_cfg); -enum i40e_status_code +int i40e_read_lldp_cfg(struct i40e_hw *hw, +		       struct i40e_lldp_variables *lldp_cfg); +int  i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,  			struct i40e_asq_cmd_details *cmd_details);  /* i40e_common */ -i40e_status i40e_init_shared_code(struct i40e_hw *hw); -i40e_status i40e_pf_reset(struct i40e_hw *hw); +int i40e_init_shared_code(struct i40e_hw *hw); +int i40e_pf_reset(struct i40e_hw *hw);  void i40e_clear_hw(struct i40e_hw *hw);  void i40e_clear_pxe_mode(struct i40e_hw *hw); -i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up); -i40e_status i40e_update_link_info(struct i40e_hw *hw); -i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); -i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, -				      u32 *max_bw, u32 *min_bw, bool *min_valid, -				      bool *max_valid); -i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, -			struct 
i40e_aqc_configure_partition_bw_data *bw_data, -			struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); -i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, -				 u32 pba_num_size); -i40e_status i40e_validate_mac_addr(u8 *mac_addr); +int i40e_get_link_status(struct i40e_hw *hw, bool *link_up); +int i40e_update_link_info(struct i40e_hw *hw); +int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +int i40e_read_bw_from_alt_ram(struct i40e_hw *hw, +			      u32 *max_bw, u32 *min_bw, bool *min_valid, +			      bool *max_valid); +int +i40e_aq_configure_partition_bw(struct i40e_hw *hw, +			       struct i40e_aqc_configure_partition_bw_data *bw_data, +			       struct i40e_asq_cmd_details *cmd_details); +int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, +			 u32 pba_num_size); +int i40e_validate_mac_addr(u8 *mac_addr);  void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);  /* prototype for functions used for NVM access */ -i40e_status i40e_init_nvm(struct i40e_hw *hw); -i40e_status i40e_acquire_nvm(struct i40e_hw *hw, -				      enum i40e_aq_resource_access_type access); +int i40e_init_nvm(struct i40e_hw *hw); +int i40e_acquire_nvm(struct i40e_hw *hw, +		     enum i40e_aq_resource_access_type access);  void i40e_release_nvm(struct i40e_hw *hw); -i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, -					 u16 *data); -enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw, -						u8 module_ptr, -						u16 module_offset, -						u16 data_offset, -						u16 words_data_size, -						u16 *data_ptr); -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, -				 u16 *words, u16 *data); -i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw); -i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, -						 u16 *checksum); -i40e_status i40e_nvmupd_command(struct i40e_hw *hw, -				struct i40e_nvm_access *cmd, -				u8 *bytes, int *); +int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, +		       u16 *data); +int i40e_read_nvm_module_data(struct i40e_hw *hw, +			      u8 module_ptr, +			      u16 module_offset, +			      u16 data_offset, +			      u16 words_data_size, +			      u16 *data_ptr); +int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, +			 u16 *words, u16 *data); +int i40e_update_nvm_checksum(struct i40e_hw *hw); +int i40e_validate_nvm_checksum(struct i40e_hw *hw, +			       u16 *checksum); +int i40e_nvmupd_command(struct i40e_hw *hw, +			struct i40e_nvm_access *cmd, +			u8 *bytes, int *errno);  void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,  				  struct i40e_aq_desc *desc);  void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);  void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); -i40e_status i40e_set_mac_type(struct i40e_hw *hw); +int i40e_set_mac_type(struct i40e_hw *hw);  extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; @@ -419,41 +427,41 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)  /* i40e_common for VF drivers*/  void i40e_vf_parse_hw_config(struct i40e_hw *hw,  			     struct virtchnl_vf_resource *msg); -i40e_status i40e_vf_reset(struct i40e_hw *hw); -i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, -				enum virtchnl_ops v_opcode, -				i40e_status v_retval, -				u8 *msg, u16 msglen, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_set_filter_control(struct i40e_hw *hw, -				struct 
i40e_filter_control_settings *settings); -i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, -				u8 *mac_addr, u16 ethtype, u16 flags, -				u16 vsi_seid, u16 queue, bool is_add, -				struct i40e_control_filter_stats *stats, -				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, -			       u8 table_id, u32 start_index, u16 buff_size, -			       void *buff, u16 *ret_buff_size, -			       u8 *ret_next_table, u32 *ret_next_index, -			       struct i40e_asq_cmd_details *cmd_details); +int i40e_vf_reset(struct i40e_hw *hw); +int i40e_aq_send_msg_to_pf(struct i40e_hw *hw, +			   enum virtchnl_ops v_opcode, +			   int v_retval, +			   u8 *msg, u16 msglen, +			   struct i40e_asq_cmd_details *cmd_details); +int i40e_set_filter_control(struct i40e_hw *hw, +			    struct i40e_filter_control_settings *settings); +int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, +					  u8 *mac_addr, u16 ethtype, u16 flags, +					  u16 vsi_seid, u16 queue, bool is_add, +					  struct i40e_control_filter_stats *stats, +					  struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, +		       u8 table_id, u32 start_index, u16 buff_size, +		       void *buff, u16 *ret_buff_size, +		       u8 *ret_next_table, u32 *ret_next_index, +		       struct i40e_asq_cmd_details *cmd_details);  void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,  						    u16 vsi_seid); -i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, -				u32 reg_addr, u32 *reg_val, -				struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, +				 u32 reg_addr, u32 *reg_val, +				 struct i40e_asq_cmd_details *cmd_details);  u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); -i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, -				u32 reg_addr, u32 reg_val, -				struct i40e_asq_cmd_details *cmd_details); +int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, +				  u32 reg_addr, u32 reg_val, +				  struct i40e_asq_cmd_details *cmd_details);  void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); -enum i40e_status_code +int  i40e_aq_set_phy_register_ext(struct i40e_hw *hw,  			     u8 phy_select, u8 dev_addr, bool page_change,  			     bool set_mdio, u8 mdio_num,  			     u32 reg_addr, u32 reg_val,  			     struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code +int  i40e_aq_get_phy_register_ext(struct i40e_hw *hw,  			     u8 phy_select, u8 dev_addr, bool page_change,  			     bool set_mdio, u8 mdio_num, @@ -466,43 +474,43 @@ i40e_aq_get_phy_register_ext(struct i40e_hw *hw,  #define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd)		\  	i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd) -i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, -					    u16 reg, u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, -					     u16 reg, u8 phy_addr, u16 value); -i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, -				u8 page, u16 reg, u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, -				u8 page, u16 reg, u8 phy_addr, u16 value); -i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, -				   u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, -				    u8 phy_addr, u16 value); +int i40e_read_phy_register_clause22(struct i40e_hw *hw, +		
		    u16 reg, u8 phy_addr, u16 *value); +int i40e_write_phy_register_clause22(struct i40e_hw *hw, +				     u16 reg, u8 phy_addr, u16 value); +int i40e_read_phy_register_clause45(struct i40e_hw *hw, +				    u8 page, u16 reg, u8 phy_addr, u16 *value); +int i40e_write_phy_register_clause45(struct i40e_hw *hw, +				     u8 page, u16 reg, u8 phy_addr, u16 value); +int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, +			   u8 phy_addr, u16 *value); +int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, +			    u8 phy_addr, u16 value);  u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); -i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, -				    u32 time, u32 interval); -i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, -			      u16 buff_size, u32 track_id, -			      u32 *error_offset, u32 *error_info, -			      struct i40e_asq_cmd_details * -			      cmd_details); -i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, -				 u16 buff_size, u8 flags, -				 struct i40e_asq_cmd_details * -				 cmd_details); +int i40e_blink_phy_link_led(struct i40e_hw *hw, +			    u32 time, u32 interval); +int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, +		      u16 buff_size, u32 track_id, +		      u32 *error_offset, u32 *error_info, +		      struct i40e_asq_cmd_details * +		      cmd_details); +int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, +			 u16 buff_size, u8 flags, +			 struct i40e_asq_cmd_details * +			 cmd_details);  struct i40e_generic_seg_header *  i40e_find_segment_in_package(u32 segment_type,  			     struct i40e_package_header *pkg_header);  struct i40e_profile_section_header *  i40e_find_section_in_profile(u32 section_type,  			     struct i40e_profile_segment *profile); -enum i40e_status_code +int  i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,  		   u32 track_id); -enum i40e_status_code +int  i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,  		      u32 track_id); -enum i40e_status_code +int  i40e_add_pinfo_to_list(struct i40e_hw *hw,  		       struct i40e_profile_segment *profile,  		       u8 *profile_info_sec, u32 track_id); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index ffea0c9c82f1..c37abbb3cd06 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -347,23 +347,12 @@ static int i40e_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)  {  	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);  	struct i40e_hw *hw = &pf->hw; -	u64 adj, freq, diff; -	int neg_adj = 0; - -	if (scaled_ppm < 0) { -		neg_adj = 1; -		scaled_ppm = -scaled_ppm; -	} +	u64 adj, base_adj;  	smp_mb(); /* Force any pending update before accessing. 
*/ -	freq = I40E_PTP_40GB_INCVAL * READ_ONCE(pf->ptp_adj_mult); -	diff = mul_u64_u64_div_u64(freq, (u64)scaled_ppm, -				   1000000ULL << 16); +	base_adj = I40E_PTP_40GB_INCVAL * READ_ONCE(pf->ptp_adj_mult); -	if (neg_adj) -		adj = I40E_PTP_40GB_INCVAL - diff; -	else -		adj = I40E_PTP_40GB_INCVAL + diff; +	adj = adjust_by_scaled_ppm(base_adj, scaled_ppm);  	wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);  	wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32); diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h index db3714a65dc7..4d2782e76038 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_status.h +++ b/drivers/net/ethernet/intel/i40e/i40e_status.h @@ -9,65 +9,30 @@ enum i40e_status_code {  	I40E_SUCCESS				= 0,  	I40E_ERR_NVM				= -1,  	I40E_ERR_NVM_CHECKSUM			= -2, -	I40E_ERR_PHY				= -3,  	I40E_ERR_CONFIG				= -4,  	I40E_ERR_PARAM				= -5, -	I40E_ERR_MAC_TYPE			= -6,  	I40E_ERR_UNKNOWN_PHY			= -7, -	I40E_ERR_LINK_SETUP			= -8, -	I40E_ERR_ADAPTER_STOPPED		= -9,  	I40E_ERR_INVALID_MAC_ADDR		= -10,  	I40E_ERR_DEVICE_NOT_SUPPORTED		= -11, -	I40E_ERR_PRIMARY_REQUESTS_PENDING	= -12, -	I40E_ERR_INVALID_LINK_SETTINGS		= -13, -	I40E_ERR_AUTONEG_NOT_COMPLETE		= -14,  	I40E_ERR_RESET_FAILED			= -15, -	I40E_ERR_SWFW_SYNC			= -16,  	I40E_ERR_NO_AVAILABLE_VSI		= -17,  	I40E_ERR_NO_MEMORY			= -18,  	I40E_ERR_BAD_PTR			= -19, -	I40E_ERR_RING_FULL			= -20, -	I40E_ERR_INVALID_PD_ID			= -21, -	I40E_ERR_INVALID_QP_ID			= -22, -	I40E_ERR_INVALID_CQ_ID			= -23, -	I40E_ERR_INVALID_CEQ_ID			= -24, -	I40E_ERR_INVALID_AEQ_ID			= -25,  	I40E_ERR_INVALID_SIZE			= -26, -	I40E_ERR_INVALID_ARP_INDEX		= -27, -	I40E_ERR_INVALID_FPM_FUNC_ID		= -28, -	I40E_ERR_QP_INVALID_MSG_SIZE		= -29, -	I40E_ERR_QP_TOOMANY_WRS_POSTED		= -30, -	I40E_ERR_INVALID_FRAG_COUNT		= -31,  	I40E_ERR_QUEUE_EMPTY			= -32, -	I40E_ERR_INVALID_ALIGNMENT		= -33, -	I40E_ERR_FLUSHED_QUEUE			= -34, -	I40E_ERR_INVALID_PUSH_PAGE_INDEX	= -35, -	I40E_ERR_INVALID_IMM_DATA_SIZE		= -36,  	I40E_ERR_TIMEOUT			= -37, -	I40E_ERR_OPCODE_MISMATCH		= -38, -	I40E_ERR_CQP_COMPL_ERROR		= -39, -	I40E_ERR_INVALID_VF_ID			= -40, -	I40E_ERR_INVALID_HMCFN_ID		= -41, -	I40E_ERR_BACKING_PAGE_ERROR		= -42, -	I40E_ERR_NO_PBLCHUNKS_AVAILABLE		= -43, -	I40E_ERR_INVALID_PBLE_INDEX		= -44,  	I40E_ERR_INVALID_SD_INDEX		= -45,  	I40E_ERR_INVALID_PAGE_DESC_INDEX	= -46,  	I40E_ERR_INVALID_SD_TYPE		= -47, -	I40E_ERR_MEMCPY_FAILED			= -48,  	I40E_ERR_INVALID_HMC_OBJ_INDEX		= -49,  	I40E_ERR_INVALID_HMC_OBJ_COUNT		= -50, -	I40E_ERR_INVALID_SRQ_ARM_LIMIT		= -51, -	I40E_ERR_SRQ_ENABLED			= -52,  	I40E_ERR_ADMIN_QUEUE_ERROR		= -53,  	I40E_ERR_ADMIN_QUEUE_TIMEOUT		= -54,  	I40E_ERR_BUF_TOO_SHORT			= -55,  	I40E_ERR_ADMIN_QUEUE_FULL		= -56,  	I40E_ERR_ADMIN_QUEUE_NO_WORK		= -57, -	I40E_ERR_BAD_IWARP_CQE			= -58,  	I40E_ERR_NVM_BLANK_MODE			= -59,  	I40E_ERR_NOT_IMPLEMENTED		= -60, -	I40E_ERR_PE_DOORBELL_NOT_ENABLED	= -61,  	I40E_ERR_DIAG_TEST_FAILED		= -62,  	I40E_ERR_NOT_READY			= -63,  	I40E_NOT_SUPPORTED			= -64, diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h index b5b12299931f..79d587ad5409 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_trace.h +++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h @@ -55,6 +55,55 @@   * being built from shared code.   
*/
+#define NO_DEV "(i40e no_device)"
+
+TRACE_EVENT(i40e_napi_poll,
+
+	TP_PROTO(struct napi_struct *napi, struct i40e_q_vector *q, int budget,
+		 int budget_per_ring, unsigned int rx_cleaned, unsigned int tx_cleaned,
+		 bool rx_clean_complete, bool tx_clean_complete),
+
+	TP_ARGS(napi, q, budget, budget_per_ring, rx_cleaned, tx_cleaned,
+		rx_clean_complete, tx_clean_complete),
+
+	TP_STRUCT__entry(
+		__field(int, budget)
+		__field(int, budget_per_ring)
+		__field(unsigned int, rx_cleaned)
+		__field(unsigned int, tx_cleaned)
+		__field(int, rx_clean_complete)
+		__field(int, tx_clean_complete)
+		__field(int, irq_num)
+		__field(int, curr_cpu)
+		__string(qname, q->name)
+		__string(dev_name, napi->dev ? napi->dev->name : NO_DEV)
+		__bitmask(irq_affinity,	nr_cpumask_bits)
+	),
+
+	TP_fast_assign(
+		__entry->budget = budget;
+		__entry->budget_per_ring = budget_per_ring;
+		__entry->rx_cleaned = rx_cleaned;
+		__entry->tx_cleaned = tx_cleaned;
+		__entry->rx_clean_complete = rx_clean_complete;
+		__entry->tx_clean_complete = tx_clean_complete;
+		__entry->irq_num = q->irq_num;
+		__entry->curr_cpu = get_cpu();
+		__assign_str(qname, q->name);
+		__assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
+		__assign_bitmask(irq_affinity, cpumask_bits(&q->affinity_mask),
+				 nr_cpumask_bits);
+	),
+
+	TP_printk("i40e_napi_poll on dev %s q %s irq %d irq_mask %s curr_cpu %d "
+		  "budget %d bpr %d rx_cleaned %u tx_cleaned %u "
+		  "rx_clean_complete %d tx_clean_complete %d",
+		__get_str(dev_name), __get_str(qname), __entry->irq_num,
+		__get_bitmask(irq_affinity), __entry->curr_cpu, __entry->budget,
+		__entry->budget_per_ring, __entry->rx_cleaned, __entry->tx_cleaned,
+		__entry->rx_clean_complete, __entry->tx_clean_complete)
+);
+
 /* Events related to a vsi & ring */
 DECLARE_EVENT_CLASS(
 	i40e_tx_template,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b97c95f89fa0..924f972b91fa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -923,11 +923,13 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi)
  * @vsi: the VSI we care about
  * @tx_ring: Tx ring to clean
  * @napi_budget: Used to determine if we are in netpoll
+ * @tx_cleaned: Out parameter set to the number of Tx packets cleaned
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
-			      struct i40e_ring *tx_ring, int napi_budget)
+			      struct i40e_ring *tx_ring, int napi_budget,
+			      unsigned int *tx_cleaned)
 {
 	int i = tx_ring->next_to_clean;
 	struct i40e_tx_buffer *tx_buf;
@@ -1048,6 +1050,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 		}
 	}
 
+	*tx_cleaned = total_packets;
 	return !!budget;
 }
 
@@ -2422,6 +2425,7 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
  * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  * @rx_ring: rx descriptor ring to transact packets on
  * @budget: Total limit on number of packets to process
+ * @rx_cleaned: Out parameter set to the number of Rx packets processed
  *
  * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. 
The advantage to this is that on systems that have @@ -2430,7 +2434,8 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)   *   * Returns amount of work completed   **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget, +			     unsigned int *rx_cleaned)  {  	unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;  	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); @@ -2567,6 +2572,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)  	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); +	*rx_cleaned = total_rx_packets; +  	/* guarantee a trip back through this routine if there was a failure */  	return failure ? budget : (int)total_rx_packets;  } @@ -2689,6 +2696,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)  			       container_of(napi, struct i40e_q_vector, napi);  	struct i40e_vsi *vsi = q_vector->vsi;  	struct i40e_ring *ring; +	bool tx_clean_complete = true; +	bool rx_clean_complete = true; +	unsigned int tx_cleaned = 0; +	unsigned int rx_cleaned = 0;  	bool clean_complete = true;  	bool arm_wb = false;  	int budget_per_ring; @@ -2705,10 +2716,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)  	i40e_for_each_ring(ring, q_vector->tx) {  		bool wd = ring->xsk_pool ?  			  i40e_clean_xdp_tx_irq(vsi, ring) : -			  i40e_clean_tx_irq(vsi, ring, budget); +			  i40e_clean_tx_irq(vsi, ring, budget, &tx_cleaned);  		if (!wd) { -			clean_complete = false; +			clean_complete = tx_clean_complete = false;  			continue;  		}  		arm_wb |= ring->arm_wb; @@ -2733,14 +2744,18 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)  	i40e_for_each_ring(ring, q_vector->rx) {  		int cleaned = ring->xsk_pool ?  			      i40e_clean_rx_irq_zc(ring, budget_per_ring) : -			      i40e_clean_rx_irq(ring, budget_per_ring); +			      i40e_clean_rx_irq(ring, budget_per_ring, &rx_cleaned);  		work_done += cleaned;  		/* if we clean as many as budgeted, we must not be done */  		if (cleaned >= budget_per_ring) -			clean_complete = false; +			clean_complete = rx_clean_complete = false;  	} +	if (!i40e_enabled_xdp_vsi(vsi)) +		trace_i40e_napi_poll(napi, q_vector, budget, budget_per_ring, rx_cleaned, +				     tx_cleaned, rx_clean_complete, tx_clean_complete); +  	/* If work not completed, return budget and polling will return */  	if (!clean_complete) {  		int cpu_id = smp_processor_id(); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 72ddcefc45b1..8a4587585acd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -17,7 +17,7 @@   **/  static void i40e_vc_vf_broadcast(struct i40e_pf *pf,  				 enum virtchnl_ops v_opcode, -				 i40e_status v_retval, u8 *msg, +				 int v_retval, u8 *msg,  				 u16 msglen)  {  	struct i40e_hw *hw = &pf->hw; @@ -441,14 +441,14 @@ irq_list_done:  }  /** - * i40e_release_iwarp_qvlist + * i40e_release_rdma_qvlist   * @vf: pointer to the VF.   
*   **/ -static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) +static void i40e_release_rdma_qvlist(struct i40e_vf *vf)  {  	struct i40e_pf *pf = vf->pf; -	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info; +	struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;  	u32 msix_vf;  	u32 i; @@ -457,7 +457,7 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)  	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;  	for (i = 0; i < qvlist_info->num_vectors; i++) { -		struct virtchnl_iwarp_qv_info *qv_info; +		struct virtchnl_rdma_qv_info *qv_info;  		u32 next_q_index, next_q_type;  		struct i40e_hw *hw = &pf->hw;  		u32 v_idx, reg_idx, reg; @@ -491,18 +491,19 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)  }  /** - * i40e_config_iwarp_qvlist + * i40e_config_rdma_qvlist   * @vf: pointer to the VF info   * @qvlist_info: queue and vector list   *   * Return 0 on success or < 0 on error   **/ -static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, -				    struct virtchnl_iwarp_qvlist_info *qvlist_info) +static int +i40e_config_rdma_qvlist(struct i40e_vf *vf, +			struct virtchnl_rdma_qvlist_info *qvlist_info)  {  	struct i40e_pf *pf = vf->pf;  	struct i40e_hw *hw = &pf->hw; -	struct virtchnl_iwarp_qv_info *qv_info; +	struct virtchnl_rdma_qv_info *qv_info;  	u32 v_idx, i, reg_idx, reg;  	u32 next_q_idx, next_q_type;  	u32 msix_vf; @@ -1246,13 +1247,13 @@ err:   * @vl: List of VLANs - apply filter for given VLANs   * @num_vlans: Number of elements in @vl   **/ -static i40e_status +static int  i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,  		     bool unicast_enable, s16 *vl, u16 num_vlans)  { -	i40e_status aq_ret, aq_tmp = 0;  	struct i40e_pf *pf = vf->pf;  	struct i40e_hw *hw = &pf->hw; +	int aq_ret, aq_tmp = 0;  	int i;  	/* No VLAN to set promisc on, set on VSI */ @@ -1264,9 +1265,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,  			int aq_err = pf->hw.aq.asq_last_status;  			dev_err(&pf->pdev->dev, -				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", +				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",  				vf->vf_id, -				i40e_stat_str(&pf->hw, aq_ret), +				ERR_PTR(aq_ret),  				i40e_aq_str(&pf->hw, aq_err));  			return aq_ret; @@ -1280,9 +1281,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,  			int aq_err = pf->hw.aq.asq_last_status;  			dev_err(&pf->pdev->dev, -				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", +				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",  				vf->vf_id, -				i40e_stat_str(&pf->hw, aq_ret), +				ERR_PTR(aq_ret),  				i40e_aq_str(&pf->hw, aq_err));  		} @@ -1297,9 +1298,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,  			int aq_err = pf->hw.aq.asq_last_status;  			dev_err(&pf->pdev->dev, -				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", +				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",  				vf->vf_id, -				i40e_stat_str(&pf->hw, aq_ret), +				ERR_PTR(aq_ret),  				i40e_aq_str(&pf->hw, aq_err));  			if (!aq_tmp) @@ -1313,9 +1314,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,  			int aq_err = pf->hw.aq.asq_last_status;  			dev_err(&pf->pdev->dev, -				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", +				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",  				vf->vf_id, -				i40e_stat_str(&pf->hw, aq_ret), +				ERR_PTR(aq_ret),  		
		i40e_aq_str(&pf->hw, aq_err));  			if (!aq_tmp) @@ -1339,13 +1340,13 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,   * Called from the VF to configure the promiscuous mode of   * VF vsis and from the VF reset path to reset promiscuous mode.   **/ -static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, -						   u16 vsi_id, -						   bool allmulti, -						   bool alluni) +static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, +					   u16 vsi_id, +					   bool allmulti, +					   bool alluni)  { -	i40e_status aq_ret = I40E_SUCCESS;  	struct i40e_pf *pf = vf->pf; +	int aq_ret = I40E_SUCCESS;  	struct i40e_vsi *vsi;  	u16 num_vlans;  	s16 *vl; @@ -1578,6 +1579,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)  	i40e_cleanup_reset_vf(vf);  	i40e_flush(hw); +	usleep_range(20000, 40000);  	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);  	return true; @@ -1701,6 +1703,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)  	}  	i40e_flush(hw); +	usleep_range(20000, 40000);  	clear_bit(__I40E_VF_DISABLE, pf->state);  	return true; @@ -1953,7 +1956,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,  	struct i40e_pf *pf;  	struct i40e_hw *hw;  	int abs_vf_id; -	i40e_status aq_ret; +	int aq_ret;  	/* validate the request */  	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) @@ -1985,7 +1988,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,   **/  static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,  				   enum virtchnl_ops opcode, -				   i40e_status retval) +				   int retval)  {  	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);  } @@ -2089,9 +2092,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)  {  	struct virtchnl_vf_resource *vfres = NULL;  	struct i40e_pf *pf = vf->pf; -	i40e_status aq_ret = 0;  	struct i40e_vsi *vsi;  	int num_vsis = 1; +	int aq_ret = 0;  	size_t len = 0;  	int ret; @@ -2121,11 +2124,11 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)  		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;  	if (i40e_vf_client_capable(pf, vf->vf_id) && -	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) { -		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; -		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); +	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) { +		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA; +		set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);  	} else { -		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); +		clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);  	}  	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { @@ -2219,9 +2222,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)  	struct virtchnl_promisc_info *info =  	    (struct virtchnl_promisc_info *)msg;  	struct i40e_pf *pf = vf->pf; -	i40e_status aq_ret = 0;  	bool allmulti = false;  	bool alluni = false; +	int aq_ret = 0;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {  		aq_ret = I40E_ERR_PARAM; @@ -2306,10 +2309,10 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)  	struct virtchnl_queue_pair_info *qpi;  	u16 vsi_id, vsi_queue_id = 0;  	struct i40e_pf *pf = vf->pf; -	i40e_status aq_ret = 0;  	int i, j = 0, idx = 0;  	struct i40e_vsi *vsi;  	u16 num_qps_all = 0; +	int aq_ret = 0;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {  		aq_ret = I40E_ERR_PARAM; @@ -2456,8 +2459,8 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)  	struct virtchnl_irq_map_info *irqmap_info 
=  	    (struct virtchnl_irq_map_info *)msg;  	struct virtchnl_vector_map *map; +	int aq_ret = 0;  	u16 vsi_id; -	i40e_status aq_ret = 0;  	int i;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { @@ -2572,7 +2575,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)  	struct virtchnl_queue_select *vqs =  	    (struct virtchnl_queue_select *)msg;  	struct i40e_pf *pf = vf->pf; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	int i;  	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { @@ -2630,7 +2633,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)  	struct virtchnl_queue_select *vqs =  	    (struct virtchnl_queue_select *)msg;  	struct i40e_pf *pf = vf->pf; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {  		aq_ret = I40E_ERR_PARAM; @@ -2781,7 +2784,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)  	    (struct virtchnl_queue_select *)msg;  	struct i40e_pf *pf = vf->pf;  	struct i40e_eth_stats stats; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	struct i40e_vsi *vsi;  	memset(&stats, 0, sizeof(struct i40e_eth_stats)); @@ -2924,7 +2927,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)  	    (struct virtchnl_ether_addr_list *)msg;  	struct i40e_pf *pf = vf->pf;  	struct i40e_vsi *vsi = NULL; -	i40e_status ret = 0; +	int ret = 0;  	int i;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || @@ -2996,7 +2999,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)  	bool was_unimac_deleted = false;  	struct i40e_pf *pf = vf->pf;  	struct i40e_vsi *vsi = NULL; -	i40e_status ret = 0; +	int ret = 0;  	int i;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || @@ -3069,7 +3072,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)  	    (struct virtchnl_vlan_filter_list *)msg;  	struct i40e_pf *pf = vf->pf;  	struct i40e_vsi *vsi = NULL; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	int i;  	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && @@ -3140,7 +3143,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)  	    (struct virtchnl_vlan_filter_list *)msg;  	struct i40e_pf *pf = vf->pf;  	struct i40e_vsi *vsi = NULL; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	int i;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || @@ -3185,21 +3188,21 @@ error_param:  }  /** - * i40e_vc_iwarp_msg + * i40e_vc_rdma_msg   * @vf: pointer to the VF info   * @msg: pointer to the msg buffer   * @msglen: msg length   *   * called from the VF for the iwarp msgs   **/ -static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  {  	struct i40e_pf *pf = vf->pf;  	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || -	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { +	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {  		aq_ret = I40E_ERR_PARAM;  		goto error_param;  	} @@ -3209,42 +3212,42 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  error_param:  	/* send the response to the VF */ -	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, +	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,  				       aq_ret);  }  /** - * i40e_vc_iwarp_qvmap_msg + * i40e_vc_rdma_qvmap_msg   * @vf: pointer to the VF info   * @msg: pointer to the msg buffer   * @config: config qvmap or release it   *   * called from the VF for the 
iwarp msgs   **/ -static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) +static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)  { -	struct virtchnl_iwarp_qvlist_info *qvlist_info = -				(struct virtchnl_iwarp_qvlist_info *)msg; -	i40e_status aq_ret = 0; +	struct virtchnl_rdma_qvlist_info *qvlist_info = +				(struct virtchnl_rdma_qvlist_info *)msg; +	int aq_ret = 0;  	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || -	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) { +	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {  		aq_ret = I40E_ERR_PARAM;  		goto error_param;  	}  	if (config) { -		if (i40e_config_iwarp_qvlist(vf, qvlist_info)) +		if (i40e_config_rdma_qvlist(vf, qvlist_info))  			aq_ret = I40E_ERR_PARAM;  	} else { -		i40e_release_iwarp_qvlist(vf); +		i40e_release_rdma_qvlist(vf);  	}  error_param:  	/* send the response to the VF */  	return i40e_vc_send_resp_to_vf(vf, -			       config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : -			       VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, +			       config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP : +			       VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,  			       aq_ret);  } @@ -3261,7 +3264,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)  		(struct virtchnl_rss_key *)msg;  	struct i40e_pf *pf = vf->pf;  	struct i40e_vsi *vsi = NULL; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||  	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || @@ -3291,7 +3294,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)  		(struct virtchnl_rss_lut *)msg;  	struct i40e_pf *pf = vf->pf;  	struct i40e_vsi *vsi = NULL; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	u16 i;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || @@ -3326,7 +3329,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)  {  	struct virtchnl_rss_hena *vrh = NULL;  	struct i40e_pf *pf = vf->pf; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	int len = 0;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { @@ -3363,7 +3366,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)  		(struct virtchnl_rss_hena *)msg;  	struct i40e_pf *pf = vf->pf;  	struct i40e_hw *hw = &pf->hw; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {  		aq_ret = I40E_ERR_PARAM; @@ -3387,8 +3390,8 @@ err:   **/  static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)  { -	i40e_status aq_ret = 0;  	struct i40e_vsi *vsi; +	int aq_ret = 0;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {  		aq_ret = I40E_ERR_PARAM; @@ -3413,8 +3416,8 @@ err:   **/  static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)  { -	i40e_status aq_ret = 0;  	struct i40e_vsi *vsi; +	int aq_ret = 0;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {  		aq_ret = I40E_ERR_PARAM; @@ -3613,8 +3616,8 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)  			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);  		if (ret)  			dev_err(&pf->pdev->dev, -				"VF %d: Failed to delete cloud filter, err %s aq_err %s\n", -				vf->vf_id, i40e_stat_str(&pf->hw, ret), +				"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n", +				vf->vf_id, ERR_PTR(ret),  				i40e_aq_str(&pf->hw,  					    pf->hw.aq.asq_last_status)); @@ -3640,7 +3643,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)  	struct i40e_pf *pf = vf->pf;  	struct i40e_vsi *vsi = NULL;  	struct hlist_node *node; -	i40e_status aq_ret = 0; +	int 
aq_ret = 0;  	int i, ret;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { @@ -3716,8 +3719,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)  		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);  	if (ret) {  		dev_err(&pf->pdev->dev, -			"VF %d: Failed to delete cloud filter, err %s aq_err %s\n", -			vf->vf_id, i40e_stat_str(&pf->hw, ret), +			"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n", +			vf->vf_id, ERR_PTR(ret),  			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		goto err;  	} @@ -3771,7 +3774,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)  	struct i40e_cloud_filter *cfilter = NULL;  	struct i40e_pf *pf = vf->pf;  	struct i40e_vsi *vsi = NULL; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	int i, ret;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { @@ -3850,8 +3853,8 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)  		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);  	if (ret) {  		dev_err(&pf->pdev->dev, -			"VF %d: Failed to add cloud filter, err %s aq_err %s\n", -			vf->vf_id, i40e_stat_str(&pf->hw, ret), +			"VF %d: Failed to add cloud filter, err %pe aq_err %s\n", +			vf->vf_id, ERR_PTR(ret),  			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));  		goto err_free;  	} @@ -3880,7 +3883,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)  	struct i40e_pf *pf = vf->pf;  	struct i40e_link_status *ls = &pf->hw.phy.link_info;  	int i, adq_request_qps = 0; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	u64 speed = 0;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { @@ -3992,7 +3995,7 @@ err:  static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)  {  	struct i40e_pf *pf = vf->pf; -	i40e_status aq_ret = 0; +	int aq_ret = 0;  	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {  		aq_ret = I40E_ERR_PARAM; @@ -4110,14 +4113,14 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,  	case VIRTCHNL_OP_GET_STATS:  		ret = i40e_vc_get_stats_msg(vf, msg);  		break; -	case VIRTCHNL_OP_IWARP: -		ret = i40e_vc_iwarp_msg(vf, msg, msglen); +	case VIRTCHNL_OP_RDMA: +		ret = i40e_vc_rdma_msg(vf, msg, msglen);  		break; -	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: -		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true); +	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: +		ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);  		break; -	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: -		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false); +	case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP: +		ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);  		break;  	case VIRTCHNL_OP_CONFIG_RSS_KEY:  		ret = i40e_vc_config_rss_key(vf, msg); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 358bbdb58795..895b8feb2567 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -34,7 +34,7 @@ enum i40e_queue_ctrl {  enum i40e_vf_states {  	I40E_VF_STATE_INIT = 0,  	I40E_VF_STATE_ACTIVE, -	I40E_VF_STATE_IWARPENA, +	I40E_VF_STATE_RDMAENA,  	I40E_VF_STATE_DISABLED,  	I40E_VF_STATE_MC_PROMISC,  	I40E_VF_STATE_UC_PROMISC, @@ -46,7 +46,7 @@ enum i40e_vf_states {  enum i40e_vf_capabilities {  	I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,  	I40E_VIRTCHNL_VF_CAP_L2, -	I40E_VIRTCHNL_VF_CAP_IWARP, +	I40E_VIRTCHNL_VF_CAP_RDMA,  };  /* In ADq, max 4 VSI's can be allocated per VF including primary VF VSI. 
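The i40e_status-to-int conversion above is mechanical: each virtchnl handler swaps the typedef for a plain int and keeps its control flow intact. A minimal sketch of the shared handler shape after the change (the handler name and the VIRTCHNL_OP_UNKNOWN placeholder opcode are illustrative, not additions made by this patch):

	static int i40e_vc_example_msg(struct i40e_vf *vf, u8 *msg)
	{
		int aq_ret = 0;	/* previously "i40e_status aq_ret" */

		if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* ... validate and apply the VF request ... */

	error_param:
		/* send the response to the VF; the opcode is a placeholder */
		return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_UNKNOWN, aq_ret);
	}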
@@ -108,7 +108,7 @@ struct i40e_vf {  	u16 num_cloud_filters;  	/* RDMA Client */ -	struct virtchnl_iwarp_qvlist_info *qvlist_info; +	struct virtchnl_rdma_qvlist_info *qvlist_info;  };  void i40e_free_vfs(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 0d1bab4ac1b0..232bc61d9eee 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -30,6 +30,7 @@  #include <linux/jiffies.h>  #include <net/ip6_checksum.h>  #include <net/pkt_cls.h> +#include <net/pkt_sched.h>  #include <net/udp.h>  #include <net/tc_act/tc_gact.h>  #include <net/tc_act/tc_mirred.h> @@ -249,6 +250,7 @@ struct iavf_cloud_filter {  /* board specific private data structure */  struct iavf_adapter { +	struct workqueue_struct *wq;  	struct work_struct reset_task;  	struct work_struct adminq_task;  	struct delayed_work client_task; @@ -275,8 +277,8 @@ struct iavf_adapter {  	u64 hw_csum_rx_error;  	u32 rx_desc_count;  	int num_msix_vectors; -	int num_iwarp_msix; -	int iwarp_base_vector; +	int num_rdma_msix; +	int rdma_base_vector;  	u32 client_pending;  	struct iavf_client_instance *cinst;  	struct msix_entry *msix_entries; @@ -383,7 +385,7 @@ struct iavf_adapter {  	enum virtchnl_ops current_op;  #define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \  			    (_a)->vf_res->vf_cap_flags & \ -				VIRTCHNL_VF_OFFLOAD_IWARP : \ +				VIRTCHNL_VF_OFFLOAD_RDMA : \  			    0)  #define CLIENT_ENABLED(_a) ((_a)->cinst)  /* RSS by the PF should be preferred over RSS via other methods. */ @@ -459,7 +461,6 @@ struct iavf_device {  /* needed by iavf_ethtool.c */  extern char iavf_driver_name[]; -extern struct workqueue_struct *iavf_wq;  static inline const char *iavf_state_str(enum iavf_state_t state)  { diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c index 0c77e4171808..93c903c02c64 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_client.c +++ b/drivers/net/ethernet/intel/iavf/iavf_client.c @@ -127,7 +127,7 @@ void iavf_notify_client_open(struct iavf_vsi *vsi)  }  /** - * iavf_client_release_qvlist - send a message to the PF to release iwarp qv map + * iavf_client_release_qvlist - send a message to the PF to release rdma qv map   * @ldev: pointer to L2 context.   
*   * Return 0 on success or < 0 on error @@ -141,12 +141,12 @@ static int iavf_client_release_qvlist(struct iavf_info *ldev)  		return -EAGAIN;  	err = iavf_aq_send_msg_to_pf(&adapter->hw, -				     VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, +				     VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,  				     IAVF_SUCCESS, NULL, 0, NULL);  	if (err)  		dev_err(&adapter->pdev->dev, -			"Unable to send iWarp vector release message to PF, error %d, aq status %d\n", +			"Unable to send RDMA vector release message to PF, error %d, aq status %d\n",  			err, adapter->hw.aq.asq_last_status);  	return err; @@ -215,9 +215,9 @@ iavf_client_add_instance(struct iavf_adapter *adapter)  	cinst->lan_info.params = params;  	set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state); -	cinst->lan_info.msix_count = adapter->num_iwarp_msix; +	cinst->lan_info.msix_count = adapter->num_rdma_msix;  	cinst->lan_info.msix_entries = -			&adapter->msix_entries[adapter->iwarp_base_vector]; +			&adapter->msix_entries[adapter->rdma_base_vector];  	mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,  			       struct netdev_hw_addr, list); @@ -425,17 +425,17 @@ static u32 iavf_client_virtchnl_send(struct iavf_info *ldev,  	if (adapter->aq_required)  		return -EAGAIN; -	err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP, +	err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_RDMA,  				     IAVF_SUCCESS, msg, len, NULL);  	if (err) -		dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n", +		dev_err(&adapter->pdev->dev, "Unable to send RDMA message to PF, error %d, aq status %d\n",  			err, adapter->hw.aq.asq_last_status);  	return err;  }  /** - * iavf_client_setup_qvlist - send a message to the PF to setup iwarp qv map + * iavf_client_setup_qvlist - send a message to the PF to setup rdma qv map   * @ldev: pointer to L2 context.   * @client: Client pointer.   
* @qvlist_info: queue and vector list @@ -446,7 +446,7 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,  				    struct iavf_client *client,  				    struct iavf_qvlist_info *qvlist_info)  { -	struct virtchnl_iwarp_qvlist_info *v_qvlist_info; +	struct virtchnl_rdma_qvlist_info *v_qvlist_info;  	struct iavf_adapter *adapter = ldev->vf;  	struct iavf_qv_info *qv_info;  	enum iavf_status err; @@ -463,23 +463,23 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,  			continue;  		v_idx = qv_info->v_idx;  		if ((v_idx >= -		    (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) || -		    (v_idx < adapter->iwarp_base_vector)) +		    (adapter->rdma_base_vector + adapter->num_rdma_msix)) || +		    (v_idx < adapter->rdma_base_vector))  			return -EINVAL;  	} -	v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info; +	v_qvlist_info = (struct virtchnl_rdma_qvlist_info *)qvlist_info;  	msg_size = struct_size(v_qvlist_info, qv_info,  			       v_qvlist_info->num_vectors - 1); -	adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP); +	adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP);  	err = iavf_aq_send_msg_to_pf(&adapter->hw, -				VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, IAVF_SUCCESS, +				VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP, IAVF_SUCCESS,  				(u8 *)v_qvlist_info, msg_size, NULL);  	if (err) {  		dev_err(&adapter->pdev->dev, -			"Unable to send iWarp vector config message to PF, error %d, aq status %d\n", +			"Unable to send RDMA vector config message to PF, error %d, aq status %d\n",  			err, adapter->hw.aq.asq_last_status);  		goto out;  	} @@ -488,7 +488,7 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,  	for (i = 0; i < 5; i++) {  		msleep(100);  		if (!(adapter->client_pending & -		      BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) { +		      BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP))) {  			err = 0;  			break;  		} diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h index 9a7cf39ea75a..c5d51d7dc7cc 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_client.h +++ b/drivers/net/ethernet/intel/iavf/iavf_client.h @@ -159,7 +159,7 @@ struct iavf_client {  #define IAVF_CLIENT_FLAGS_LAUNCH_ON_PROBE	BIT(0)  #define IAVF_TX_FLAGS_NOTIFY_OTHER_EVENTS	BIT(2)  	u8 type; -#define IAVF_CLIENT_IWARP 0 +#define IAVF_CLIENT_RDMA 0  	struct iavf_client_ops *ops;	/* client ops provided by the client */  }; diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c index 34e46a23894f..16c490965b61 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_common.c +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -223,8 +223,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)  		return "IAVF_ERR_ADMIN_QUEUE_FULL";  	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:  		return "IAVF_ERR_ADMIN_QUEUE_NO_WORK"; -	case IAVF_ERR_BAD_IWARP_CQE: -		return "IAVF_ERR_BAD_IWARP_CQE"; +	case IAVF_ERR_BAD_RDMA_CQE: +		return "IAVF_ERR_BAD_RDMA_CQE";  	case IAVF_ERR_NVM_BLANK_MODE:  		return "IAVF_ERR_NVM_BLANK_MODE";  	case IAVF_ERR_NOT_IMPLEMENTED: diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index a056e1545615..6f171d1d85b7 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -147,7 +147,7 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,   * @ring: the ring to copy   *   * Queue statistics must be copied while protected by - * 
u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats. + * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats.   * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the   * ring pointer is null, zero out the queue stat values and update the data   * pointer. Otherwise safely copy the stats from the ring into the supplied @@ -165,14 +165,14 @@ iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)  	/* To avoid invalid statistics values, ensure that we keep retrying  	 * the copy until we get a consistent value according to -	 * u64_stats_fetch_retry_irq. But first, make sure our ring is +	 * u64_stats_fetch_retry. But first, make sure our ring is  	 * non-null before attempting to access its syncp.  	 */  	do { -		start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); +		start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);  		for (i = 0; i < size; i++)  			iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]); -	} while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); +	} while (ring && u64_stats_fetch_retry(&ring->syncp, start));  	/* Once we successfully copy the stats in, update the data pointer */  	*data += size; @@ -532,7 +532,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)  	if (changed_flags & IAVF_FLAG_LEGACY_RX) {  		if (netif_running(netdev)) {  			adapter->flags |= IAVF_FLAG_RESET_NEEDED; -			queue_work(iavf_wq, &adapter->reset_task); +			queue_work(adapter->wq, &adapter->reset_task);  		}  	} @@ -672,7 +672,7 @@ static int iavf_set_ringparam(struct net_device *netdev,  	if (netif_running(netdev)) {  		adapter->flags |= IAVF_FLAG_RESET_NEEDED; -		queue_work(iavf_wq, &adapter->reset_task); +		queue_work(adapter->wq, &adapter->reset_task);  	}  	return 0; @@ -1433,7 +1433,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx  	adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;  	spin_unlock_bh(&adapter->fdir_fltr_lock); -	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);  ret:  	if (err && fltr) @@ -1474,7 +1474,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx  	spin_unlock_bh(&adapter->fdir_fltr_lock);  	if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) -		mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);  	return err;  } @@ -1658,7 +1658,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,  	spin_unlock_bh(&adapter->adv_rss_lock);  	if (!err) -		mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);  	mutex_unlock(&adapter->crit_lock); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index f71e132ede09..3273aeb8fa67 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -49,7 +49,6 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver")  MODULE_LICENSE("GPL v2");  static const struct net_device_ops iavf_netdev_ops; -struct workqueue_struct *iavf_wq;  int iavf_status_to_errno(enum iavf_status status)  { @@ -106,7 +105,7 @@ int iavf_status_to_errno(enum iavf_status status)  	case IAVF_ERR_SRQ_ENABLED:  	case IAVF_ERR_ADMIN_QUEUE_ERROR:  	case IAVF_ERR_ADMIN_QUEUE_FULL: -	case IAVF_ERR_BAD_IWARP_CQE: +	case IAVF_ERR_BAD_RDMA_CQE:  	case IAVF_ERR_NVM_BLANK_MODE:  	
case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:  	case IAVF_ERR_DIAG_TEST_FAILED: @@ -277,7 +276,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)  	if (!(adapter->flags &  	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {  		adapter->flags |= IAVF_FLAG_RESET_NEEDED; -		queue_work(iavf_wq, &adapter->reset_task); +		queue_work(adapter->wq, &adapter->reset_task);  	}  } @@ -291,7 +290,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)  void iavf_schedule_request_stats(struct iavf_adapter *adapter)  {  	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; -	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);  }  /** @@ -411,7 +410,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)  	if (adapter->state != __IAVF_REMOVE)  		/* schedule work on the private workqueue */ -		queue_work(iavf_wq, &adapter->adminq_task); +		queue_work(adapter->wq, &adapter->adminq_task);  	return IRQ_HANDLED;  } @@ -1034,7 +1033,7 @@ int iavf_replace_primary_mac(struct iavf_adapter *adapter,  	/* schedule the watchdog task to immediately process the request */  	if (f) { -		queue_work(iavf_wq, &adapter->watchdog_task.work); +		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);  		return 0;  	}  	return -ENOMEM; @@ -1257,7 +1256,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)  	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;  	if (CLIENT_ENABLED(adapter))  		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; -	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);  }  /** @@ -1414,7 +1413,7 @@ void iavf_down(struct iavf_adapter *adapter)  		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;  	} -	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);  }  /** @@ -2248,7 +2247,7 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter,  	if (aq_required) {  		adapter->aq_required |= aq_required; -		mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);  	}  } @@ -2693,6 +2692,15 @@ static void iavf_watchdog_task(struct work_struct *work)  		goto restart_watchdog;  	} +	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && +	    adapter->netdev_registered && +	    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) && +	    rtnl_trylock()) { +		netdev_update_features(adapter->netdev); +		rtnl_unlock(); +		adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; +	} +  	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)  		iavf_change_state(adapter, __IAVF_COMM_FAILED); @@ -2700,7 +2708,7 @@ static void iavf_watchdog_task(struct work_struct *work)  		adapter->aq_required = 0;  		adapter->current_op = VIRTCHNL_OP_UNKNOWN;  		mutex_unlock(&adapter->crit_lock); -		queue_work(iavf_wq, &adapter->reset_task); +		queue_work(adapter->wq, &adapter->reset_task);  		return;  	} @@ -2708,31 +2716,31 @@ static void iavf_watchdog_task(struct work_struct *work)  	case __IAVF_STARTUP:  		iavf_startup(adapter);  		mutex_unlock(&adapter->crit_lock); -		queue_delayed_work(iavf_wq, &adapter->watchdog_task, +		queue_delayed_work(adapter->wq, &adapter->watchdog_task,  				   msecs_to_jiffies(30));  		return;  	case __IAVF_INIT_VERSION_CHECK:  		iavf_init_version_check(adapter);  		mutex_unlock(&adapter->crit_lock); -		queue_delayed_work(iavf_wq, &adapter->watchdog_task, +		queue_delayed_work(adapter->wq, &adapter->watchdog_task,  				   
msecs_to_jiffies(30));  		return;  	case __IAVF_INIT_GET_RESOURCES:  		iavf_init_get_resources(adapter);  		mutex_unlock(&adapter->crit_lock); -		queue_delayed_work(iavf_wq, &adapter->watchdog_task, +		queue_delayed_work(adapter->wq, &adapter->watchdog_task,  				   msecs_to_jiffies(1));  		return;  	case __IAVF_INIT_EXTENDED_CAPS:  		iavf_init_process_extended_caps(adapter);  		mutex_unlock(&adapter->crit_lock); -		queue_delayed_work(iavf_wq, &adapter->watchdog_task, +		queue_delayed_work(adapter->wq, &adapter->watchdog_task,  				   msecs_to_jiffies(1));  		return;  	case __IAVF_INIT_CONFIG_ADAPTER:  		iavf_init_config_adapter(adapter);  		mutex_unlock(&adapter->crit_lock); -		queue_delayed_work(iavf_wq, &adapter->watchdog_task, +		queue_delayed_work(adapter->wq, &adapter->watchdog_task,  				   msecs_to_jiffies(1));  		return;  	case __IAVF_INIT_FAILED: @@ -2751,14 +2759,14 @@ static void iavf_watchdog_task(struct work_struct *work)  			adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;  			iavf_shutdown_adminq(hw);  			mutex_unlock(&adapter->crit_lock); -			queue_delayed_work(iavf_wq, +			queue_delayed_work(adapter->wq,  					   &adapter->watchdog_task, (5 * HZ));  			return;  		}  		/* Try again from failed step*/  		iavf_change_state(adapter, adapter->last_state);  		mutex_unlock(&adapter->crit_lock); -		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); +		queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);  		return;  	case __IAVF_COMM_FAILED:  		if (test_bit(__IAVF_IN_REMOVE_TASK, @@ -2789,13 +2797,14 @@ static void iavf_watchdog_task(struct work_struct *work)  		adapter->aq_required = 0;  		adapter->current_op = VIRTCHNL_OP_UNKNOWN;  		mutex_unlock(&adapter->crit_lock); -		queue_delayed_work(iavf_wq, +		queue_delayed_work(adapter->wq,  				   &adapter->watchdog_task,  				   msecs_to_jiffies(10));  		return;  	case __IAVF_RESETTING:  		mutex_unlock(&adapter->crit_lock); -		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); +		queue_delayed_work(adapter->wq, &adapter->watchdog_task, +				   HZ * 2);  		return;  	case __IAVF_DOWN:  	case __IAVF_DOWN_PENDING: @@ -2834,9 +2843,9 @@ static void iavf_watchdog_task(struct work_struct *work)  		adapter->aq_required = 0;  		adapter->current_op = VIRTCHNL_OP_UNKNOWN;  		dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); -		queue_work(iavf_wq, &adapter->reset_task); +		queue_work(adapter->wq, &adapter->reset_task);  		mutex_unlock(&adapter->crit_lock); -		queue_delayed_work(iavf_wq, +		queue_delayed_work(adapter->wq,  				   &adapter->watchdog_task, HZ * 2);  		return;  	} @@ -2845,12 +2854,13 @@ static void iavf_watchdog_task(struct work_struct *work)  	mutex_unlock(&adapter->crit_lock);  restart_watchdog:  	if (adapter->state >= __IAVF_DOWN) -		queue_work(iavf_wq, &adapter->adminq_task); +		queue_work(adapter->wq, &adapter->adminq_task);  	if (adapter->aq_required) -		queue_delayed_work(iavf_wq, &adapter->watchdog_task, +		queue_delayed_work(adapter->wq, &adapter->watchdog_task,  				   msecs_to_jiffies(20));  	else -		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); +		queue_delayed_work(adapter->wq, &adapter->watchdog_task, +				   HZ * 2);  }  /** @@ -2952,7 +2962,7 @@ static void iavf_reset_task(struct work_struct *work)  	 */  	if (!mutex_trylock(&adapter->crit_lock)) {  		if (adapter->state != __IAVF_REMOVE) -			queue_work(iavf_wq, &adapter->reset_task); +			queue_work(adapter->wq, &adapter->reset_task);  		goto reset_finish;  	} @@ -3116,7 +3126,7 @@ continue_reset:  	
bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);  	bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID); -	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); +	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);  	/* We were running when the reset started, so we need to restore some  	 * state here. @@ -3208,7 +3218,7 @@ static void iavf_adminq_task(struct work_struct *work)  		if (adapter->state == __IAVF_REMOVE)  			return; -		queue_work(iavf_wq, &adapter->adminq_task); +		queue_work(adapter->wq, &adapter->adminq_task);  		goto out;  	} @@ -3232,24 +3242,6 @@ static void iavf_adminq_task(struct work_struct *work)  	} while (pending);  	mutex_unlock(&adapter->crit_lock); -	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) { -		if (adapter->netdev_registered || -		    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { -			struct net_device *netdev = adapter->netdev; - -			rtnl_lock(); -			netdev_update_features(netdev); -			rtnl_unlock(); -			/* Request VLAN offload settings */ -			if (VLAN_V2_ALLOWED(adapter)) -				iavf_set_vlan_offload_features -					(adapter, 0, netdev->features); - -			iavf_set_queue_vlan_tag_loc(adapter); -		} - -		adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; -	}  	if ((adapter->flags &  	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||  	    adapter->state == __IAVF_RESETTING) @@ -3850,7 +3842,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,  				field_flags |= IAVF_CLOUD_FIELD_IIP;  			} else {  				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", -					be32_to_cpu(match.mask->dst)); +					be32_to_cpu(match.mask->src));  				return -EINVAL;  			}  		} @@ -4349,7 +4341,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)  	if (netif_running(netdev)) {  		adapter->flags |= IAVF_FLAG_RESET_NEEDED; -		queue_work(iavf_wq, &adapter->reset_task); +		queue_work(adapter->wq, &adapter->reset_task);  	}  	return 0; @@ -4827,7 +4819,7 @@ static void iavf_shutdown(struct pci_dev *pdev)  		iavf_close(netdev);  	if (iavf_lock_timeout(&adapter->crit_lock, 5000)) -		dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); +		dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);  	/* Prevent the watchdog from running. 
*/  	iavf_change_state(adapter, __IAVF_REMOVE);  	adapter->aq_required = 0; @@ -4876,8 +4868,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  		goto err_pci_reg;  	} -	pci_enable_pcie_error_reporting(pdev); -  	pci_set_master(pdev);  	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), @@ -4898,6 +4888,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	hw = &adapter->hw;  	hw->back = adapter; +	adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, +					      iavf_driver_name); +	if (!adapter->wq) { +		err = -ENOMEM; +		goto err_alloc_wq; +	} +  	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;  	iavf_change_state(adapter, __IAVF_STARTUP); @@ -4942,7 +4939,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);  	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);  	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); -	queue_delayed_work(iavf_wq, &adapter->watchdog_task, +	queue_delayed_work(adapter->wq, &adapter->watchdog_task,  			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));  	/* Setup the wait queue for indicating transition to down status */ @@ -4954,9 +4951,10 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	return 0;  err_ioremap: +	destroy_workqueue(adapter->wq); +err_alloc_wq:  	free_netdev(netdev);  err_alloc_etherdev: -	pci_disable_pcie_error_reporting(pdev);  	pci_release_regions(pdev);  err_pci_reg:  err_dma: @@ -5023,7 +5021,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d)  		return err;  	} -	queue_work(iavf_wq, &adapter->reset_task); +	queue_work(adapter->wq, &adapter->reset_task);  	netif_device_attach(adapter->netdev); @@ -5088,7 +5086,7 @@ static void iavf_remove(struct pci_dev *pdev)  	}  	mutex_lock(&adapter->crit_lock); -	dev_info(&adapter->pdev->dev, "Remove device\n"); +	dev_info(&adapter->pdev->dev, "Removing device\n");  	iavf_change_state(adapter, __IAVF_REMOVE);  	iavf_request_reset(adapter); @@ -5170,9 +5168,9 @@ static void iavf_remove(struct pci_dev *pdev)  	}  	spin_unlock_bh(&adapter->adv_rss_lock); -	free_netdev(netdev); +	destroy_workqueue(adapter->wq); -	pci_disable_pcie_error_reporting(pdev); +	free_netdev(netdev);  	pci_disable_device(pdev);  } @@ -5196,24 +5194,11 @@ static struct pci_driver iavf_driver = {   **/  static int __init iavf_init_module(void)  { -	int ret; -  	pr_info("iavf: %s\n", iavf_driver_string);  	pr_info("%s\n", iavf_copyright); -	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, -				  iavf_driver_name); -	if (!iavf_wq) { -		pr_err("%s: Failed to create workqueue\n", iavf_driver_name); -		return -ENOMEM; -	} - -	ret = pci_register_driver(&iavf_driver); -	if (ret) -		destroy_workqueue(iavf_wq); - -	return ret; +	return pci_register_driver(&iavf_driver);  }  module_init(iavf_init_module); @@ -5227,7 +5212,6 @@ module_init(iavf_init_module);  static void __exit iavf_exit_module(void)  {  	pci_unregister_driver(&iavf_driver); -	destroy_workqueue(iavf_wq);  }  module_exit(iavf_exit_module); diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h index 2ea5c7c339bc..0e493ee9e9d1 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_status.h +++ b/drivers/net/ethernet/intel/iavf/iavf_status.h @@ -64,7 +64,7 @@ enum iavf_status {  	IAVF_ERR_BUF_TOO_SHORT			= -55,  	IAVF_ERR_ADMIN_QUEUE_FULL		= -56,  	IAVF_ERR_ADMIN_QUEUE_NO_WORK		= -57, -	
IAVF_ERR_BAD_IWARP_CQE			= -58, +	IAVF_ERR_BAD_RDMA_CQE			= -58,  	IAVF_ERR_NVM_BLANK_MODE			= -59,  	IAVF_ERR_NOT_IMPLEMENTED		= -60,  	IAVF_ERR_PE_DOORBELL_NOT_ENABLED	= -61, diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 24a701fd140e..6d23338604bb 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1952,7 +1952,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,  			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {  				adapter->flags |= IAVF_FLAG_RESET_PENDING;  				dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); -				queue_work(iavf_wq, &adapter->reset_task); +				queue_work(adapter->wq, &adapter->reset_task);  			}  			break;  		default: @@ -2226,6 +2226,14 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,  		iavf_process_config(adapter);  		adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; + +		/* Request VLAN offload settings */ +		if (VLAN_V2_ALLOWED(adapter)) +			iavf_set_vlan_offload_features(adapter, 0, +						       netdev->features); + +		iavf_set_queue_vlan_tag_loc(adapter); +  		was_mac_changed = !ether_addr_equal(netdev->dev_addr,  						    adapter->hw.mac.addr); @@ -2290,7 +2298,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,  		if (v_opcode != adapter->current_op)  			return;  		break; -	case VIRTCHNL_OP_IWARP: +	case VIRTCHNL_OP_RDMA:  		/* Gobble zero-length replies from the PF. They indicate that  		 * a previous message was received OK, and the client doesn't  		 * care about that. @@ -2299,9 +2307,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,  			iavf_notify_client_message(&adapter->vsi, msg, msglen);  		break; -	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: +	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:  		adapter->client_pending &= -				~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); +				~(BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP));  		break;  	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {  		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 9183d480b70b..f269952d207d 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -28,6 +28,7 @@ ice-y := ice_main.o	\  	 ice_flow.o	\  	 ice_idc.o	\  	 ice_devlink.o	\ +	 ice_ddp.o	\  	 ice_fw_update.o \  	 ice_lag.o	\  	 ice_ethtool.o  \ @@ -42,8 +43,8 @@ ice-$(CONFIG_PCI_IOV) +=	\  	ice_vf_vsi_vlan_ops.o	\  	ice_vf_lib.o  ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o -ice-$(CONFIG_TTY) += ice_gnss.o  ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o  ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o  ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o  ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o +ice-$(CONFIG_ICE_GNSS) += ice_gnss.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 001500afc4a6..b0e29e342401 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -39,7 +39,9 @@  #include <linux/avf/virtchnl.h>  #include <linux/cpu_rmap.h>  #include <linux/dim.h> +#include <linux/gnss.h>  #include <net/pkt_cls.h> +#include <net/pkt_sched.h>  #include <net/tc_act/tc_mirred.h>  #include <net/tc_act/tc_gact.h>  #include <net/ip.h> @@ -121,6 +123,8 @@  #define ICE_MAX_MTU	(ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD) +#define ICE_MAX_TSO_SIZE 131072 +  #define ICE_UP_TABLE_TRANSLATE(val, i) \  		(((val) << 
ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \  		  ICE_AQ_VSI_UP_TABLE_UP##i##_M) @@ -137,6 +141,21 @@   */  #define ICE_BW_KBPS_DIVISOR		125 +/* Default recipes have priority 4 and below, hence priority values between 5..7 + * can be used as filter priority for advanced switch filter (advanced switch + * filters need new recipe to be created for specified extraction sequence + * because default recipe extraction sequence does not represent custom + * extraction) + */ +#define ICE_SWITCH_FLTR_PRIO_QUEUE	7 +/* prio 6 is reserved for future use (e.g. switch filter with L3 fields + + * (Optional: IP TOS/TTL) + L4 fields + (optionally: TCP fields such as + * SYN/FIN/RST)) + */ +#define ICE_SWITCH_FLTR_PRIO_RSVD	6 +#define ICE_SWITCH_FLTR_PRIO_VSI	5 +#define ICE_SWITCH_FLTR_PRIO_QGRP	ICE_SWITCH_FLTR_PRIO_VSI +  /* Macro for each VSI in a PF */  #define ice_for_each_vsi(pf, i) \  	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++) @@ -305,6 +324,11 @@ enum ice_vsi_state {  	ICE_VSI_STATE_NBITS		/* must be last */  }; +struct ice_vsi_stats { +	struct ice_ring_stats **tx_ring_stats;  /* Tx ring stats array */ +	struct ice_ring_stats **rx_ring_stats;  /* Rx ring stats array */ +}; +  /* struct that defines a VSI, associated with a dev */  struct ice_vsi {  	struct net_device *netdev; @@ -332,7 +356,6 @@ struct ice_vsi {  	struct ice_vf *vf;		/* VF associated with this VSI */ -	u16 ethtype;			/* Ethernet protocol for pause frame */  	u16 num_gfltr;  	u16 num_bfltr; @@ -358,6 +381,7 @@ struct ice_vsi {  	/* VSI stats */  	struct rtnl_link_stats64 net_stats; +	struct rtnl_link_stats64 net_stats_prev;  	struct ice_eth_stats eth_stats;  	struct ice_eth_stats eth_stats_prev; @@ -525,6 +549,7 @@ struct ice_pf {  	u16 ctrl_vsi_idx;		/* control VSI index in pf->vsi array */  	struct ice_vsi **vsi;		/* VSIs created by the driver */ +	struct ice_vsi_stats **vsi_stats;  	struct ice_sw *first_sw;	/* first switch created by firmware */  	u16 eswitch_mode;		/* current mode of eswitch */  	struct ice_vfs vfs; @@ -543,9 +568,8 @@ struct ice_pf {  	struct mutex adev_mutex;	/* lock to protect aux device access */  	u32 msg_enable;  	struct ice_ptp ptp; -	struct tty_driver *ice_gnss_tty_driver; -	struct tty_port *gnss_tty_port[ICE_GNSS_TTY_MINOR_DEVICES]; -	struct gnss_serial *gnss_serial[ICE_GNSS_TTY_MINOR_DEVICES]; +	struct gnss_serial *gnss_serial; +	struct gnss_device *gnss_dev;  	u16 num_rdma_msix;		/* Total MSIX vectors for RDMA driver */  	u16 rdma_base_vector; @@ -594,6 +618,8 @@ struct ice_pf {  	u16 num_dmac_chnl_fltrs;  	struct hlist_head tc_flower_fltr_list; +	u64 supported_rxdids; +  	__le64 nvm_phy_type_lo; /* NVM PHY type low */  	__le64 nvm_phy_type_hi; /* NVM PHY type high */  	struct ice_link_default_override_tlv link_dflt_override; @@ -856,7 +882,7 @@ void ice_set_ethtool_repr_ops(struct net_device *netdev);  void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);  u16 ice_get_avail_txq_count(struct ice_pf *pf);  u16 ice_get_avail_rxq_count(struct ice_pf *pf); -int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx); +int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);  void ice_update_vsi_stats(struct ice_vsi *vsi);  void ice_update_pf_stats(struct ice_pf *pf);  void @@ -865,7 +891,7 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,  int ice_up(struct ice_vsi *vsi);  int ice_down(struct ice_vsi *vsi);  int ice_down_up(struct ice_vsi *vsi); -int ice_vsi_cfg(struct ice_vsi *vsi); +int ice_vsi_cfg_lan(struct ice_vsi *vsi);  struct ice_vsi 
*ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);  int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);  int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog); @@ -883,6 +909,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);  int ice_plug_aux_dev(struct ice_pf *pf);  void ice_unplug_aux_dev(struct ice_pf *pf);  int ice_init_rdma(struct ice_pf *pf); +void ice_deinit_rdma(struct ice_pf *pf);  const char *ice_aq_str(enum ice_aq_err aq_err);  bool ice_is_wol_supported(struct ice_hw *hw);  void ice_fdir_del_all_fltrs(struct ice_vsi *vsi); @@ -907,6 +934,8 @@ int ice_open(struct net_device *netdev);  int ice_open_internal(struct net_device *netdev);  int ice_stop(struct net_device *netdev);  void ice_service_task_schedule(struct ice_pf *pf); +int ice_load(struct ice_pf *pf); +void ice_unload(struct ice_pf *pf);  /**   * ice_set_rdma_cap - enable RDMA support diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 1bdc70aa979d..838d9b274d68 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -848,9 +848,9 @@ struct ice_aqc_txsched_elem {  	u8 generic;  #define ICE_AQC_ELEM_GENERIC_MODE_M		0x1  #define ICE_AQC_ELEM_GENERIC_PRIO_S		0x1 -#define ICE_AQC_ELEM_GENERIC_PRIO_M	(0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S) +#define ICE_AQC_ELEM_GENERIC_PRIO_M	        GENMASK(3, 1)  #define ICE_AQC_ELEM_GENERIC_SP_S		0x4 -#define ICE_AQC_ELEM_GENERIC_SP_M	(0x1 << ICE_AQC_ELEM_GENERIC_SP_S) +#define ICE_AQC_ELEM_GENERIC_SP_M	        GENMASK(4, 4)  #define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S	0x5  #define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M	\  	(0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S) @@ -1659,14 +1659,24 @@ struct ice_aqc_lldp_get_mib {  #define ICE_AQ_LLDP_TX_ACTIVE			0  #define ICE_AQ_LLDP_TX_SUSPENDED		1  #define ICE_AQ_LLDP_TX_FLUSHED			3 +/* DCBX mode */ +#define ICE_AQ_LLDP_DCBX_M			GENMASK(7, 6) +#define ICE_AQ_LLDP_DCBX_NA			0 +#define ICE_AQ_LLDP_DCBX_CEE			1 +#define ICE_AQ_LLDP_DCBX_IEEE			2 + +	u8 state; +#define ICE_AQ_LLDP_MIB_CHANGE_STATE_M		BIT(0) +#define ICE_AQ_LLDP_MIB_CHANGE_EXECUTED		0 +#define ICE_AQ_LLDP_MIB_CHANGE_PENDING		1 +  /* The following bytes are reserved for the Get LLDP MIB command (0x0A00)   * and in the LLDP MIB Change Event (0x0A01). They are valid for the   * Get LLDP MIB (0x0A00) response only.   
*/ -	u8 reserved1;  	__le16 local_len;  	__le16 remote_len; -	u8 reserved2[2]; +	u8 reserved[2];  	__le32 addr_high;  	__le32 addr_low;  }; @@ -1677,6 +1687,9 @@ struct ice_aqc_lldp_set_mib_change {  	u8 command;  #define ICE_AQ_LLDP_MIB_UPDATE_ENABLE		0x0  #define ICE_AQ_LLDP_MIB_UPDATE_DIS		0x1 +#define ICE_AQ_LLDP_MIB_PENDING_M		BIT(1) +#define ICE_AQ_LLDP_MIB_PENDING_DISABLE		0 +#define ICE_AQ_LLDP_MIB_PENDING_ENABLE		1  	u8 reserved[15];  }; @@ -2329,6 +2342,7 @@ enum ice_adminq_opc {  	ice_aqc_opc_lldp_set_local_mib			= 0x0A08,  	ice_aqc_opc_lldp_stop_start_specific_agent	= 0x0A09,  	ice_aqc_opc_lldp_filter_ctrl			= 0x0A0A, +	ice_aqc_opc_lldp_execute_pending_mib		= 0x0A0B,  	/* RSS commands */  	ice_aqc_opc_set_rss_key				= 0x0B02, diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index e864634d66bc..1911d644dfa8 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -355,9 +355,6 @@ static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)  {  	if (ice_ring_uses_build_skb(rx_ring))  		return ICE_SKB_PAD; -	else if (ice_is_xdp_ena_vsi(rx_ring->vsi)) -		return XDP_PACKET_HEADROOM; -  	return 0;  } @@ -389,7 +386,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)  	 * Indicates the starting address of the descriptor queue defined in  	 * 128 Byte units.  	 */ -	rlan_ctx.base = ring->dma >> 7; +	rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;  	rlan_ctx.qlen = ring->count; @@ -495,7 +492,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)  int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)  {  	struct device *dev = ice_pf_to_dev(ring->vsi->back); -	u16 num_bufs = ICE_DESC_UNUSED(ring); +	u32 num_bufs = ICE_RX_DESC_UNUSED(ring);  	int err;  	ring->rx_buf_len = ring->vsi->rx_buf_len; @@ -503,8 +500,10 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)  	if (ring->vsi->type == ICE_VSI_PF) {  		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))  			/* coverity[check_return] */ -			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, -					 ring->q_index, ring->q_vector->napi.napi_id); +			__xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, +					   ring->q_index, +					   ring->q_vector->napi.napi_id, +					   ring->vsi->rx_buf_len);  		ring->xsk_pool = ice_xsk_pool(ring);  		if (ring->xsk_pool) { @@ -524,9 +523,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)  		} else {  			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))  				/* coverity[check_return] */ -				xdp_rxq_info_reg(&ring->xdp_rxq, -						 ring->netdev, -						 ring->q_index, ring->q_vector->napi.napi_id); +				__xdp_rxq_info_reg(&ring->xdp_rxq, +						   ring->netdev, +						   ring->q_index, +						   ring->q_vector->napi.napi_id, +						   ring->vsi->rx_buf_len);  			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,  							 MEM_TYPE_PAGE_SHARED, @@ -536,6 +537,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)  		}  	} +	xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq); +	ring->xdp.data = NULL;  	err = ice_setup_rx_ctx(ring);  	if (err) {  		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n", diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 039342a0ed15..c2fda4fa4188 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -208,6 +208,31 @@ bool ice_is_e810t(struct ice_hw *hw)  }  /** + * ice_is_e823 + * @hw: pointer to the hardware structure + * + * returns true if the device is E823-L or E823-C 
based, false if not. + */ +bool ice_is_e823(struct ice_hw *hw) +{ +	switch (hw->device_id) { +	case ICE_DEV_ID_E823L_BACKPLANE: +	case ICE_DEV_ID_E823L_SFP: +	case ICE_DEV_ID_E823L_10G_BASE_T: +	case ICE_DEV_ID_E823L_1GBE: +	case ICE_DEV_ID_E823L_QSFP: +	case ICE_DEV_ID_E823C_BACKPLANE: +	case ICE_DEV_ID_E823C_QSFP: +	case ICE_DEV_ID_E823C_SFP: +	case ICE_DEV_ID_E823C_10G_BASE_T: +	case ICE_DEV_ID_E823C_SGMII: +		return true; +	default: +		return false; +	} +} + +/**   * ice_clear_pf_cfg - Clear PF configuration   * @hw: pointer to the hardware structure   * @@ -1088,8 +1113,10 @@ int ice_init_hw(struct ice_hw *hw)  	if (status)  		goto err_unroll_cqinit; -	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw), -				     sizeof(*hw->port_info), GFP_KERNEL); +	if (!hw->port_info) +		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw), +					     sizeof(*hw->port_info), +					     GFP_KERNEL);  	if (!hw->port_info) {  		status = -ENOMEM;  		goto err_unroll_cqinit; @@ -1105,6 +1132,9 @@ int ice_init_hw(struct ice_hw *hw)  	hw->evb_veb = true; +	/* init xarray for identifying scheduling nodes uniquely */ +	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC); +  	/* Query the allocated resources for Tx scheduler */  	status = ice_sched_query_res_alloc(hw);  	if (status) { @@ -1214,11 +1244,6 @@ void ice_deinit_hw(struct ice_hw *hw)  	ice_free_hw_tbls(hw);  	mutex_destroy(&hw->tnl_lock); -	if (hw->port_info) { -		devm_kfree(ice_hw_to_dev(hw), hw->port_info); -		hw->port_info = NULL; -	} -  	/* Attempt to disable FW logging before shutting down control queues */  	ice_cfg_fw_log(hw, false);  	ice_destroy_all_ctrlq(hw); @@ -2945,8 +2970,8 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw)   * Note: In the structure of [phy_type_low, phy_type_high], there should   * be one bit set, as this function will convert one PHY type to its   * speed. 
- * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned - * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned + * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned + * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned   */  static u16  ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) @@ -4600,7 +4625,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,  	q_ctx->q_teid = le32_to_cpu(node.node_teid);  	/* add a leaf node into scheduler tree queue layer */ -	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); +	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);  	if (!status)  		status = ice_sched_replay_q_bw(pi, q_ctx); @@ -4835,7 +4860,7 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,  	for (i = 0; i < num_qsets; i++) {  		node.node_teid = buf->rdma_qsets[i].qset_teid;  		ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, -					 &node); +					 &node, NULL);  		if (ret)  			break;  		qset_teid[i] = le32_to_cpu(node.node_teid); @@ -5501,6 +5526,19 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)  }  /** + * ice_lldp_execute_pending_mib - execute LLDP pending MIB request + * @hw: pointer to HW struct + */ +int ice_lldp_execute_pending_mib(struct ice_hw *hw) +{ +	struct ice_aq_desc desc; + +	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib); + +	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/**   * ice_fw_supports_report_dflt_cfg   * @hw: pointer to the hardware structure   * @@ -5512,3 +5550,39 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)  				     ICE_FW_API_REPORT_DFLT_CFG_MIN,  				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);  } + +/* each of the indexes into the following array match the speed of a return + * value from the list of AQ returned speeds like the range: + * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB excluding + * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15) and maps to BIT(14) in this + * array. 
The link_speed value + * returned by the firmware is a 16-bit quantity, and the array is indexed + * by [fls(speed) - 1] + */ +static const u32 ice_aq_to_link_speed[] = { +	SPEED_10,	/* BIT(0) */ +	SPEED_100, +	SPEED_1000, +	SPEED_2500, +	SPEED_5000, +	SPEED_10000, +	SPEED_20000, +	SPEED_25000, +	SPEED_40000, +	SPEED_50000, +	SPEED_100000,	/* BIT(10) */ +}; + +/** + * ice_get_link_speed - get integer speed from table + * @index: array index from fls(aq speed) - 1 + * + * Returns: u32 value containing integer speed + */ +u32 ice_get_link_speed(u16 index) +{ +	if (index >= ARRAY_SIZE(ice_aq_to_link_speed)) +		return 0; + +	return ice_aq_to_link_speed[index]; +} diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 8b6712b92e84..8ba5f935a092 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -122,7 +122,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,  	   bool ena_auto_link_update);  int  ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, -	       enum ice_fc_mode fc); +	       enum ice_fc_mode req_mode);  bool  ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps,  			struct ice_aqc_set_phy_cfg_data *cfg); @@ -163,6 +163,7 @@ int  ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,  		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,  		  bool write, struct ice_sq_cd *cd); +u32 ice_get_link_speed(u16 index);  int  ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, @@ -198,6 +199,7 @@ void  ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,  		  u64 *prev_stat, u64 *cur_stat);  bool ice_is_e810t(struct ice_hw *hw); +bool ice_is_e823(struct ice_hw *hw);  int  ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,  		     struct ice_aqc_txsched_elem_data *buf); @@ -220,6 +222,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,  bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);  int  ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); +int ice_lldp_execute_pending_mib(struct ice_hw *hw);  int  ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,  		u16 bus_addr, __le16 addr, u8 params, u8 *data, diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 0b146a0d4205..c557dfc50aad 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -73,6 +73,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,  	if (!ena_update)  		cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS; +	else +		cmd->command |= FIELD_PREP(ICE_AQ_LLDP_MIB_PENDING_M, +					   ICE_AQ_LLDP_MIB_PENDING_ENABLE);  	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);  } @@ -566,7 +569,7 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)   * @tlv: Organization specific TLV   * @dcbcfg: Local store to update ETS REC data   * - * Currently only IEEE 802.1Qaz TLV is supported, all others + * Currently IEEE 802.1Qaz and CEE DCBX TLV are supported, others   * will be returned   */  static void @@ -585,7 +588,7 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)  		ice_parse_cee_tlv(tlv, dcbcfg);  		break;  	default: -		break; +		break; /* Other OUIs not supported */  	}  } @@ -964,6 +967,42 @@ int ice_get_dcb_cfg(struct ice_port_info *pi)  }  /** + * 
ice_get_dcb_cfg_from_mib_change + * @pi: port information structure + * @event: pointer to the admin queue receive event + * + * Set DCB configuration from received MIB Change event + */ +void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, +				     struct ice_rq_event_info *event) +{ +	struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; +	struct ice_aqc_lldp_get_mib *mib; +	u8 change_type, dcbx_mode; + +	mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; + +	change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M,  mib->type); +	if (change_type == ICE_AQ_LLDP_MIB_REMOTE) +		dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg; + +	dcbx_mode = FIELD_GET(ICE_AQ_LLDP_DCBX_M, mib->type); + +	switch (dcbx_mode) { +	case ICE_AQ_LLDP_DCBX_IEEE: +		dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE; +		ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg); +		break; + +	case ICE_AQ_LLDP_DCBX_CEE: +		pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg; +		ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *) +				   event->msg_buf, pi); +		break; +	} +} + +/**   * ice_init_dcb   * @hw: pointer to the HW struct   * @enable_mib_change: enable MIB change event @@ -1580,7 +1619,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,  		/* new TC */  		status = ice_sched_query_elem(pi->hw, teid2, &elem);  		if (!status) -			status = ice_sched_add_node(pi, 1, &elem); +			status = ice_sched_add_node(pi, 1, &elem, NULL);  		if (status)  			break;  		/* update the TC number */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index 6abf28a14291..be34650a77d5 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -144,6 +144,8 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,  		   struct ice_dcbx_cfg *dcbcfg);  int ice_get_dcb_cfg(struct ice_port_info *pi);  int ice_set_dcb_cfg(struct ice_port_info *pi); +void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, +				     struct ice_rq_event_info *event);  int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);  int  ice_query_port_ets(struct ice_port_info *pi, diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index add90e75f05c..c6d4926f0fcf 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -3,6 +3,7 @@  #include "ice_dcb_lib.h"  #include "ice_dcb_nl.h" +#include "ice_devlink.h"  /**   * ice_dcb_get_ena_tc - return bitmap of enabled TCs @@ -364,6 +365,12 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)  	/* Enable DCB tagging only when more than one TC */  	if (ice_dcb_get_num_tc(new_cfg) > 1) {  		dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n"); +		if (pf->hw.port_info->is_custom_tx_enabled) { +			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure DCB\n"); +			return -EBUSY; +		} +		ice_tear_down_devlink_rate_tree(pf); +  		set_bit(ICE_FLAG_DCB_ENA, pf->flags);  	} else {  		dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n"); @@ -434,7 +441,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)  		goto out;  	} -	ice_pf_dcb_recfg(pf); +	ice_pf_dcb_recfg(pf, false);  out:  	/* enable previously downed VSIs */ @@ -724,12 +731,13 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)  /**   * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs   * @pf: pointer to the PF struct + * @locked: is adev device lock held   *   * Assumed 
caller has already disabled all VSIs before   * calling this function. Reconfiguring DCB based on   * local_dcbx_cfg.   */ -void ice_pf_dcb_recfg(struct ice_pf *pf) +void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)  {  	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;  	struct iidc_event *event; @@ -776,14 +784,16 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)  		if (vsi->type == ICE_VSI_PF)  			ice_dcbnl_set_all(vsi);  	} -	/* Notify the AUX drivers that TC change is finished */ -	event = kzalloc(sizeof(*event), GFP_KERNEL); -	if (!event) -		return; +	if (!locked) { +		/* Notify the AUX drivers that TC change is finished */ +		event = kzalloc(sizeof(*event), GFP_KERNEL); +		if (!event) +			return; -	set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type); -	ice_send_event_to_aux(pf, event); -	kfree(event); +		set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type); +		ice_send_event_to_aux(pf, event); +		kfree(event); +	}  }  /** @@ -852,7 +862,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)  	if (err)  		goto dcb_init_err; -	return err; +	return 0;  dcb_init_err:  	dev_err(dev, "DCB init failed\n"); @@ -874,6 +884,9 @@ void ice_update_dcb_stats(struct ice_pf *pf)  	prev_ps = &pf->stats_prev;  	cur_ps = &pf->stats; +	if (ice_is_reset_in_progress(pf->state)) +		pf->stat_prev_loaded = false; +  	for (i = 0; i < 8; i++) {  		ice_stat_update32(hw, GLPRT_PXOFFRXC(port, i),  				  pf->stat_prev_loaded, @@ -934,6 +947,16 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,  }  /** + * ice_dcb_is_mib_change_pending - Check if MIB change is pending + * @state: MIB change state + */ +static bool ice_dcb_is_mib_change_pending(u8 state) +{ +	return ICE_AQ_LLDP_MIB_CHANGE_PENDING == +		FIELD_GET(ICE_AQ_LLDP_MIB_CHANGE_STATE_M, state); +} + +/**   * ice_dcb_process_lldp_set_mib_change - Process MIB change   * @pf: ptr to ice_pf   * @event: pointer to the admin queue receive event @@ -946,6 +969,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,  	struct device *dev = ice_pf_to_dev(pf);  	struct ice_aqc_lldp_get_mib *mib;  	struct ice_dcbx_cfg tmp_dcbx_cfg; +	bool pending_handled = true;  	bool need_reconfig = false;  	struct ice_port_info *pi;  	u8 mib_type; @@ -962,41 +986,58 @@  	pi = pf->hw.port_info;  	mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; +  	/* Ignore if event is not for Nearest Bridge */ -	mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) & -		    ICE_AQ_LLDP_BRID_TYPE_M); +	mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type);  	dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type);  	if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)  		return; +	/* A pending change event contains accurate config information, and +	 * the FW setting has not been updated yet, so detect if change is +	 * pending to determine where to pull config information from +	 * (FW vs event) +	 */ +	if (ice_dcb_is_mib_change_pending(mib->state)) +		pending_handled = false; +  	/* Check MIB Type and return if event for Remote MIB update */ -	mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M; +	mib_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);  	dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? 
"remote" : "local");  	if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {  		/* Update the remote cached instance and return */ -		ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, -					 ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, -					 &pi->qos_cfg.remote_dcbx_cfg); -		if (ret) { -			dev_err(dev, "Failed to get remote DCB config\n"); -			return; +		if (!pending_handled) { +			ice_get_dcb_cfg_from_mib_change(pi, event); +		} else { +			ret = +			  ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, +					     ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, +					     &pi->qos_cfg.remote_dcbx_cfg); +			if (ret) +				dev_dbg(dev, "Failed to get remote DCB config\n");  		} +		return;  	} +	/* That a DCB change has happened is now determined */  	mutex_lock(&pf->tc_mutex);  	/* store the old configuration */ -	tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg; +	tmp_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;  	/* Reset the old DCBX configuration data */  	memset(&pi->qos_cfg.local_dcbx_cfg, 0,  	       sizeof(pi->qos_cfg.local_dcbx_cfg));  	/* Get updated DCBX data from firmware */ -	ret = ice_get_dcb_cfg(pf->hw.port_info); -	if (ret) { -		dev_err(dev, "Failed to get DCB config\n"); -		goto out; +	if (!pending_handled) { +		ice_get_dcb_cfg_from_mib_change(pi, event); +	} else { +		ret = ice_get_dcb_cfg(pi); +		if (ret) { +			dev_err(dev, "Failed to get DCB config\n"); +			goto out; +		}  	}  	/* No change detected in DCBX configs */ @@ -1023,18 +1064,24 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,  		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);  	} +	/* Send Execute Pending MIB Change event if it is a Pending event */ +	if (!pending_handled) { +		ice_lldp_execute_pending_mib(&pf->hw); +		pending_handled = true; +	} +  	rtnl_lock();  	/* disable VSIs affected by DCB changes */  	ice_dcb_ena_dis_vsi(pf, false, true); -	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); +	ret = ice_query_port_ets(pi, &buf, sizeof(buf), NULL);  	if (ret) {  		dev_err(dev, "Query Port ETS failed\n");  		goto unlock_rtnl;  	}  	/* changes in configuration update VSI */ -	ice_pf_dcb_recfg(pf); +	ice_pf_dcb_recfg(pf, false);  	/* enable previously downed VSIs */  	ice_dcb_ena_dis_vsi(pf, true, true); @@ -1042,4 +1089,8 @@ unlock_rtnl:  	rtnl_unlock();  out:  	mutex_unlock(&pf->tc_mutex); + +	/* Send Execute Pending MIB Change event if it is a Pending event */ +	if (!pending_handled) +		ice_lldp_execute_pending_mib(&pf->hw);  } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 4c421c842a13..800879a88c5e 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -23,7 +23,7 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);  int  ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);  int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg); -void ice_pf_dcb_recfg(struct ice_pf *pf); +void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked);  void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);  int ice_init_pf_dcb(struct ice_pf *pf, bool locked);  void ice_update_dcb_stats(struct ice_pf *pf); @@ -128,7 +128,7 @@ static inline u8 ice_get_pfc_mode(struct ice_pf *pf)  	return 0;  } -static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { } +static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { }  static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }  static inline void ice_update_dcb_stats(struct ice_pf *pf) { }  static inline void diff 
--git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
new file mode 100644
index 000000000000..d71ed210f9c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -0,0 +1,1897 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice.h"
+#include "ice_ddp.h"
+
+/* For supporting double VLAN mode, it is necessary to enable or disable certain
+ * boost tcam entries. The metadata label names that match the following
+ * prefixes will be saved to allow enabling double VLAN mode.
+ */
+#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
+#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
+
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+#define ICE_TNL_PRE "TNL_"
+static const struct ice_tunnel_type_scan tnls[] = {
+	{ TNL_VXLAN, "TNL_VXLAN_PF" },
+	{ TNL_GENEVE, "TNL_GENEVE_PF" },
+	{ TNL_LAST, "" }
+};
+
+/**
+ * ice_verify_pkg - verify package
+ * @pkg: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * Verifies various attributes of the package file, including length, format
+ * version, and the requirement of at least one segment.
+ */
+enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+{
+	u32 seg_count;
+	u32 i;
+
+	if (len < struct_size(pkg, seg_offset, 1))
+		return ICE_DDP_PKG_INVALID_FILE;
+
+	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
+		return ICE_DDP_PKG_INVALID_FILE;
+
+	/* pkg must have at least one segment */
+	seg_count = le32_to_cpu(pkg->seg_count);
+	if (seg_count < 1)
+		return ICE_DDP_PKG_INVALID_FILE;
+
+	/* make sure segment array fits in package length */
+	if (len < struct_size(pkg, seg_offset, seg_count))
+		return ICE_DDP_PKG_INVALID_FILE;
+
+	/* all segments must fit within length */
+	for (i = 0; i < seg_count; i++) {
+		u32 off = le32_to_cpu(pkg->seg_offset[i]);
+		struct ice_generic_seg_hdr *seg;
+
+		/* segment header must fit */
+		if (len < off + sizeof(*seg))
+			return ICE_DDP_PKG_INVALID_FILE;
+
+		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+
+		/* segment body must fit */
+		if (len < off + le32_to_cpu(seg->seg_size))
+			return ICE_DDP_PKG_INVALID_FILE;
+	}
+
+	return ICE_DDP_PKG_SUCCESS;
+}
+
+/**
+ * ice_free_seg - free package segment pointer
+ * @hw: pointer to the hardware structure
+ *
+ * Frees the package segment pointer in the proper manner, depending on whether
+ * the segment was allocated or just the passed in pointer was stored.
+ */
+void ice_free_seg(struct ice_hw *hw)
+{
+	if (hw->pkg_copy) {
+		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
+		hw->pkg_copy = NULL;
+		hw->pkg_size = 0;
+	}
+	hw->seg = NULL;
+}
+
+/**
+ * ice_chk_pkg_version - check package version for compatibility with driver
+ * @pkg_ver: pointer to a version structure to check
+ *
+ * Check to make sure that the package about to be downloaded is compatible with
+ * the driver. To be compatible, the major and minor components of the package
+ * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
+ * definitions.
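+ *
+ * A worked example (illustrative, derived from the definitions in
+ * ice_ddp.h): with ICE_PKG_SUPP_VER_MAJ == 1 and ICE_PKG_SUPP_VER_MNR == 3,
+ * a 1.4.0.0 package yields ICE_DDP_PKG_FILE_VERSION_TOO_HIGH, a 1.2.0.0
+ * package yields ICE_DDP_PKG_FILE_VERSION_TOO_LOW, and any 1.3.x.x package
+ * is accepted regardless of its update and draft numbers.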
+ */ +static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +{ +	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || +	    (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && +	     pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) +		return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; +	else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || +		 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && +		  pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) +		return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; + +	return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_pkg_val_buf + * @buf: pointer to the ice buffer + * + * This helper function validates a buffer's header. + */ +struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +{ +	struct ice_buf_hdr *hdr; +	u16 section_count; +	u16 data_end; + +	hdr = (struct ice_buf_hdr *)buf->buf; +	/* verify data */ +	section_count = le16_to_cpu(hdr->section_count); +	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) +		return NULL; + +	data_end = le16_to_cpu(hdr->data_end); +	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) +		return NULL; + +	return hdr; +} + +/** + * ice_find_buf_table + * @ice_seg: pointer to the ice segment + * + * Returns the address of the buffer table within the ice segment. + */ +static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) +{ +	struct ice_nvm_table *nvms = (struct ice_nvm_table *) +		(ice_seg->device_table + le32_to_cpu(ice_seg->device_table_count)); + +	return (__force struct ice_buf_table *)(nvms->vers + +						le32_to_cpu(nvms->table_count)); +} + +/** + * ice_pkg_enum_buf + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This function will enumerate all the buffers in the ice segment. The first + * call is made with the ice_seg parameter non-NULL; on subsequent calls, + * ice_seg is set to NULL which continues the enumeration. When the function + * returns a NULL pointer, then the end of the buffers has been reached, or an + * unexpected value has been detected (for example an invalid section count or + * an invalid buffer end value). + */ +static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg, +					    struct ice_pkg_enum *state) +{ +	if (ice_seg) { +		state->buf_table = ice_find_buf_table(ice_seg); +		if (!state->buf_table) +			return NULL; + +		state->buf_idx = 0; +		return ice_pkg_val_buf(state->buf_table->buf_array); +	} + +	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) +		return ice_pkg_val_buf(state->buf_table->buf_array + +				       state->buf_idx); +	else +		return NULL; +} + +/** + * ice_pkg_advance_sect + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This helper function will advance the section within the ice segment, + * also advancing the buffer if needed. 
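+ *
+ * Illustrative walk-through (not part of the interface): on the first call
+ * (ice_seg non-NULL) the state is positioned at section 0 of the first
+ * buffer. On subsequent calls (ice_seg NULL) sect_idx is advanced, and only
+ * when it reaches the current buffer's section_count is the next buffer
+ * fetched via ice_pkg_enum_buf().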
+ */
+static bool ice_pkg_advance_sect(struct ice_seg *ice_seg,
+				 struct ice_pkg_enum *state)
+{
+	if (!ice_seg && !state->buf)
+		return false;
+
+	if (!ice_seg && state->buf)
+		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
+			return true;
+
+	state->buf = ice_pkg_enum_buf(ice_seg, state);
+	if (!state->buf)
+		return false;
+
+	/* start of new buffer, reset section index */
+	state->sect_idx = 0;
+	return true;
+}
+
+/**
+ * ice_pkg_enum_section
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ *
+ * This function will enumerate all the sections of a particular type in the
+ * ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the matching
+ * sections has been reached.
+ */
+void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+			   u32 sect_type)
+{
+	u16 offset, size;
+
+	if (ice_seg)
+		state->type = sect_type;
+
+	if (!ice_pkg_advance_sect(ice_seg, state))
+		return NULL;
+
+	/* scan for next matching section */
+	while (state->buf->section_entry[state->sect_idx].type !=
+	       cpu_to_le32(state->type))
+		if (!ice_pkg_advance_sect(NULL, state))
+			return NULL;
+
+	/* validate section */
+	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
+	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
+		return NULL;
+
+	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
+	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
+		return NULL;
+
+	/* make sure the section fits in the buffer */
+	if (offset + size > ICE_PKG_BUF_SIZE)
+		return NULL;
+
+	state->sect_type =
+		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
+
+	/* calc pointer to this section */
+	state->sect =
+		((u8 *)state->buf) +
+		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
+
+	return state->sect;
+}
+
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries into the section type
+ *
+ * This function will enumerate all the entries of a particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolute
+ * offset for each entry. For example, if the base for a section's header
+ * indicates a base offset of 10, and the index for the entry is 2, then the
+ * section handler function should set the offset to 10 + 2 = 12.
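+ *
+ * A minimal caller sketch (illustrative only; this is the idiom used by the
+ * enumeration loops throughout this file):
+ *
+ *	entry = ice_pkg_enum_entry(ice_seg, &state, sect_type, &offset,
+ *				   handler);
+ *	while (entry) {
+ *		(consume entry; "offset" holds its absolute table index)
+ *		entry = ice_pkg_enum_entry(NULL, &state, sect_type,
+ *					   &offset, handler);
+ *	}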
+ */
+static void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
+				struct ice_pkg_enum *state, u32 sect_type,
+				u32 *offset,
+				void *(*handler)(u32 sect_type, void *section,
+						 u32 index, u32 *offset))
+{
+	void *entry;
+
+	if (ice_seg) {
+		if (!handler)
+			return NULL;
+
+		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+			return NULL;
+
+		state->entry_idx = 0;
+		state->handler = handler;
+	} else {
+		state->entry_idx++;
+	}
+
+	if (!state->handler)
+		return NULL;
+
+	/* get entry */
+	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+			       offset);
+	if (!entry) {
+		/* end of a section, look for another section of this type */
+		if (!ice_pkg_enum_section(NULL, state, 0))
+			return NULL;
+
+		state->entry_idx = 0;
+		entry = state->handler(state->sect_type, state->sect,
+				       state->entry_idx, offset);
+	}
+
+	return entry;
+}
+
+/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as of type ice_sw_fv_section and
+ * enumerates offset field. "offset" is an index into the field vector table.
+ */
+static void *ice_sw_fv_handler(u32 sect_type, void *section, u32 index,
+			       u32 *offset)
+{
+	struct ice_sw_fv_section *fv_section = section;
+
+	if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+		return NULL;
+	if (index >= le16_to_cpu(fv_section->count))
+		return NULL;
+	if (offset)
+		/* "index" passed in to this function is relative to a given
+		 * 4k block. To get the true index into the field vector
+		 * table, add the relative index to the base_offset field
+		 * of this section
+		 */
+		*offset = le16_to_cpu(fv_section->base_offset) + index;
+	return fv_section->fv + index;
+}
+
+/**
+ * ice_get_prof_index_max - get the max profile index for used profile
+ * @hw: pointer to the HW struct
+ *
+ * Calling this function will get the max profile index of the profiles in
+ * use and store the index number in struct ice_switch_info *switch_info
+ * in HW for later use.
+ */
+static int ice_get_prof_index_max(struct ice_hw *hw)
+{
+	u16 prof_index = 0, j, max_prof_index = 0;
+	struct ice_pkg_enum state;
+	struct ice_seg *ice_seg;
+	bool flag = false;
+	struct ice_fv *fv;
+	u32 offset;
+
+	memset(&state, 0, sizeof(state));
+
+	if (!hw->seg)
+		return -EINVAL;
+
+	ice_seg = hw->seg;
+
+	do {
+		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+					&offset, ice_sw_fv_handler);
+		if (!fv)
+			break;
+		ice_seg = NULL;
+
+		/* in a profile that is not used, the prot_id is set to 0xff
+		 * and the off is set to 0x1ff for all the field vectors.
+		 */ +		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) +			if (fv->ew[j].prot_id != ICE_PROT_INVALID || +			    fv->ew[j].off != ICE_FV_OFFSET_INVAL) +				flag = true; +		if (flag && prof_index > max_prof_index) +			max_prof_index = prof_index; + +		prof_index++; +		flag = false; +	} while (fv); + +	hw->switch_info->max_used_prof_index = max_prof_index; + +	return 0; +} + +/** + * ice_get_ddp_pkg_state - get DDP pkg state after download + * @hw: pointer to the HW struct + * @already_loaded: indicates if pkg was already loaded onto the device + */ +static enum ice_ddp_state ice_get_ddp_pkg_state(struct ice_hw *hw, +						bool already_loaded) +{ +	if (hw->pkg_ver.major == hw->active_pkg_ver.major && +	    hw->pkg_ver.minor == hw->active_pkg_ver.minor && +	    hw->pkg_ver.update == hw->active_pkg_ver.update && +	    hw->pkg_ver.draft == hw->active_pkg_ver.draft && +	    !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { +		if (already_loaded) +			return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED; +		else +			return ICE_DDP_PKG_SUCCESS; +	} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || +		   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { +		return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; +	} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && +		   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { +		return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED; +	} else { +		return ICE_DDP_PKG_ERR; +	} +} + +/** + * ice_init_pkg_regs - initialize additional package registers + * @hw: pointer to the hardware structure + */ +static void ice_init_pkg_regs(struct ice_hw *hw) +{ +#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF +#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF +#define ICE_SW_BLK_IDX 0 + +	/* setup Switch block input mask, which is 48-bits in two parts */ +	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); +	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); +} + +/** + * ice_marker_ptype_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the Marker PType TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual Marker PType TCAM entries. 
+ */
+static void *ice_marker_ptype_tcam_handler(u32 sect_type, void *section,
+					   u32 index, u32 *offset)
+{
+	struct ice_marker_ptype_tcam_section *marker_ptype;
+
+	if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
+		return NULL;
+
+	if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
+		return NULL;
+
+	if (offset)
+		*offset = 0;
+
+	marker_ptype = section;
+	if (index >= le16_to_cpu(marker_ptype->count))
+		return NULL;
+
+	return marker_ptype->tcam + index;
+}
+
+/**
+ * ice_add_dvm_hint
+ * @hw: pointer to the HW structure
+ * @val: value of the boost entry
+ * @enable: true if the entry needs to be enabled, or false if it needs to
+ *	    be disabled
+ */
+static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
+{
+	if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
+		hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
+		hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
+		hw->dvm_upd.count++;
+	}
+}
+
+/**
+ * ice_add_tunnel_hint
+ * @hw: pointer to the HW structure
+ * @label_name: label text
+ * @val: value of the tunnel port boost entry
+ */
+static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
+{
+	if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+		u16 i;
+
+		for (i = 0; tnls[i].type != TNL_LAST; i++) {
+			size_t len = strlen(tnls[i].label_prefix);
+
+			/* Look for matching label start, before continuing */
+			if (strncmp(label_name, tnls[i].label_prefix, len))
+				continue;
+
+			/* Make sure this label matches our PF. Note that the PF
+			 * character ('0' - '7') will be located where our
+			 * prefix string's null terminator is located.
+			 */
+			if ((label_name[len] - '0') == hw->pf_id) {
+				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+				hw->tnl.tbl[hw->tnl.count].valid = false;
+				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+				hw->tnl.tbl[hw->tnl.count].port = 0;
+				hw->tnl.count++;
+				break;
+			}
+		}
+	}
+}
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *ice_label_enum_handler(u32 __always_unused sect_type,
+				    void *section, u32 index, u32 *offset)
+{
+	struct ice_label_section *labels;
+
+	if (!section)
+		return NULL;
+
+	if (index > ICE_MAX_LABELS_IN_BUF)
+		return NULL;
+
+	if (offset)
+		*offset = 0;
+
+	labels = section;
+	if (index >= le16_to_cpu(labels->count))
+		return NULL;
+
+	return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
+ * the end of the list has been reached.
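+ *
+ * For example (hypothetical values): a label entry named "TNL_VXLAN_PF3"
+ * with value 0x2a is returned as the name "TNL_VXLAN_PF3" with *value set
+ * to 0x2a; ice_init_pkg_hints() below then matches the "TNL_VXLAN_PF"
+ * prefix and reads the trailing '3' as the PF ID.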
+ */ +static char *ice_enum_labels(struct ice_seg *ice_seg, u32 type, +			     struct ice_pkg_enum *state, u16 *value) +{ +	struct ice_label *label; + +	/* Check for valid label section on first call */ +	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) +		return NULL; + +	label = ice_pkg_enum_entry(ice_seg, state, type, NULL, +				   ice_label_enum_handler); +	if (!label) +		return NULL; + +	*value = le16_to_cpu(label->value); +	return label->name; +} + +/** + * ice_boost_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the boost TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual boost TCAM entries. + */ +static void *ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, +				    u32 *offset) +{ +	struct ice_boost_tcam_section *boost; + +	if (!section) +		return NULL; + +	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) +		return NULL; + +	if (index > ICE_MAX_BST_TCAMS_IN_BUF) +		return NULL; + +	if (offset) +		*offset = 0; + +	boost = section; +	if (index >= le16_to_cpu(boost->count)) +		return NULL; + +	return boost->tcam + index; +} + +/** + * ice_find_boost_entry + * @ice_seg: pointer to the ice segment (non-NULL) + * @addr: Boost TCAM address of entry to search for + * @entry: returns pointer to the entry + * + * Finds a particular Boost TCAM entry and returns a pointer to that entry + * if it is found. The ice_seg parameter must not be NULL since the first call + * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. + */ +static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, +				struct ice_boost_tcam_entry **entry) +{ +	struct ice_boost_tcam_entry *tcam; +	struct ice_pkg_enum state; + +	memset(&state, 0, sizeof(state)); + +	if (!ice_seg) +		return -EINVAL; + +	do { +		tcam = ice_pkg_enum_entry(ice_seg, &state, +					  ICE_SID_RXPARSER_BOOST_TCAM, NULL, +					  ice_boost_tcam_handler); +		if (tcam && le16_to_cpu(tcam->addr) == addr) { +			*entry = tcam; +			return 0; +		} + +		ice_seg = NULL; +	} while (tcam); + +	*entry = NULL; +	return -EIO; +} + +/** + * ice_is_init_pkg_successful - check if DDP init was successful + * @state: state of the DDP pkg after download + */ +bool ice_is_init_pkg_successful(enum ice_ddp_state state) +{ +	switch (state) { +	case ICE_DDP_PKG_SUCCESS: +	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: +	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: +		return true; +	default: +		return false; +	} +} + +/** + * ice_pkg_buf_alloc + * @hw: pointer to the HW structure + * + * Allocates a package buffer and returns a pointer to the buffer header. + * Note: all package contents must be in Little Endian form. 
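+ *
+ * A typical build sequence (illustrative sketch; compare
+ * ice_pkg_buf_alloc_single_section() below):
+ *
+ *	bld = ice_pkg_buf_alloc(hw);
+ *	ice_pkg_buf_reserve_section(bld, 1);
+ *	sect = ice_pkg_buf_alloc_section(bld, type, size);
+ *	(fill *sect, then hand ice_pkg_buf(bld) to an update command)
+ *	ice_pkg_buf_free(hw, bld);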
+ */ +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) +{ +	struct ice_buf_build *bld; +	struct ice_buf_hdr *buf; + +	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL); +	if (!bld) +		return NULL; + +	buf = (struct ice_buf_hdr *)bld; +	buf->data_end = +		cpu_to_le16(offsetof(struct ice_buf_hdr, section_entry)); +	return bld; +} + +static bool ice_is_gtp_u_profile(u16 prof_idx) +{ +	return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID && +		prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) || +	       prof_idx == ICE_PROFID_IPV4_GTPU_TEID; +} + +static bool ice_is_gtp_c_profile(u16 prof_idx) +{ +	switch (prof_idx) { +	case ICE_PROFID_IPV4_GTPC_TEID: +	case ICE_PROFID_IPV4_GTPC_NO_TEID: +	case ICE_PROFID_IPV6_GTPC_TEID: +	case ICE_PROFID_IPV6_GTPC_NO_TEID: +		return true; +	default: +		return false; +	} +} + +/** + * ice_get_sw_prof_type - determine switch profile type + * @hw: pointer to the HW structure + * @fv: pointer to the switch field vector + * @prof_idx: profile index to check + */ +static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw, +					       struct ice_fv *fv, u32 prof_idx) +{ +	u16 i; + +	if (ice_is_gtp_c_profile(prof_idx)) +		return ICE_PROF_TUN_GTPC; + +	if (ice_is_gtp_u_profile(prof_idx)) +		return ICE_PROF_TUN_GTPU; + +	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { +		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */ +		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && +		    fv->ew[i].off == ICE_VNI_OFFSET) +			return ICE_PROF_TUN_UDP; + +		/* GRE tunnel will have GRE protocol */ +		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) +			return ICE_PROF_TUN_GRE; +	} + +	return ICE_PROF_NON_TUN; +} + +/** + * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type + * @hw: pointer to hardware structure + * @req_profs: type of profiles requested + * @bm: pointer to memory for returning the bitmap of field vectors + */ +void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, +			  unsigned long *bm) +{ +	struct ice_pkg_enum state; +	struct ice_seg *ice_seg; +	struct ice_fv *fv; + +	if (req_profs == ICE_PROF_ALL) { +		bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); +		return; +	} + +	memset(&state, 0, sizeof(state)); +	bitmap_zero(bm, ICE_MAX_NUM_PROFILES); +	ice_seg = hw->seg; +	do { +		enum ice_prof_type prof_type; +		u32 offset; + +		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, +					&offset, ice_sw_fv_handler); +		ice_seg = NULL; + +		if (fv) { +			/* Determine field vector type */ +			prof_type = ice_get_sw_prof_type(hw, fv, offset); + +			if (req_profs & prof_type) +				set_bit((u16)offset, bm); +		} +	} while (fv); +} + +/** + * ice_get_sw_fv_list + * @hw: pointer to the HW structure + * @lkups: list of protocol types + * @bm: bitmap of field vectors to consider + * @fv_list: Head of a list + * + * Finds all the field vector entries from switch block that contain + * a given protocol ID and offset and returns a list of structures of type + * "ice_sw_fv_list_entry". Every structure in the list has a field vector + * definition and profile ID information + * NOTE: The caller of the function is responsible for freeing the memory + * allocated for every list entry. 
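+ *
+ * A caller typically releases the list with the usual safe-iteration
+ * pattern (sketch; this matches the error path in the function itself):
+ *
+ *	list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
+ *		list_del(&fvl->list_entry);
+ *		devm_kfree(ice_hw_to_dev(hw), fvl);
+ *	}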
+ */
+int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
+		       unsigned long *bm, struct list_head *fv_list)
+{
+	struct ice_sw_fv_list_entry *fvl;
+	struct ice_sw_fv_list_entry *tmp;
+	struct ice_pkg_enum state;
+	struct ice_seg *ice_seg;
+	struct ice_fv *fv;
+	u32 offset;
+
+	memset(&state, 0, sizeof(state));
+
+	if (!lkups->n_val_words || !hw->seg)
+		return -EINVAL;
+
+	ice_seg = hw->seg;
+	do {
+		u16 i;
+
+		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+					&offset, ice_sw_fv_handler);
+		if (!fv)
+			break;
+		ice_seg = NULL;
+
+		/* If field vector is not in the bitmap list, then skip this
+		 * profile.
+		 */
+		if (!test_bit((u16)offset, bm))
+			continue;
+
+		for (i = 0; i < lkups->n_val_words; i++) {
+			int j;
+
+			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+				if (fv->ew[j].prot_id ==
+					    lkups->fv_words[i].prot_id &&
+				    fv->ew[j].off == lkups->fv_words[i].off)
+					break;
+			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+				break;
+			if (i + 1 == lkups->n_val_words) {
+				fvl = devm_kzalloc(ice_hw_to_dev(hw),
+						   sizeof(*fvl), GFP_KERNEL);
+				if (!fvl)
+					goto err;
+				fvl->fv_ptr = fv;
+				fvl->profile_id = offset;
+				list_add(&fvl->list_entry, fv_list);
+				break;
+			}
+		}
+	} while (fv);
+	if (list_empty(fv_list)) {
+		dev_warn(ice_hw_to_dev(hw),
+			 "Required profiles not found in currently loaded DDP package");
+		return -EIO;
+	}
+
+	return 0;
+
+err:
+	list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
+		list_del(&fvl->list_entry);
+		devm_kfree(ice_hw_to_dev(hw), fvl);
+	}
+
+	return -ENOMEM;
+}
+
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+	struct ice_pkg_enum state;
+	struct ice_seg *ice_seg;
+	struct ice_fv *fv;
+
+	memset(&state, 0, sizeof(state));
+
+	if (!hw->seg)
+		return;
+
+	ice_seg = hw->seg;
+	do {
+		u32 off;
+		u16 i;
+
+		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+					&off, ice_sw_fv_handler);
+		ice_seg = NULL;
+		if (!fv)
+			break;
+
+		bitmap_zero(hw->switch_info->prof_res_bm[off],
+			    ICE_MAX_FV_WORDS);
+
+		/* Determine empty field vector indices, these can be
+		 * used for recipe results. Skip index 0, since it is
+		 * always used for Switch ID.
+		 */
+		for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+			if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+			    fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+				set_bit(i, hw->switch_info->prof_res_bm[off]);
+	} while (fv);
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+	devm_kfree(ice_hw_to_dev(hw), bld);
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This routine
+ * can be called multiple times as long as all calls are made before calling
+ * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
+ * called, the number of sections that can be allocated can no longer be
+ * increased; not using all reserved sections is fine, but this will result in
+ * some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
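+ *
+ * For example, reserving three entries and then allocating only two
+ * sections simply leaves one unused 8-byte struct ice_section_entry in
+ * the table.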
+ */ +int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) +{ +	struct ice_buf_hdr *buf; +	u16 section_count; +	u16 data_end; + +	if (!bld) +		return -EINVAL; + +	buf = (struct ice_buf_hdr *)&bld->buf; + +	/* already an active section, can't increase table size */ +	section_count = le16_to_cpu(buf->section_count); +	if (section_count > 0) +		return -EIO; + +	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) +		return -EIO; +	bld->reserved_section_table_entries += count; + +	data_end = le16_to_cpu(buf->data_end) + +		   flex_array_size(buf, section_entry, count); +	buf->data_end = cpu_to_le16(data_end); + +	return 0; +} + +/** + * ice_pkg_buf_alloc_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * + * Reserves memory in the buffer for a section's content and updates the + * buffers' status accordingly. This routine returns a pointer to the first + * byte of the section start within the buffer, which is used to fill in the + * section contents. + * Note: all package contents must be in Little Endian form. + */ +void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) +{ +	struct ice_buf_hdr *buf; +	u16 sect_count; +	u16 data_end; + +	if (!bld || !type || !size) +		return NULL; + +	buf = (struct ice_buf_hdr *)&bld->buf; + +	/* check for enough space left in buffer */ +	data_end = le16_to_cpu(buf->data_end); + +	/* section start must align on 4 byte boundary */ +	data_end = ALIGN(data_end, 4); + +	if ((data_end + size) > ICE_MAX_S_DATA_END) +		return NULL; + +	/* check for more available section table entries */ +	sect_count = le16_to_cpu(buf->section_count); +	if (sect_count < bld->reserved_section_table_entries) { +		void *section_ptr = ((u8 *)buf) + data_end; + +		buf->section_entry[sect_count].offset = cpu_to_le16(data_end); +		buf->section_entry[sect_count].size = cpu_to_le16(size); +		buf->section_entry[sect_count].type = cpu_to_le32(type); + +		data_end += size; +		buf->data_end = cpu_to_le16(data_end); + +		buf->section_count = cpu_to_le16(sect_count + 1); +		return section_ptr; +	} + +	/* no free section table entries */ +	return NULL; +} + +/** + * ice_pkg_buf_alloc_single_section + * @hw: pointer to the HW structure + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * @section: returns pointer to the section + * + * Allocates a package buffer with a single section. + * Note: all package contents must be in Little Endian form. + */ +struct ice_buf_build *ice_pkg_buf_alloc_single_section(struct ice_hw *hw, +						       u32 type, u16 size, +						       void **section) +{ +	struct ice_buf_build *buf; + +	if (!section) +		return NULL; + +	buf = ice_pkg_buf_alloc(hw); +	if (!buf) +		return NULL; + +	if (ice_pkg_buf_reserve_section(buf, 1)) +		goto ice_pkg_buf_alloc_single_section_err; + +	*section = ice_pkg_buf_alloc_section(buf, type, size); +	if (!*section) +		goto ice_pkg_buf_alloc_single_section_err; + +	return buf; + +ice_pkg_buf_alloc_single_section_err: +	ice_pkg_buf_free(hw, buf); +	return NULL; +} + +/** + * ice_pkg_buf_get_active_sections + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Returns the number of active sections. Before using the package buffer + * in an update package command, the caller should make sure that there is at + * least one active section - otherwise, the buffer is not legal and should + * not be used. 
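+ * A caller would typically guard the update with a check like the
+ * following (illustrative sketch):
+ *
+ *	if (!ice_pkg_buf_get_active_sections(bld))
+ *		goto err;	(the buffer must not be sent)
+ *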
+ * Note: all package contents must be in Little Endian form. + */ +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) +{ +	struct ice_buf_hdr *buf; + +	if (!bld) +		return 0; + +	buf = (struct ice_buf_hdr *)&bld->buf; +	return le16_to_cpu(buf->section_count); +} + +/** + * ice_pkg_buf + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Return a pointer to the buffer's header + */ +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) +{ +	if (!bld) +		return NULL; + +	return &bld->buf; +} + +static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) +{ +	switch (aq_err) { +	case ICE_AQ_RC_ENOSEC: +	case ICE_AQ_RC_EBADSIG: +		return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; +	case ICE_AQ_RC_ESVN: +		return ICE_DDP_PKG_FILE_REVISION_TOO_LOW; +	case ICE_AQ_RC_EBADMAN: +	case ICE_AQ_RC_EBADBUF: +		return ICE_DDP_PKG_LOAD_ERROR; +	default: +		return ICE_DDP_PKG_ERR; +	} +} + +/** + * ice_acquire_global_cfg_lock + * @hw: pointer to the HW structure + * @access: access type (read or write) + * + * This function will request ownership of the global config lock for reading + * or writing of the package. When attempting to obtain write access, the + * caller must check for the following two return values: + * + * 0         -  Means the caller has acquired the global config lock + *              and can perform writing of the package. + * -EALREADY - Indicates another driver has already written the + *             package or has found that no update was necessary; in + *             this case, the caller can just skip performing any + *             update of the package. + */ +static int ice_acquire_global_cfg_lock(struct ice_hw *hw, +				       enum ice_aq_res_access_type access) +{ +	int status; + +	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, +				 ICE_GLOBAL_CFG_LOCK_TIMEOUT); + +	if (!status) +		mutex_lock(&ice_global_cfg_lock_sw); +	else if (status == -EALREADY) +		ice_debug(hw, ICE_DBG_PKG, +			  "Global config lock: No work to do\n"); + +	return status; +} + +/** + * ice_release_global_cfg_lock + * @hw: pointer to the HW structure + * + * This function will release the global config lock. + */ +static void ice_release_global_cfg_lock(struct ice_hw *hw) +{ +	mutex_unlock(&ice_global_cfg_lock_sw); +	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); +} + +/** + * ice_dwnld_cfg_bufs + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. Metadata buffers are skipped, and the first metadata buffer + * found indicates that the rest of the buffers are all metadata buffers. + */ +static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, +					     struct ice_buf *bufs, u32 count) +{ +	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; +	struct ice_buf_hdr *bh; +	enum ice_aq_err err; +	u32 offset, info, i; +	int status; + +	if (!bufs || !count) +		return ICE_DDP_PKG_ERR; + +	/* If the first buffer's first section has its metadata bit set +	 * then there are no buffers to be downloaded, and the operation is +	 * considered a success. 
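	 * (ICE_METADATA_BUF is bit 31 of a section's type word, so the
	 * check below reduces to a single mask test on the first section
	 * entry.)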
+	 */ +	bh = (struct ice_buf_hdr *)bufs; +	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) +		return ICE_DDP_PKG_SUCCESS; + +	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); +	if (status) { +		if (status == -EALREADY) +			return ICE_DDP_PKG_ALREADY_LOADED; +		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); +	} + +	for (i = 0; i < count; i++) { +		bool last = ((i + 1) == count); + +		if (!last) { +			/* check next buffer for metadata flag */ +			bh = (struct ice_buf_hdr *)(bufs + i + 1); + +			/* A set metadata flag in the next buffer will signal +			 * that the current buffer will be the last buffer +			 * downloaded +			 */ +			if (le16_to_cpu(bh->section_count)) +				if (le32_to_cpu(bh->section_entry[0].type) & +				    ICE_METADATA_BUF) +					last = true; +		} + +		bh = (struct ice_buf_hdr *)(bufs + i); + +		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, +					     &offset, &info, NULL); + +		/* Save AQ status from download package */ +		if (status) { +			ice_debug(hw, ICE_DBG_PKG, +				  "Pkg download failed: err %d off %d inf %d\n", +				  status, offset, info); +			err = hw->adminq.sq_last_status; +			state = ice_map_aq_err_to_ddp_state(err); +			break; +		} + +		if (last) +			break; +	} + +	if (!status) { +		status = ice_set_vlan_mode(hw); +		if (status) +			ice_debug(hw, ICE_DBG_PKG, +				  "Failed to set VLAN mode: err %d\n", status); +	} + +	ice_release_global_cfg_lock(hw); + +	return state; +} + +/** + * ice_aq_get_pkg_info_list + * @hw: pointer to the hardware structure + * @pkg_info: the buffer which will receive the information list + * @buf_size: the size of the pkg_info information buffer + * @cd: pointer to command details structure or NULL + * + * Get Package Info List (0x0C43) + */ +static int ice_aq_get_pkg_info_list(struct ice_hw *hw, +				    struct ice_aqc_get_pkg_info_resp *pkg_info, +				    u16 buf_size, struct ice_sq_cd *cd) +{ +	struct ice_aq_desc desc; + +	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + +	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); +} + +/** + * ice_download_pkg + * @hw: pointer to the hardware structure + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package. 
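+ *
+ * Illustrative note for callers (this mirrors the flow in ice_init_pkg()
+ * below): a return of ICE_DDP_PKG_ALREADY_LOADED is not fatal; it means
+ * another PF has already downloaded the package, and the caller can
+ * continue with the version now resident in firmware.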
+ */ +static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw, +					   struct ice_seg *ice_seg) +{ +	struct ice_buf_table *ice_buf_tbl; +	int status; + +	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", +		  ice_seg->hdr.seg_format_ver.major, +		  ice_seg->hdr.seg_format_ver.minor, +		  ice_seg->hdr.seg_format_ver.update, +		  ice_seg->hdr.seg_format_ver.draft); + +	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", +		  le32_to_cpu(ice_seg->hdr.seg_type), +		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); + +	ice_buf_tbl = ice_find_buf_table(ice_seg); + +	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", +		  le32_to_cpu(ice_buf_tbl->buf_count)); + +	status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, +				    le32_to_cpu(ice_buf_tbl->buf_count)); + +	ice_post_pkg_dwnld_vlan_mode_cfg(hw); + +	return status; +} + +/** + * ice_aq_download_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer to transfer + * @buf_size: the size of the package buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Download Package (0x0C40) + */ +int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, +			u16 buf_size, bool last_buf, u32 *error_offset, +			u32 *error_info, struct ice_sq_cd *cd) +{ +	struct ice_aqc_download_pkg *cmd; +	struct ice_aq_desc desc; +	int status; + +	if (error_offset) +		*error_offset = 0; +	if (error_info) +		*error_info = 0; + +	cmd = &desc.params.download_pkg; +	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); +	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + +	if (last_buf) +		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + +	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); +	if (status == -EIO) { +		/* Read error from buffer only when the FW returned an error */ +		struct ice_aqc_download_pkg_resp *resp; + +		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; +		if (error_offset) +			*error_offset = le32_to_cpu(resp->error_offset); +		if (error_info) +			*error_info = le32_to_cpu(resp->error_info); +	} + +	return status; +} + +/** + * ice_aq_upload_section + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer which will receive the section + * @buf_size: the size of the package buffer + * @cd: pointer to command details structure or NULL + * + * Upload Section (0x0C41) + */ +int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, +			  u16 buf_size, struct ice_sq_cd *cd) +{ +	struct ice_aq_desc desc; + +	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); +	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + +	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); +} + +/** + * ice_aq_update_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package cmd buffer + * @buf_size: the size of the package cmd buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Update Package (0x0C42) + */ +static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, +			     u16 buf_size, bool last_buf, u32 *error_offset, +			     u32 *error_info, struct ice_sq_cd *cd) +{ +	struct ice_aqc_download_pkg *cmd; +	struct ice_aq_desc desc; +	int status; + +	if (error_offset) +		*error_offset = 0; +	if (error_info) +		*error_info = 0; + +	cmd = 
&desc.params.download_pkg; +	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); +	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + +	if (last_buf) +		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + +	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); +	if (status == -EIO) { +		/* Read error from buffer only when the FW returned an error */ +		struct ice_aqc_download_pkg_resp *resp; + +		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; +		if (error_offset) +			*error_offset = le32_to_cpu(resp->error_offset); +		if (error_info) +			*error_info = le32_to_cpu(resp->error_info); +	} + +	return status; +} + +/** + * ice_update_pkg_no_lock + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + */ +int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ +	int status = 0; +	u32 i; + +	for (i = 0; i < count; i++) { +		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); +		bool last = ((i + 1) == count); +		u32 offset, info; + +		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end), +					   last, &offset, &info, NULL); + +		if (status) { +			ice_debug(hw, ICE_DBG_PKG, +				  "Update pkg failed: err %d off %d inf %d\n", +				  status, offset, info); +			break; +		} +	} + +	return status; +} + +/** + * ice_update_pkg + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains change lock and updates package. + */ +int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ +	int status; + +	status = ice_acquire_change_lock(hw, ICE_RES_WRITE); +	if (status) +		return status; + +	status = ice_update_pkg_no_lock(hw, bufs, count); + +	ice_release_change_lock(hw); + +	return status; +} + +/** + * ice_find_seg_in_pkg + * @hw: pointer to the hardware structure + * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + */ +struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, +						struct ice_pkg_hdr *pkg_hdr) +{ +	u32 i; + +	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", +		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, +		  pkg_hdr->pkg_format_ver.update, +		  pkg_hdr->pkg_format_ver.draft); + +	/* Search all package segments for the requested segment type */ +	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { +		struct ice_generic_seg_hdr *seg; + +		seg = (struct ice_generic_seg_hdr +			       *)((u8 *)pkg_hdr + +				  le32_to_cpu(pkg_hdr->seg_offset[i])); + +		if (le32_to_cpu(seg->seg_type) == seg_type) +			return seg; +	} + +	return NULL; +} + +/** + * ice_init_pkg_info + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + * + * Saves off the package details into the HW structure. 
+ */ +static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw, +					    struct ice_pkg_hdr *pkg_hdr) +{ +	struct ice_generic_seg_hdr *seg_hdr; + +	if (!pkg_hdr) +		return ICE_DDP_PKG_ERR; + +	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); +	if (seg_hdr) { +		struct ice_meta_sect *meta; +		struct ice_pkg_enum state; + +		memset(&state, 0, sizeof(state)); + +		/* Get package information from the Metadata Section */ +		meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, +					    ICE_SID_METADATA); +		if (!meta) { +			ice_debug(hw, ICE_DBG_INIT, +				  "Did not find ice metadata section in package\n"); +			return ICE_DDP_PKG_INVALID_FILE; +		} + +		hw->pkg_ver = meta->ver; +		memcpy(hw->pkg_name, meta->name, sizeof(meta->name)); + +		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", +			  meta->ver.major, meta->ver.minor, meta->ver.update, +			  meta->ver.draft, meta->name); + +		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; +		memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id)); + +		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", +			  seg_hdr->seg_format_ver.major, +			  seg_hdr->seg_format_ver.minor, +			  seg_hdr->seg_format_ver.update, +			  seg_hdr->seg_format_ver.draft, seg_hdr->seg_id); +	} else { +		ice_debug(hw, ICE_DBG_INIT, +			  "Did not find ice segment in driver package\n"); +		return ICE_DDP_PKG_INVALID_FILE; +	} + +	return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_get_pkg_info + * @hw: pointer to the hardware structure + * + * Store details of the package currently loaded in HW into the HW structure. + */ +static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) +{ +	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; +	struct ice_aqc_get_pkg_info_resp *pkg_info; +	u16 size; +	u32 i; + +	size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); +	pkg_info = kzalloc(size, GFP_KERNEL); +	if (!pkg_info) +		return ICE_DDP_PKG_ERR; + +	if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { +		state = ICE_DDP_PKG_ERR; +		goto init_pkg_free_alloc; +	} + +	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { +#define ICE_PKG_FLAG_COUNT 4 +		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; +		u8 place = 0; + +		if (pkg_info->pkg_info[i].is_active) { +			flags[place++] = 'A'; +			hw->active_pkg_ver = pkg_info->pkg_info[i].ver; +			hw->active_track_id = +				le32_to_cpu(pkg_info->pkg_info[i].track_id); +			memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name, +			       sizeof(pkg_info->pkg_info[i].name)); +			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; +		} +		if (pkg_info->pkg_info[i].is_active_at_boot) +			flags[place++] = 'B'; +		if (pkg_info->pkg_info[i].is_modified) +			flags[place++] = 'M'; +		if (pkg_info->pkg_info[i].is_in_nvm) +			flags[place++] = 'N'; + +		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", i, +			  pkg_info->pkg_info[i].ver.major, +			  pkg_info->pkg_info[i].ver.minor, +			  pkg_info->pkg_info[i].ver.update, +			  pkg_info->pkg_info[i].ver.draft, +			  pkg_info->pkg_info[i].name, flags); +	} + +init_pkg_free_alloc: +	kfree(pkg_info); + +	return state; +} + +/** + * ice_chk_pkg_compat + * @hw: pointer to the hardware structure + * @ospkg: pointer to the package hdr + * @seg: pointer to the package segment hdr + * + * This function checks the package version compatibility with driver and NVM + */ +static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, +					     struct ice_pkg_hdr *ospkg, +					     struct ice_seg **seg) +{ +	struct ice_aqc_get_pkg_info_resp *pkg; +	enum 
ice_ddp_state state; +	u16 size; +	u32 i; + +	/* Check package version compatibility */ +	state = ice_chk_pkg_version(&hw->pkg_ver); +	if (state) { +		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); +		return state; +	} + +	/* find ICE segment in given package */ +	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, +						     ospkg); +	if (!*seg) { +		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); +		return ICE_DDP_PKG_INVALID_FILE; +	} + +	/* Check if FW is compatible with the OS package */ +	size = struct_size(pkg, pkg_info, ICE_PKG_CNT); +	pkg = kzalloc(size, GFP_KERNEL); +	if (!pkg) +		return ICE_DDP_PKG_ERR; + +	if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { +		state = ICE_DDP_PKG_LOAD_ERROR; +		goto fw_ddp_compat_free_alloc; +	} + +	for (i = 0; i < le32_to_cpu(pkg->count); i++) { +		/* loop till we find the NVM package */ +		if (!pkg->pkg_info[i].is_in_nvm) +			continue; +		if ((*seg)->hdr.seg_format_ver.major != +			    pkg->pkg_info[i].ver.major || +		    (*seg)->hdr.seg_format_ver.minor > +			    pkg->pkg_info[i].ver.minor) { +			state = ICE_DDP_PKG_FW_MISMATCH; +			ice_debug(hw, ICE_DBG_INIT, +				  "OS package is not compatible with NVM.\n"); +		} +		/* done processing NVM package so break */ +		break; +	} +fw_ddp_compat_free_alloc: +	kfree(pkg); +	return state; +} + +/** + * ice_init_pkg_hints + * @hw: pointer to the HW structure + * @ice_seg: pointer to the segment of the package scan (non-NULL) + * + * This function will scan the package and save off relevant information + * (hints or metadata) for driver use. The ice_seg parameter must not be NULL + * since the first call to ice_enum_labels requires a pointer to an actual + * ice_seg structure. + */ +static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) +{ +	struct ice_pkg_enum state; +	char *label_name; +	u16 val; +	int i; + +	memset(&hw->tnl, 0, sizeof(hw->tnl)); +	memset(&state, 0, sizeof(state)); + +	if (!ice_seg) +		return; + +	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, +				     &val); + +	while (label_name) { +		if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) +			/* check for a tunnel entry */ +			ice_add_tunnel_hint(hw, label_name, val); + +		/* check for a dvm mode entry */ +		else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE))) +			ice_add_dvm_hint(hw, val, true); + +		/* check for a svm mode entry */ +		else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE))) +			ice_add_dvm_hint(hw, val, false); + +		label_name = ice_enum_labels(NULL, 0, &state, &val); +	} + +	/* Cache the appropriate boost TCAM entry pointers for tunnels */ +	for (i = 0; i < hw->tnl.count; i++) { +		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, +				     &hw->tnl.tbl[i].boost_entry); +		if (hw->tnl.tbl[i].boost_entry) { +			hw->tnl.tbl[i].valid = true; +			if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT) +				hw->tnl.valid_count[hw->tnl.tbl[i].type]++; +		} +	} + +	/* Cache the appropriate boost TCAM entry pointers for DVM and SVM */ +	for (i = 0; i < hw->dvm_upd.count; i++) +		ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr, +				     &hw->dvm_upd.tbl[i].boost_entry); +} + +/** + * ice_fill_hw_ptype - fill the enabled PTYPE bit information + * @hw: pointer to the HW structure + */ +static void ice_fill_hw_ptype(struct ice_hw *hw) +{ +	struct ice_marker_ptype_tcam_entry *tcam; +	struct ice_seg *seg = hw->seg; +	struct ice_pkg_enum state; + +	bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); +	if 
(!seg)
+		return;
+
+	memset(&state, 0, sizeof(state));
+
+	do {
+		tcam = ice_pkg_enum_entry(seg, &state,
+					  ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
+					  ice_marker_ptype_tcam_handler);
+		if (tcam &&
+		    le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
+		    le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
+			set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);
+
+		seg = NULL;
+	} while (tcam);
+}
+
+/**
+ * ice_init_pkg - initialize/download package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function initializes a package. The package contains HW tables
+ * required to do packet processing. First, the function extracts package
+ * information such as version. Then it finds the ice configuration segment
+ * within the package; this function then saves a copy of the segment pointer
+ * within the supplied package buffer. Next, the function will cache any hints
+ * from the package, followed by downloading the package itself. Note that if
+ * a previous PF driver has already downloaded the package successfully, then
+ * the current driver will not have to download the package again.
+ *
+ * The local package contents will be used to query default behavior and to
+ * update specific sections of the HW's version of the package (e.g. to update
+ * the parse graph to understand new protocols).
+ *
+ * This function stores a pointer to the package buffer memory, and it is
+ * expected that the supplied buffer will not be freed immediately. If the
+ * package buffer needs to be freed, such as when read from a file, use
+ * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
+ * case.
+ */
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+{
+	bool already_loaded = false;
+	enum ice_ddp_state state;
+	struct ice_pkg_hdr *pkg;
+	struct ice_seg *seg;
+
+	if (!buf || !len)
+		return ICE_DDP_PKG_ERR;
+
+	pkg = (struct ice_pkg_hdr *)buf;
+	state = ice_verify_pkg(pkg, len);
+	if (state) {
+		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+			  state);
+		return state;
+	}
+
+	/* initialize package info */
+	state = ice_init_pkg_info(hw, pkg);
+	if (state)
+		return state;
+
+	/* before downloading the package, check package version for
+	 * compatibility with driver
+	 */
+	state = ice_chk_pkg_compat(hw, pkg, &seg);
+	if (state)
+		return state;
+
+	/* initialize package hints and then download package */
+	ice_init_pkg_hints(hw, seg);
+	state = ice_download_pkg(hw, seg);
+	if (state == ICE_DDP_PKG_ALREADY_LOADED) {
+		ice_debug(hw, ICE_DBG_INIT,
+			  "package previously loaded - no work.\n");
+		already_loaded = true;
+	}
+
+	/* Get information on the package currently loaded in HW, then make sure
+	 * the driver is compatible with this version.
+	 */
+	if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
+		state = ice_get_pkg_info(hw);
+		if (!state)
+			state = ice_get_ddp_pkg_state(hw, already_loaded);
+	}
+
+	if (ice_is_init_pkg_successful(state)) {
+		hw->seg = seg;
+		/* on successful package download update other required
+		 * registers to support the package and fill HW tables
+		 * with package content.
+		 */ +		ice_init_pkg_regs(hw); +		ice_fill_blk_tbls(hw); +		ice_fill_hw_ptype(hw); +		ice_get_prof_index_max(hw); +	} else { +		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", state); +	} + +	return state; +} + +/** + * ice_copy_and_init_pkg - initialize/download a copy of the package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function copies the package buffer, and then calls ice_init_pkg() to + * initialize the copied package contents. + * + * The copying is necessary if the package buffer supplied is constant, or if + * the memory may disappear shortly after calling this function. + * + * If the package buffer resides in the data segment and can be modified, the + * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). + * + * However, if the package buffer needs to be copied first, such as when being + * read from a file, the caller should use ice_copy_and_init_pkg(). + * + * This function will first copy the package buffer, before calling + * ice_init_pkg(). The caller is free to immediately destroy the original + * package buffer, as the new copy will be managed by this function and + * related routines. + */ +enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, +					 u32 len) +{ +	enum ice_ddp_state state; +	u8 *buf_copy; + +	if (!buf || !len) +		return ICE_DDP_PKG_ERR; + +	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); + +	state = ice_init_pkg(hw, buf_copy, len); +	if (!ice_is_init_pkg_successful(state)) { +		/* Free the copy, since we failed to initialize the package */ +		devm_kfree(ice_hw_to_dev(hw), buf_copy); +	} else { +		/* Track the copied pkg so we can free it later */ +		hw->pkg_copy = buf_copy; +		hw->pkg_size = len; +	} + +	return state; +} diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h new file mode 100644 index 000000000000..37eadb3d27a8 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_ddp.h @@ -0,0 +1,445 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2022, Intel Corporation. 
*/ + +#ifndef _ICE_DDP_H_ +#define _ICE_DDP_H_ + +#include "ice_type.h" + +/* Package minimal version supported */ +#define ICE_PKG_SUPP_VER_MAJ 1 +#define ICE_PKG_SUPP_VER_MNR 3 + +/* Package format version */ +#define ICE_PKG_FMT_VER_MAJ 1 +#define ICE_PKG_FMT_VER_MNR 0 +#define ICE_PKG_FMT_VER_UPD 0 +#define ICE_PKG_FMT_VER_DFT 0 + +#define ICE_PKG_CNT 4 + +#define ICE_FV_OFFSET_INVAL 0x1FF + +/* Extraction Sequence (Field Vector) Table */ +struct ice_fv_word { +	u8 prot_id; +	u16 off; /* Offset within the protocol header */ +	u8 resvrd; +} __packed; + +#define ICE_MAX_NUM_PROFILES 256 + +#define ICE_MAX_FV_WORDS 48 +struct ice_fv { +	struct ice_fv_word ew[ICE_MAX_FV_WORDS]; +}; + +enum ice_ddp_state { +	/* Indicates that this call to ice_init_pkg +	 * successfully loaded the requested DDP package +	 */ +	ICE_DDP_PKG_SUCCESS = 0, + +	/* Generic error for already loaded errors, it is mapped later to +	 * the more specific one (one of the next 3) +	 */ +	ICE_DDP_PKG_ALREADY_LOADED = -1, + +	/* Indicates that a DDP package of the same version has already been +	 * loaded onto the device by a previous call or by another PF +	 */ +	ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2, + +	/* The device has a DDP package that is not supported by the driver */ +	ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3, + +	/* The device has a compatible package +	 * (but different from the request) already loaded +	 */ +	ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4, + +	/* The firmware loaded on the device is not compatible with +	 * the DDP package loaded +	 */ +	ICE_DDP_PKG_FW_MISMATCH = -5, + +	/* The DDP package file is invalid */ +	ICE_DDP_PKG_INVALID_FILE = -6, + +	/* The version of the DDP package provided is higher than +	 * the driver supports +	 */ +	ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7, + +	/* The version of the DDP package provided is lower than the +	 * driver supports +	 */ +	ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8, + +	/* The signature of the DDP package file provided is invalid */ +	ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9, + +	/* The DDP package file security revision is too low and not +	 * supported by firmware +	 */ +	ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10, + +	/* An error occurred in firmware while loading the DDP package */ +	ICE_DDP_PKG_LOAD_ERROR = -11, + +	/* Other errors */ +	ICE_DDP_PKG_ERR = -12 +}; + +/* Package and segment headers and tables */ +struct ice_pkg_hdr { +	struct ice_pkg_ver pkg_format_ver; +	__le32 seg_count; +	__le32 seg_offset[]; +}; + +/* generic segment */ +struct ice_generic_seg_hdr { +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_ICE 0x00000010 +	__le32 seg_type; +	struct ice_pkg_ver seg_format_ver; +	__le32 seg_size; +	char seg_id[ICE_PKG_NAME_SIZE]; +}; + +/* ice specific segment */ + +union ice_device_id { +	struct { +		__le16 device_id; +		__le16 vendor_id; +	} dev_vend_id; +	__le32 id; +}; + +struct ice_device_id_entry { +	union ice_device_id device; +	union ice_device_id sub_device; +}; + +struct ice_seg { +	struct ice_generic_seg_hdr hdr; +	__le32 device_table_count; +	struct ice_device_id_entry device_table[]; +}; + +struct ice_nvm_table { +	__le32 table_count; +	__le32 vers[]; +}; + +struct ice_buf { +#define ICE_PKG_BUF_SIZE 4096 +	u8 buf[ICE_PKG_BUF_SIZE]; +}; + +struct ice_buf_table { +	__le32 buf_count; +	struct ice_buf buf_array[]; +}; + +struct ice_run_time_cfg_seg { +	struct ice_generic_seg_hdr hdr; +	u8 rsvd[8]; +	struct ice_buf_table buf_table; +}; + +/* global metadata specific segment */ +struct ice_global_metadata_seg { +	struct 
+
+/* global metadata specific segment */
+struct ice_global_metadata_seg {
+	struct ice_generic_seg_hdr hdr;
+	struct ice_pkg_ver pkg_ver;
+	__le32 rsvd;
+	char pkg_name[ICE_PKG_NAME_SIZE];
+};
+
+#define ICE_MIN_S_OFF 12
+#define ICE_MAX_S_OFF 4095
+#define ICE_MIN_S_SZ 1
+#define ICE_MAX_S_SZ 4084
+
+/* section information */
+struct ice_section_entry {
+	__le32 type;
+	__le16 offset;
+	__le16 size;
+};
+
+#define ICE_MIN_S_COUNT 1
+#define ICE_MAX_S_COUNT 511
+#define ICE_MIN_S_DATA_END 12
+#define ICE_MAX_S_DATA_END 4096
+
+#define ICE_METADATA_BUF 0x80000000
+
+struct ice_buf_hdr {
+	__le16 section_count;
+	__le16 data_end;
+	struct ice_section_entry section_entry[];
+};
+
+#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz)                                 \
+	((ICE_PKG_BUF_SIZE -                                                  \
+	  struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) / \
+	 (ent_sz))
+
+/* ice package section IDs */
+#define ICE_SID_METADATA 1
+#define ICE_SID_XLT0_SW 10
+#define ICE_SID_XLT_KEY_BUILDER_SW 11
+#define ICE_SID_XLT1_SW 12
+#define ICE_SID_XLT2_SW 13
+#define ICE_SID_PROFID_TCAM_SW 14
+#define ICE_SID_PROFID_REDIR_SW 15
+#define ICE_SID_FLD_VEC_SW 16
+#define ICE_SID_CDID_KEY_BUILDER_SW 17
+
+struct ice_meta_sect {
+	struct ice_pkg_ver ver;
+#define ICE_META_SECT_NAME_SIZE 28
+	char name[ICE_META_SECT_NAME_SIZE];
+	__le32 track_id;
+};
+
+#define ICE_SID_CDID_REDIR_SW 18
+
+#define ICE_SID_XLT0_ACL 20
+#define ICE_SID_XLT_KEY_BUILDER_ACL 21
+#define ICE_SID_XLT1_ACL 22
+#define ICE_SID_XLT2_ACL 23
+#define ICE_SID_PROFID_TCAM_ACL 24
+#define ICE_SID_PROFID_REDIR_ACL 25
+#define ICE_SID_FLD_VEC_ACL 26
+#define ICE_SID_CDID_KEY_BUILDER_ACL 27
+#define ICE_SID_CDID_REDIR_ACL 28
+
+#define ICE_SID_XLT0_FD 30
+#define ICE_SID_XLT_KEY_BUILDER_FD 31
+#define ICE_SID_XLT1_FD 32
+#define ICE_SID_XLT2_FD 33
+#define ICE_SID_PROFID_TCAM_FD 34
+#define ICE_SID_PROFID_REDIR_FD 35
+#define ICE_SID_FLD_VEC_FD 36
+#define ICE_SID_CDID_KEY_BUILDER_FD 37
+#define ICE_SID_CDID_REDIR_FD 38
+
+#define ICE_SID_XLT0_RSS 40
+#define ICE_SID_XLT_KEY_BUILDER_RSS 41
+#define ICE_SID_XLT1_RSS 42
+#define ICE_SID_XLT2_RSS 43
+#define ICE_SID_PROFID_TCAM_RSS 44
+#define ICE_SID_PROFID_REDIR_RSS 45
+#define ICE_SID_FLD_VEC_RSS 46
+#define ICE_SID_CDID_KEY_BUILDER_RSS 47
+#define ICE_SID_CDID_REDIR_RSS 48
+
+#define ICE_SID_RXPARSER_MARKER_PTYPE 55
+#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_METADATA_INIT 58
+#define ICE_SID_TXPARSER_BOOST_TCAM 66
+
+#define ICE_SID_XLT0_PE 80
+#define ICE_SID_XLT_KEY_BUILDER_PE 81
+#define ICE_SID_XLT1_PE 82
+#define ICE_SID_XLT2_PE 83
+#define ICE_SID_PROFID_TCAM_PE 84
+#define ICE_SID_PROFID_REDIR_PE 85
+#define ICE_SID_FLD_VEC_PE 86
+#define ICE_SID_CDID_KEY_BUILDER_PE 87
+#define ICE_SID_CDID_REDIR_PE 88
+
+/* Label Metadata section IDs */
+#define ICE_SID_LBL_FIRST 0x80000010
+#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
+/* The following define MUST be updated to reflect the last label section ID */
+#define ICE_SID_LBL_LAST 0x80000038
+
+/* Label ICE runtime configuration section IDs */
+#define ICE_SID_TX_5_LAYER_TOPO 0x10
+
+enum ice_block {
+	ICE_BLK_SW = 0,
+	ICE_BLK_ACL,
+	ICE_BLK_FD,
+	ICE_BLK_RSS,
+	ICE_BLK_PE,
+	ICE_BLK_COUNT
+};
+
+enum ice_sect {
+	ICE_XLT0 = 0,
+	ICE_XLT_KB,
+	ICE_XLT1,
+	ICE_XLT2,
+	ICE_PROF_TCAM,
+	ICE_PROF_REDIR,
+	ICE_VEC_TBL,
+	ICE_CDID_KB,
+	ICE_CDID_REDIR,
+	ICE_SECT_COUNT
+};
+
+/* package labels */
+struct ice_label {
+	__le16 value;
+#define ICE_PKG_LABEL_SIZE 64
+	char name[ICE_PKG_LABEL_SIZE];
+};
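+
+/* Worked example for the capacity macro above, using the label section
+ * that follows (sizes assume the layouts in this header): hd_sz is the
+ * section header beyond one entry, i.e. struct_size(sec, label, 1) -
+ * sizeof(struct ice_label) = 2 bytes, and ent_sz is sizeof(struct
+ * ice_label) = 66 bytes. With the 12-byte ice_buf_hdr term (4 bytes
+ * fixed plus one 8-byte section entry), ICE_MAX_LABELS_IN_BUF works
+ * out to (4096 - 12 - 2) / 66 = 61 labels per package buffer.
+ */
+
+struct ice_label_section {
+	__le16 count;
+	struct 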
ice_label label[]; +}; + +#define ICE_MAX_LABELS_IN_BUF                                             \ +	ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_label_section *)0, \ +					   label, 1) -                    \ +				       sizeof(struct ice_label),          \ +			       sizeof(struct ice_label)) + +struct ice_sw_fv_section { +	__le16 count; +	__le16 base_offset; +	struct ice_fv fv[]; +}; + +struct ice_sw_fv_list_entry { +	struct list_head list_entry; +	u32 profile_id; +	struct ice_fv *fv_ptr; +}; + +/* The BOOST TCAM stores the match packet header in reverse order, meaning + * the fields are reversed; in addition, this means that the normally big endian + * fields of the packet are now little endian. + */ +struct ice_boost_key_value { +#define ICE_BOOST_REMAINING_HV_KEY 15 +	u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; +	__le16 hv_dst_port_key; +	__le16 hv_src_port_key; +	u8 tcam_search_key; +} __packed; + +struct ice_boost_key { +	struct ice_boost_key_value key; +	struct ice_boost_key_value key2; +}; + +/* package Boost TCAM entry */ +struct ice_boost_tcam_entry { +	__le16 addr; +	__le16 reserved; +	/* break up the 40 bytes of key into different fields */ +	struct ice_boost_key key; +	u8 boost_hit_index_group; +	/* The following contains bitfields which are not on byte boundaries. +	 * These fields are currently unused by driver software. +	 */ +#define ICE_BOOST_BIT_FIELDS 43 +	u8 bit_fields[ICE_BOOST_BIT_FIELDS]; +}; + +struct ice_boost_tcam_section { +	__le16 count; +	__le16 reserved; +	struct ice_boost_tcam_entry tcam[]; +}; + +#define ICE_MAX_BST_TCAMS_IN_BUF                                               \ +	ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_boost_tcam_section *)0, \ +					   tcam, 1) -                          \ +				       sizeof(struct ice_boost_tcam_entry),    \ +			       sizeof(struct ice_boost_tcam_entry)) + +/* package Marker Ptype TCAM entry */ +struct ice_marker_ptype_tcam_entry { +#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024 +	__le16 addr; +	__le16 ptype; +	u8 keys[20]; +}; + +struct ice_marker_ptype_tcam_section { +	__le16 count; +	__le16 reserved; +	struct ice_marker_ptype_tcam_entry tcam[]; +}; + +#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF                                    \ +	ICE_MAX_ENTRIES_IN_BUF(                                              \ +		struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, \ +			    1) -                                             \ +			sizeof(struct ice_marker_ptype_tcam_entry),          \ +		sizeof(struct ice_marker_ptype_tcam_entry)) + +struct ice_xlt1_section { +	__le16 count; +	__le16 offset; +	u8 value[]; +}; + +struct ice_xlt2_section { +	__le16 count; +	__le16 offset; +	__le16 value[]; +}; + +struct ice_prof_redir_section { +	__le16 count; +	__le16 offset; +	u8 redir_value[]; +}; + +/* package buffer building */ + +struct ice_buf_build { +	struct ice_buf buf; +	u16 reserved_section_table_entries; +}; + +struct ice_pkg_enum { +	struct ice_buf_table *buf_table; +	u32 buf_idx; + +	u32 type; +	struct ice_buf_hdr *buf; +	u32 sect_idx; +	void *sect; +	u32 sect_type; + +	u32 entry_idx; +	void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); +}; + +int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, +			u16 buf_size, bool last_buf, u32 *error_offset, +			u32 *error_info, struct ice_sq_cd *cd); +int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, +			  u16 buf_size, struct ice_sq_cd *cd); + +void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 
type, u16 size); + +enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len); + +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw); + +struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, +						struct ice_pkg_hdr *pkg_hdr); + +int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count); +int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); + +int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count); +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); +void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, +			   u32 sect_type); + +struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf); + +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index e6ec20079ced..05f216af8c81 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -8,6 +8,7 @@  #include "ice_devlink.h"  #include "ice_eswitch.h"  #include "ice_fw_update.h" +#include "ice_dcb_lib.h"  static int ice_active_port_option = -1; @@ -310,12 +311,6 @@ static int ice_devlink_info_get(struct devlink *devlink,  		}  	} -	err = devlink_info_driver_name_put(req, KBUILD_MODNAME); -	if (err) { -		NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name"); -		goto out_free_ctx; -	} -  	ice_info_get_dsn(pf, ctx);  	err = devlink_info_serial_number_put(req, ctx->buf); @@ -376,10 +371,7 @@ out_free_ctx:  /**   * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware - * @devlink: pointer to the devlink instance to reload - * @netns_change: if true, the network namespace is changing - * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE - * @limit: limits on what reload should do, such as not resetting + * @pf: pointer to the pf instance   * @extack: netlink extended ACK structure   *   * Allow user to activate new Embedded Management Processor firmware by @@ -392,12 +384,9 @@ out_free_ctx:   * any source.   
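+ *
+ * A typical userspace trigger, once updated firmware has been written
+ * to flash (illustrative; the PCI address is hypothetical):
+ *
+ *	devlink dev reload pci/0000:4b:00.0 action fw_activate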
*/  static int -ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change, -			      enum devlink_reload_action action, -			      enum devlink_reload_limit limit, +ice_devlink_reload_empr_start(struct ice_pf *pf,  			      struct netlink_ext_ack *extack)  { -	struct ice_pf *pf = devlink_priv(devlink);  	struct device *dev = ice_pf_to_dev(pf);  	struct ice_hw *hw = &pf->hw;  	u8 pending; @@ -436,11 +425,51 @@ ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,  }  /** + * ice_devlink_reload_down - prepare for reload + * @devlink: pointer to the devlink instance to reload + * @netns_change: if true, the network namespace is changing + * @action: the action to perform + * @limit: limits on what reload should do, such as not resetting + * @extack: netlink extended ACK structure + */ +static int +ice_devlink_reload_down(struct devlink *devlink, bool netns_change, +			enum devlink_reload_action action, +			enum devlink_reload_limit limit, +			struct netlink_ext_ack *extack) +{ +	struct ice_pf *pf = devlink_priv(devlink); + +	switch (action) { +	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: +		if (ice_is_eswitch_mode_switchdev(pf)) { +			NL_SET_ERR_MSG_MOD(extack, +					   "Go to legacy mode before doing reinit\n"); +			return -EOPNOTSUPP; +		} +		if (ice_is_adq_active(pf)) { +			NL_SET_ERR_MSG_MOD(extack, +					   "Turn off ADQ before doing reinit\n"); +			return -EOPNOTSUPP; +		} +		if (ice_has_vfs(pf)) { +			NL_SET_ERR_MSG_MOD(extack, +					   "Remove all VFs before doing reinit\n"); +			return -EOPNOTSUPP; +		} +		ice_unload(pf); +		return 0; +	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: +		return ice_devlink_reload_empr_start(pf, extack); +	default: +		WARN_ON(1); +		return -EOPNOTSUPP; +	} +} + +/**   * ice_devlink_reload_empr_finish - Wait for EMP reset to finish - * @devlink: pointer to the devlink instance reloading - * @action: the action requested - * @limit: limits imposed by userspace, such as not resetting - * @actions_performed: on return, indicate what actions actually performed + * @pf: pointer to the pf instance   * @extack: netlink extended ACK structure   *   * Wait for driver to finish rebuilding after EMP reset is completed. This @@ -448,17 +477,11 @@ ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,   * for the driver's rebuild to complete.   */  static int -ice_devlink_reload_empr_finish(struct devlink *devlink, -			       enum devlink_reload_action action, -			       enum devlink_reload_limit limit, -			       u32 *actions_performed, +ice_devlink_reload_empr_finish(struct ice_pf *pf,  			       struct netlink_ext_ack *extack)  { -	struct ice_pf *pf = devlink_priv(devlink);  	int err; -	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); -  	err = ice_wait_for_reset(pf, 60 * HZ);  	if (err) {  		NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute"); @@ -713,18 +736,549 @@ ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,  	return ice_devlink_port_split(devlink, port, 1, extack);  } +/** + * ice_tear_down_devlink_rate_tree - removes devlink-rate exported tree + * @pf: pf struct + * + * This function tears down tree exported during VF's creation. 
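+ *
+ * The devlink instance lock is held around both the per-VF leaf
+ * teardown and devl_rate_nodes_destroy(), so concurrent devlink users
+ * observe either the full tree or none of it. Afterwards a
+ * "devlink port function rate show" for this device (illustrative
+ * command) lists no rate objects.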
+ */
+void ice_tear_down_devlink_rate_tree(struct ice_pf *pf)
+{
+	struct devlink *devlink;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	devlink = priv_to_devlink(pf);
+
+	devl_lock(devlink);
+	mutex_lock(&pf->vfs.table_lock);
+	ice_for_each_vf(pf, bkt, vf) {
+		if (vf->devlink_port.devlink_rate)
+			devl_rate_leaf_destroy(&vf->devlink_port);
+	}
+	mutex_unlock(&pf->vfs.table_lock);
+
+	devl_rate_nodes_destroy(devlink);
+	devl_unlock(devlink);
+}
+
+/**
+ * ice_enable_custom_tx - try to enable custom Tx feature
+ * @pf: pf struct
+ *
+ * This function tries to enable the custom Tx feature; it is not
+ * possible to enable it while DCB or ADQ is active.
+ */
+static bool ice_enable_custom_tx(struct ice_pf *pf)
+{
+	struct ice_port_info *pi = ice_get_main_vsi(pf)->port_info;
+	struct device *dev = ice_pf_to_dev(pf);
+
+	if (pi->is_custom_tx_enabled)
+		/* already enabled, return true */
+		return true;
+
+	if (ice_is_adq_active(pf)) {
+		dev_err(dev, "ADQ active, can't modify Tx scheduler tree\n");
+		return false;
+	}
+
+	if (ice_is_dcb_active(pf)) {
+		dev_err(dev, "DCB active, can't modify Tx scheduler tree\n");
+		return false;
+	}
+
+	pi->is_custom_tx_enabled = true;
+
+	return true;
+}
+
+/**
+ * ice_traverse_tx_tree - traverse Tx scheduler tree
+ * @devlink: devlink struct
+ * @node: current node, used for recursion
+ * @tc_node: tc_node struct, that is treated as a root
+ * @pf: pf struct
+ *
+ * This function traverses the Tx scheduler tree and exports the
+ * entire structure to devlink-rate.
+ */
+static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node,
+				 struct ice_sched_node *tc_node, struct ice_pf *pf)
+{
+	struct devlink_rate *rate_node = NULL;
+	struct ice_vf *vf;
+	int i;
+
+	if (node->parent == tc_node) {
+		/* create root node */
+		rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
+	} else if (node->vsi_handle &&
+		   pf->vsi[node->vsi_handle]->vf) {
+		vf = pf->vsi[node->vsi_handle]->vf;
+		if (!vf->devlink_port.devlink_rate)
+			/* leaf nodes don't have children,
+			 * so we don't set rate_node
+			 */
+			devl_rate_leaf_create(&vf->devlink_port, node,
+					      node->parent->rate_node);
+	} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
+		   node->parent->rate_node) {
+		rate_node = devl_rate_node_create(devlink, node, node->name,
+						  node->parent->rate_node);
+	}
+
+	if (rate_node && !IS_ERR(rate_node))
+		node->rate_node = rate_node;
+
+	for (i = 0; i < node->num_children; i++)
+		ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
+}
+
+/**
+ * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate
+ * @devlink: devlink struct
+ * @vsi: main vsi struct
+ *
+ * This function finds a root node, then calls ice_traverse_tx_tree(), which
+ * traverses the tree and exports its contents to devlink-rate.
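+ *
+ * Once exported, the tree can be inspected and tuned from userspace
+ * (illustrative commands; device address and node name hypothetical):
+ *
+ *	devlink port function rate show pci/0000:4b:00.0
+ *	devlink port function rate set pci/0000:4b:00.0/node_10 tx_max 5Gbps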
+ */
+int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi)
+{
+	struct ice_port_info *pi = vsi->port_info;
+	struct ice_sched_node *tc_node;
+	struct ice_pf *pf = vsi->back;
+	int i;
+
+	tc_node = pi->root->children[0];
+	mutex_lock(&pi->sched_lock);
+	devl_lock(devlink);
+	for (i = 0; i < tc_node->num_children; i++)
+		ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
+	devl_unlock(devlink);
+	mutex_unlock(&pi->sched_lock);
+
+	return 0;
+}
+
+/**
+ * ice_set_object_tx_share - sets node scheduling parameter
+ * @pi: port information struct instance
+ * @node: node struct instance
+ * @bw: bandwidth in bytes per second
+ * @extack: extended netdev ack structure
+ *
+ * This function sets the ICE_MIN_BW scheduling BW limit.
+ */
+static int ice_set_object_tx_share(struct ice_port_info *pi, struct ice_sched_node *node,
+				   u64 bw, struct netlink_ext_ack *extack)
+{
+	int status;
+
+	mutex_lock(&pi->sched_lock);
+	/* convert bytes per second to kilobits per second (1 kbps = 125 B/s) */
+	node->tx_share = div_u64(bw, 125);
+	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share);
+	mutex_unlock(&pi->sched_lock);
+
+	if (status)
+		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_share");
+
+	return status;
+}
+
+/**
+ * ice_set_object_tx_max - sets node scheduling parameter
+ * @pi: port information struct instance
+ * @node: node struct instance
+ * @bw: bandwidth in bytes per second
+ * @extack: extended netdev ack structure
+ *
+ * This function sets the ICE_MAX_BW scheduling BW limit.
+ */
+static int ice_set_object_tx_max(struct ice_port_info *pi, struct ice_sched_node *node,
+				 u64 bw, struct netlink_ext_ack *extack)
+{
+	int status;
+
+	mutex_lock(&pi->sched_lock);
+	/* convert bytes per second to kilobits per second (1 kbps = 125 B/s) */
+	node->tx_max = div_u64(bw, 125);
+	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max);
+	mutex_unlock(&pi->sched_lock);
+
+	if (status)
+		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_max");
+
+	return status;
+}
+
+/**
+ * ice_set_object_tx_priority - sets node scheduling parameter
+ * @pi: port information struct instance
+ * @node: node struct instance
+ * @priority: value representing priority for strict priority arbitration
+ * @extack: extended netdev ack structure
+ *
+ * This function sets the priority of a node among its siblings.
+ */
+static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched_node *node,
+				      u32 priority, struct netlink_ext_ack *extack)
+{
+	int status;
+
+	if (priority >= 8) {
+		NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
+		return -EINVAL;
+	}
+
+	mutex_lock(&pi->sched_lock);
+	node->tx_priority = priority;
+	status = ice_sched_set_node_priority(pi, node, node->tx_priority);
+	mutex_unlock(&pi->sched_lock);
+
+	if (status)
+		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_priority");
+
+	return status;
+}
+
+/**
+ * ice_set_object_tx_weight - sets node scheduling parameter
+ * @pi: port information struct instance
+ * @node: node struct instance
+ * @weight: value representing relative weight for WFQ arbitration
+ * @extack: extended netdev ack structure
+ *
+ * This function sets the node weight for the WFQ algorithm.
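+ *
+ * Worked example: weights are relative among siblings, so two nodes
+ * under the same parent with weights 100 and 50 are granted excess
+ * bandwidth in a 2:1 ratio once their tx_share minimums are met.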
+ */ +static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_node *node, +				    u32 weight, struct netlink_ext_ack *extack) +{ +	int status; + +	if (weight > 200 || weight < 1) { +		NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200"); +		return -EINVAL; +	} + +	mutex_lock(&pi->sched_lock); +	node->tx_weight = weight; +	status = ice_sched_set_node_weight(pi, node, node->tx_weight); +	mutex_unlock(&pi->sched_lock); + +	if (status) +		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_weight"); + +	return status; +} + +/** + * ice_get_pi_from_dev_rate - get port info from devlink_rate + * @rate_node: devlink struct instance + * + * This function returns corresponding port_info struct of devlink_rate + */ +static struct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_node) +{ +	struct ice_pf *pf = devlink_priv(rate_node->devlink); + +	return ice_get_main_vsi(pf)->port_info; +} + +static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv, +				     struct netlink_ext_ack *extack) +{ +	struct ice_sched_node *node; +	struct ice_port_info *pi; + +	pi = ice_get_pi_from_dev_rate(rate_node); + +	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink))) +		return -EBUSY; + +	/* preallocate memory for ice_sched_node */ +	node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL); +	*priv = node; + +	return 0; +} + +static int ice_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, +				     struct netlink_ext_ack *extack) +{ +	struct ice_sched_node *node, *tc_node; +	struct ice_port_info *pi; + +	pi = ice_get_pi_from_dev_rate(rate_node); +	tc_node = pi->root->children[0]; +	node = priv; + +	if (!rate_node->parent || !node || tc_node == node || !extack) +		return 0; + +	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink))) +		return -EBUSY; + +	/* can't allow to delete a node with children */ +	if (node->num_children) +		return -EINVAL; + +	mutex_lock(&pi->sched_lock); +	ice_free_sched_node(pi, node); +	mutex_unlock(&pi->sched_lock); + +	return 0; +} + +static int ice_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv, +					    u64 tx_max, struct netlink_ext_ack *extack) +{ +	struct ice_sched_node *node = priv; + +	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink))) +		return -EBUSY; + +	if (!node) +		return 0; + +	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_leaf), +				     node, tx_max, extack); +} + +static int ice_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv, +					      u64 tx_share, struct netlink_ext_ack *extack) +{ +	struct ice_sched_node *node = priv; + +	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink))) +		return -EBUSY; + +	if (!node) +		return 0; + +	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_leaf), node, +				       tx_share, extack); +} + +static int ice_devlink_rate_leaf_tx_priority_set(struct devlink_rate *rate_leaf, void *priv, +						 u32 tx_priority, struct netlink_ext_ack *extack) +{ +	struct ice_sched_node *node = priv; + +	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink))) +		return -EBUSY; + +	if (!node) +		return 0; + +	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_leaf), node, +					  tx_priority, extack); +} + +static int ice_devlink_rate_leaf_tx_weight_set(struct devlink_rate *rate_leaf, void *priv, +					       u32 tx_weight, struct netlink_ext_ack *extack) +{ +	struct ice_sched_node *node = priv; + +	if 
(!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink))) +		return -EBUSY; + +	if (!node) +		return 0; + +	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_leaf), node, +					tx_weight, extack); +} + +static int ice_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv, +					    u64 tx_max, struct netlink_ext_ack *extack) +{ +	struct ice_sched_node *node = priv; + +	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink))) +		return -EBUSY; + +	if (!node) +		return 0; + +	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_node), +				     node, tx_max, extack); +} + +static int ice_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv, +					      u64 tx_share, struct netlink_ext_ack *extack) +{ +	struct ice_sched_node *node = priv; + +	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink))) +		return -EBUSY; + +	if (!node) +		return 0; + +	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_node), +				       node, tx_share, extack); +} + +static int ice_devlink_rate_node_tx_priority_set(struct devlink_rate *rate_node, void *priv, +						 u32 tx_priority, struct netlink_ext_ack *extack) +{ +	struct ice_sched_node *node = priv; + +	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink))) +		return -EBUSY; + +	if (!node) +		return 0; + +	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_node), +					  node, tx_priority, extack); +} + +static int ice_devlink_rate_node_tx_weight_set(struct devlink_rate *rate_node, void *priv, +					       u32 tx_weight, struct netlink_ext_ack *extack) +{ +	struct ice_sched_node *node = priv; + +	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink))) +		return -EBUSY; + +	if (!node) +		return 0; + +	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_node), +					node, tx_weight, extack); +} + +static int ice_devlink_set_parent(struct devlink_rate *devlink_rate, +				  struct devlink_rate *parent, +				  void *priv, void *parent_priv, +				  struct netlink_ext_ack *extack) +{ +	struct ice_port_info *pi = ice_get_pi_from_dev_rate(devlink_rate); +	struct ice_sched_node *tc_node, *node, *parent_node; +	u16 num_nodes_added; +	u32 first_node_teid; +	u32 node_teid; +	int status; + +	tc_node = pi->root->children[0]; +	node = priv; + +	if (!extack) +		return 0; + +	if (!ice_enable_custom_tx(devlink_priv(devlink_rate->devlink))) +		return -EBUSY; + +	if (!parent) { +		if (!node || tc_node == node || node->num_children) +			return -EINVAL; + +		mutex_lock(&pi->sched_lock); +		ice_free_sched_node(pi, node); +		mutex_unlock(&pi->sched_lock); + +		return 0; +	} + +	parent_node = parent_priv; + +	/* if the node doesn't exist, create it */ +	if (!node->parent) { +		mutex_lock(&pi->sched_lock); +		status = ice_sched_add_elems(pi, tc_node, parent_node, +					     parent_node->tx_sched_layer + 1, +					     1, &num_nodes_added, &first_node_teid, +					     &node); +		mutex_unlock(&pi->sched_lock); + +		if (status) { +			NL_SET_ERR_MSG_MOD(extack, "Can't add a new node"); +			return status; +		} + +		if (devlink_rate->tx_share) +			ice_set_object_tx_share(pi, node, devlink_rate->tx_share, extack); +		if (devlink_rate->tx_max) +			ice_set_object_tx_max(pi, node, devlink_rate->tx_max, extack); +		if (devlink_rate->tx_priority) +			ice_set_object_tx_priority(pi, node, devlink_rate->tx_priority, extack); +		if (devlink_rate->tx_weight) +			ice_set_object_tx_weight(pi, node, devlink_rate->tx_weight, extack); +	} else { +		node_teid = le32_to_cpu(node->info.node_teid); +		
mutex_lock(&pi->sched_lock); +		status = ice_sched_move_nodes(pi, parent_node, 1, &node_teid); +		mutex_unlock(&pi->sched_lock); + +		if (status) +			NL_SET_ERR_MSG_MOD(extack, "Can't move existing node to a new parent"); +	} + +	return status; +} + +/** + * ice_devlink_reload_up - do reload up after reinit + * @devlink: pointer to the devlink instance reloading + * @action: the action requested + * @limit: limits imposed by userspace, such as not resetting + * @actions_performed: on return, indicate what actions actually performed + * @extack: netlink extended ACK structure + */ +static int +ice_devlink_reload_up(struct devlink *devlink, +		      enum devlink_reload_action action, +		      enum devlink_reload_limit limit, +		      u32 *actions_performed, +		      struct netlink_ext_ack *extack) +{ +	struct ice_pf *pf = devlink_priv(devlink); + +	switch (action) { +	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: +		*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); +		return ice_load(pf); +	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: +		*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); +		return ice_devlink_reload_empr_finish(pf, extack); +	default: +		WARN_ON(1); +		return -EOPNOTSUPP; +	} +} +  static const struct devlink_ops ice_devlink_ops = {  	.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK, -	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), +	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | +			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),  	/* The ice driver currently does not support driver reinit */ -	.reload_down = ice_devlink_reload_empr_start, -	.reload_up = ice_devlink_reload_empr_finish, +	.reload_down = ice_devlink_reload_down, +	.reload_up = ice_devlink_reload_up,  	.port_split = ice_devlink_port_split,  	.port_unsplit = ice_devlink_port_unsplit,  	.eswitch_mode_get = ice_eswitch_mode_get,  	.eswitch_mode_set = ice_eswitch_mode_set,  	.info_get = ice_devlink_info_get,  	.flash_update = ice_devlink_flash_update, + +	.rate_node_new = ice_devlink_rate_node_new, +	.rate_node_del = ice_devlink_rate_node_del, + +	.rate_leaf_tx_max_set = ice_devlink_rate_leaf_tx_max_set, +	.rate_leaf_tx_share_set = ice_devlink_rate_leaf_tx_share_set, +	.rate_leaf_tx_priority_set = ice_devlink_rate_leaf_tx_priority_set, +	.rate_leaf_tx_weight_set = ice_devlink_rate_leaf_tx_weight_set, + +	.rate_node_tx_max_set = ice_devlink_rate_node_tx_max_set, +	.rate_node_tx_share_set = ice_devlink_rate_node_tx_share_set, +	.rate_node_tx_priority_set = ice_devlink_rate_node_tx_priority_set, +	.rate_node_tx_weight_set = ice_devlink_rate_node_tx_weight_set, + +	.rate_leaf_parent_set = ice_devlink_set_parent, +	.rate_node_parent_set = ice_devlink_set_parent,  };  static int @@ -881,7 +1435,6 @@ void ice_devlink_register(struct ice_pf *pf)  {  	struct devlink *devlink = priv_to_devlink(pf); -	devlink_set_features(devlink, DEVLINK_F_RELOAD);  	devlink_register(devlink);  } @@ -916,25 +1469,9 @@ ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)  int ice_devlink_register_params(struct ice_pf *pf)  {  	struct devlink *devlink = priv_to_devlink(pf); -	union devlink_param_value value; -	int err; -	err = devlink_params_register(devlink, ice_devlink_params, -				      ARRAY_SIZE(ice_devlink_params)); -	if (err) -		return err; - -	value.vbool = false; -	devlink_param_driverinit_value_set(devlink, -					   DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP, -					   value); - -	value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags) ? 
true : false; -	devlink_param_driverinit_value_set(devlink, -					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, -					   value); - -	return 0; +	return devlink_params_register(devlink, ice_devlink_params, +				       ARRAY_SIZE(ice_devlink_params));  }  void ice_devlink_unregister_params(struct ice_pf *pf) @@ -1033,12 +1570,7 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)   */  void ice_devlink_destroy_pf_port(struct ice_pf *pf)  { -	struct devlink_port *devlink_port; - -	devlink_port = &pf->devlink_port; - -	devlink_port_type_clear(devlink_port); -	devlink_port_unregister(devlink_port); +	devlink_port_unregister(&pf->devlink_port);  }  /** @@ -1094,31 +1626,28 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)   */  void ice_devlink_destroy_vf_port(struct ice_vf *vf)  { -	struct devlink_port *devlink_port; - -	devlink_port = &vf->devlink_port; - -	devlink_port_type_clear(devlink_port); -	devlink_port_unregister(devlink_port); +	devl_rate_leaf_destroy(&vf->devlink_port); +	devlink_port_unregister(&vf->devlink_port);  }  #define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024) +static const struct devlink_region_ops ice_nvm_region_ops; +static const struct devlink_region_ops ice_sram_region_ops; +  /**   * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents   * @devlink: the devlink instance - * @ops: the devlink region being snapshotted + * @ops: the devlink region to snapshot   * @extack: extended ACK response structure   * @data: on exit points to snapshot data buffer   * - * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for - * the nvm-flash devlink region. It captures a snapshot of the full NVM flash - * contents, including both banks of flash. This snapshot can later be viewed - * via the devlink-region interface. + * This function is called in response to a DEVLINK_CMD_REGION_NEW for either + * the nvm-flash or shadow-ram region.   * - * It captures the flash using the FLASH_ONLY bit set when reading via - * firmware, so it does not read the current Shadow RAM contents. For that, - * use the shadow-ram region. + * It captures a snapshot of the NVM or Shadow RAM flash contents. This + * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink + * interface.   *   * @returns zero on success, and updates the data pointer. Returns a non-zero   * error code on failure. 
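+ *
+ * Snapshots are requested from userspace with, e.g. (illustrative;
+ * device address hypothetical):
+ *
+ *	devlink region new pci/0000:4b:00.0/nvm-flash snapshot 1
+ *	devlink region dump pci/0000:4b:00.0/nvm-flash snapshot 1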
@@ -1130,17 +1659,27 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
 	struct ice_pf *pf = devlink_priv(devlink);
 	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_hw *hw = &pf->hw;
+	bool read_shadow_ram;
 	u8 *nvm_data, *tmp, i;
 	u32 nvm_size, left;
 	s8 num_blks;
 	int status;
 
-	nvm_size = hw->flash.flash_size;
+	if (ops == &ice_nvm_region_ops) {
+		read_shadow_ram = false;
+		nvm_size = hw->flash.flash_size;
+	} else if (ops == &ice_sram_region_ops) {
+		read_shadow_ram = true;
+		nvm_size = hw->flash.sr_words * 2u;
+	} else {
+		NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function");
+		return -EOPNOTSUPP;
+	}
+
 	nvm_data = vzalloc(nvm_size);
 	if (!nvm_data)
 		return -ENOMEM;
-
 	num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
 	tmp = nvm_data;
 	left = nvm_size;
@@ -1164,7 +1703,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
 		}
 
 		status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
-					   &read_sz, tmp, false);
+					   &read_sz, tmp, read_shadow_ram);
 		if (status) {
 			dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
 				read_sz, status, hw->adminq.sq_last_status);
@@ -1185,62 +1724,69 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
 }
 
 /**
- * ice_devlink_sram_snapshot - Capture a snapshot of the Shadow RAM contents
+ * ice_devlink_nvm_read - Read a portion of NVM flash contents
  * @devlink: the devlink instance
- * @ops: the devlink region being snapshotted
+ * @ops: the devlink region being read
  * @extack: extended ACK response structure
- * @data: on exit points to snapshot data buffer
+ * @offset: the offset to start at
+ * @size: the amount to read
+ * @data: the data buffer to read into
  *
- * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
- * the shadow-ram devlink region. It captures a snapshot of the shadow ram
- * contents. This snapshot can later be viewed via the devlink-region
- * interface.
+ * This function is called in response to DEVLINK_CMD_REGION_READ to directly
+ * read a section of the NVM contents.
+ *
+ * It reads from either the nvm-flash or shadow-ram region contents.
  *
  * @returns zero on success, and updates the data pointer. Returns a non-zero
  * error code on failure.
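+ *
+ * A direct read does not require a snapshot, e.g. (illustrative;
+ * device address hypothetical):
+ *
+ *	devlink region read pci/0000:4b:00.0/nvm-flash address 0 length 256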
*/ -static int -ice_devlink_sram_snapshot(struct devlink *devlink, -			  const struct devlink_region_ops __always_unused *ops, -			  struct netlink_ext_ack *extack, u8 **data) +static int ice_devlink_nvm_read(struct devlink *devlink, +				const struct devlink_region_ops *ops, +				struct netlink_ext_ack *extack, +				u64 offset, u32 size, u8 *data)  {  	struct ice_pf *pf = devlink_priv(devlink);  	struct device *dev = ice_pf_to_dev(pf);  	struct ice_hw *hw = &pf->hw; -	u8 *sram_data; -	u32 sram_size; -	int err; +	bool read_shadow_ram; +	u64 nvm_size; +	int status; -	sram_size = hw->flash.sr_words * 2u; -	sram_data = vzalloc(sram_size); -	if (!sram_data) -		return -ENOMEM; +	if (ops == &ice_nvm_region_ops) { +		read_shadow_ram = false; +		nvm_size = hw->flash.flash_size; +	} else if (ops == &ice_sram_region_ops) { +		read_shadow_ram = true; +		nvm_size = hw->flash.sr_words * 2u; +	} else { +		NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function"); +		return -EOPNOTSUPP; +	} -	err = ice_acquire_nvm(hw, ICE_RES_READ); -	if (err) { +	if (offset + size >= nvm_size) { +		NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size"); +		return -ERANGE; +	} + +	status = ice_acquire_nvm(hw, ICE_RES_READ); +	if (status) {  		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", -			err, hw->adminq.sq_last_status); +			status, hw->adminq.sq_last_status);  		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); -		vfree(sram_data); -		return err; +		return -EIO;  	} -	/* Read from the Shadow RAM, rather than directly from NVM */ -	err = ice_read_flat_nvm(hw, 0, &sram_size, sram_data, true); -	if (err) { +	status = ice_read_flat_nvm(hw, (u32)offset, &size, data, +				   read_shadow_ram); +	if (status) {  		dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", -			sram_size, err, hw->adminq.sq_last_status); -		NL_SET_ERR_MSG_MOD(extack, -				   "Failed to read Shadow RAM contents"); +			size, status, hw->adminq.sq_last_status); +		NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");  		ice_release_nvm(hw); -		vfree(sram_data); -		return err; +		return -EIO;  	} -  	ice_release_nvm(hw); -	*data = sram_data; -  	return 0;  } @@ -1292,12 +1838,14 @@ static const struct devlink_region_ops ice_nvm_region_ops = {  	.name = "nvm-flash",  	.destructor = vfree,  	.snapshot = ice_devlink_nvm_snapshot, +	.read = ice_devlink_nvm_read,  };  static const struct devlink_region_ops ice_sram_region_ops = {  	.name = "shadow-ram",  	.destructor = vfree, -	.snapshot = ice_devlink_sram_snapshot, +	.snapshot = ice_devlink_nvm_snapshot, +	.read = ice_devlink_nvm_read,  };  static const struct devlink_region_ops ice_devcaps_region_ops = { diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h index fe006d9946f8..6ec96779f52e 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/ice_devlink.h @@ -18,4 +18,7 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf);  void ice_devlink_init_regions(struct ice_pf *pf);  void ice_devlink_destroy_regions(struct ice_pf *pf); +int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi); +void ice_tear_down_devlink_rate_tree(struct ice_pf *pf); +  #endif /* _ICE_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index f9f15acae90a..f6dd3f8fd936 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ 
b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -71,17 +71,17 @@ void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
 	if (!ice_is_switchdev_running(vf->pf))
 		return;
 
-	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
+	if (is_valid_ether_addr(vf->hw_lan_addr)) {
 		err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
-						  vf->hw_lan_addr.addr);
+						  vf->hw_lan_addr);
 		if (err) {
 			dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
-				vf->hw_lan_addr.addr, vf->vf_id, err);
+				vf->hw_lan_addr, vf->vf_id, err);
 			return;
 		}
 		vf->num_mac++;
 
-		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
 	}
 }
@@ -237,7 +237,7 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
 		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
 		metadata_dst_free(vf->repr->dst);
 		vf->repr->dst = NULL;
-		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
 					       ICE_FWD_TO_VSI);
 
 		netif_napi_del(&vf->repr->q_vector->napi);
@@ -265,14 +265,14 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 						   GFP_KERNEL);
 		if (!vf->repr->dst) {
 			ice_fltr_add_mac_and_broadcast(vsi,
-						       vf->hw_lan_addr.addr,
+						       vf->hw_lan_addr,
 						       ICE_FWD_TO_VSI);
 			goto err;
 		}
 
 		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
 			ice_fltr_add_mac_and_broadcast(vsi,
-						       vf->hw_lan_addr.addr,
+						       vf->hw_lan_addr,
 						       ICE_FWD_TO_VSI);
 			metadata_dst_free(vf->repr->dst);
 			vf->repr->dst = NULL;
@@ -281,7 +281,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 
 		if (ice_vsi_add_vlan_zero(vsi)) {
 			ice_fltr_add_mac_and_broadcast(vsi,
-						       vf->hw_lan_addr.addr,
+						       vf->hw_lan_addr,
 						       ICE_FWD_TO_VSI);
 			metadata_dst_free(vf->repr->dst);
 			vf->repr->dst = NULL;
@@ -338,7 +338,7 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
 
 	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
 	if (ret) {
-		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
+		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
 		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
 			vsi->vf->vf_id);
 	}
@@ -425,7 +425,13 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
 static struct ice_vsi *
 ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 {
-	return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL);
+	struct ice_vsi_cfg_params params = {};
+
+	params.type = ICE_VSI_SWITCHDEV_CTRL;
+	params.pi = pi;
+	params.flags = ICE_VSI_FLAG_INIT;
+
+	return ice_vsi_setup(pf, &params);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index b7be84bbe72d..b360bd8f1599 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -151,6 +151,175 @@ static const u32 ice_regs_dump_list[] = {
 	QINT_RQCTL(0),
 	PFINT_OICR_ENA,
 	QRX_ITR(0),
+#define GLDCB_TLPM_PCI_DM			0x000A0180
+	GLDCB_TLPM_PCI_DM,
+#define GLDCB_TLPM_TC2PFC			0x000A0194
+	GLDCB_TLPM_TC2PFC,
+#define TCDCB_TLPM_WAIT_DM(_i)			(0x000A0080 + ((_i) * 4))
+	TCDCB_TLPM_WAIT_DM(0),
+	TCDCB_TLPM_WAIT_DM(1),
+	TCDCB_TLPM_WAIT_DM(2),
+	TCDCB_TLPM_WAIT_DM(3),
+	TCDCB_TLPM_WAIT_DM(4),
+	TCDCB_TLPM_WAIT_DM(5),
+	TCDCB_TLPM_WAIT_DM(6),
+	TCDCB_TLPM_WAIT_DM(7),
+	
TCDCB_TLPM_WAIT_DM(8), +	TCDCB_TLPM_WAIT_DM(9), +	TCDCB_TLPM_WAIT_DM(10), +	TCDCB_TLPM_WAIT_DM(11), +	TCDCB_TLPM_WAIT_DM(12), +	TCDCB_TLPM_WAIT_DM(13), +	TCDCB_TLPM_WAIT_DM(14), +	TCDCB_TLPM_WAIT_DM(15), +	TCDCB_TLPM_WAIT_DM(16), +	TCDCB_TLPM_WAIT_DM(17), +	TCDCB_TLPM_WAIT_DM(18), +	TCDCB_TLPM_WAIT_DM(19), +	TCDCB_TLPM_WAIT_DM(20), +	TCDCB_TLPM_WAIT_DM(21), +	TCDCB_TLPM_WAIT_DM(22), +	TCDCB_TLPM_WAIT_DM(23), +	TCDCB_TLPM_WAIT_DM(24), +	TCDCB_TLPM_WAIT_DM(25), +	TCDCB_TLPM_WAIT_DM(26), +	TCDCB_TLPM_WAIT_DM(27), +	TCDCB_TLPM_WAIT_DM(28), +	TCDCB_TLPM_WAIT_DM(29), +	TCDCB_TLPM_WAIT_DM(30), +	TCDCB_TLPM_WAIT_DM(31), +#define GLPCI_WATMK_CLNT_PIPEMON		0x000BFD90 +	GLPCI_WATMK_CLNT_PIPEMON, +#define GLPCI_CUR_CLNT_COMMON			0x000BFD84 +	GLPCI_CUR_CLNT_COMMON, +#define GLPCI_CUR_CLNT_PIPEMON			0x000BFD88 +	GLPCI_CUR_CLNT_PIPEMON, +#define GLPCI_PCIERR				0x0009DEB0 +	GLPCI_PCIERR, +#define GLPSM_DEBUG_CTL_STATUS			0x000B0600 +	GLPSM_DEBUG_CTL_STATUS, +#define GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT	0x000B0680 +	GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT, +#define GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT	0x000B0684 +	GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT, +#define GLPSM0_DEBUG_DT_OUT_OF_WINDOW		0x000B0688 +	GLPSM0_DEBUG_DT_OUT_OF_WINDOW, +#define GLPSM0_DEBUG_INTF_HW_ERROR_DETECT	0x000B069C +	GLPSM0_DEBUG_INTF_HW_ERROR_DETECT, +#define GLPSM0_DEBUG_MISC_HW_ERROR_DETECT	0x000B06A0 +	GLPSM0_DEBUG_MISC_HW_ERROR_DETECT, +#define GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT	0x000B0E80 +	GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT, +#define GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT	0x000B0E84 +	GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT, +#define GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT	0x000B0E88 +	GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT, +#define GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT  0x000B0E8C +	GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT, +#define GLPSM1_DEBUG_MISC_HW_ERROR_DETECT       0x000B0E90 +	GLPSM1_DEBUG_MISC_HW_ERROR_DETECT, +#define GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT       0x000B1680 +	GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT, +#define GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT      0x000B1684 +	GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT, +#define GLPSM2_DEBUG_MISC_HW_ERROR_DETECT       0x000B1688 +	GLPSM2_DEBUG_MISC_HW_ERROR_DETECT, +#define GLTDPU_TCLAN_COMP_BOB(_i)               (0x00049ADC + ((_i) * 4)) +	GLTDPU_TCLAN_COMP_BOB(1), +	GLTDPU_TCLAN_COMP_BOB(2), +	GLTDPU_TCLAN_COMP_BOB(3), +	GLTDPU_TCLAN_COMP_BOB(4), +	GLTDPU_TCLAN_COMP_BOB(5), +	GLTDPU_TCLAN_COMP_BOB(6), +	GLTDPU_TCLAN_COMP_BOB(7), +	GLTDPU_TCLAN_COMP_BOB(8), +#define GLTDPU_TCB_CMD_BOB(_i)                  (0x0004975C + ((_i) * 4)) +	GLTDPU_TCB_CMD_BOB(1), +	GLTDPU_TCB_CMD_BOB(2), +	GLTDPU_TCB_CMD_BOB(3), +	GLTDPU_TCB_CMD_BOB(4), +	GLTDPU_TCB_CMD_BOB(5), +	GLTDPU_TCB_CMD_BOB(6), +	GLTDPU_TCB_CMD_BOB(7), +	GLTDPU_TCB_CMD_BOB(8), +#define GLTDPU_PSM_UPDATE_BOB(_i)               (0x00049B5C + ((_i) * 4)) +	GLTDPU_PSM_UPDATE_BOB(1), +	GLTDPU_PSM_UPDATE_BOB(2), +	GLTDPU_PSM_UPDATE_BOB(3), +	GLTDPU_PSM_UPDATE_BOB(4), +	GLTDPU_PSM_UPDATE_BOB(5), +	GLTDPU_PSM_UPDATE_BOB(6), +	GLTDPU_PSM_UPDATE_BOB(7), +	GLTDPU_PSM_UPDATE_BOB(8), +#define GLTCB_CMD_IN_BOB(_i)                    (0x000AE288 + ((_i) * 4)) +	GLTCB_CMD_IN_BOB(1), +	GLTCB_CMD_IN_BOB(2), +	GLTCB_CMD_IN_BOB(3), +	GLTCB_CMD_IN_BOB(4), +	GLTCB_CMD_IN_BOB(5), +	GLTCB_CMD_IN_BOB(6), +	GLTCB_CMD_IN_BOB(7), +	GLTCB_CMD_IN_BOB(8), +#define GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(_i)   (0x000FC148 + ((_i) * 4)) +	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(1), +	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(2), +	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(3), +	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(4), +	
GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(5), +	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(6), +	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(7), +	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(8), +#define GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(_i) (0x000FC248 + ((_i) * 4)) +	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(1), +	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(2), +	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(3), +	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(4), +	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(5), +	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(6), +	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(7), +	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(8), +#define GLLAN_TCLAN_CACHE_CTL_BOB_CTL(_i)       (0x000FC1C8 + ((_i) * 4)) +	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(1), +	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(2), +	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(3), +	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(4), +	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(5), +	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(6), +	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(7), +	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(8), +#define GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(_i)  (0x000FC188 + ((_i) * 4)) +	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(1), +	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(2), +	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(3), +	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(4), +	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(5), +	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(6), +	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(7), +	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(8), +#define GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(_i) (0x000FC288 + ((_i) * 4)) +	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(1), +	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(2), +	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(3), +	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(4), +	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(5), +	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(6), +	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(7), +	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(8), +#define PRTDCB_TCUPM_REG_CM(_i)			(0x000BC360 + ((_i) * 4)) +	PRTDCB_TCUPM_REG_CM(0), +	PRTDCB_TCUPM_REG_CM(1), +	PRTDCB_TCUPM_REG_CM(2), +	PRTDCB_TCUPM_REG_CM(3), +#define PRTDCB_TCUPM_REG_DM(_i)			(0x000BC3A0 + ((_i) * 4)) +	PRTDCB_TCUPM_REG_DM(0), +	PRTDCB_TCUPM_REG_DM(1), +	PRTDCB_TCUPM_REG_DM(2), +	PRTDCB_TCUPM_REG_DM(3), +#define PRTDCB_TLPM_REG_DM(_i)			(0x000A0000 + ((_i) * 4)) +	PRTDCB_TLPM_REG_DM(0), +	PRTDCB_TLPM_REG_DM(1), +	PRTDCB_TLPM_REG_DM(2), +	PRTDCB_TLPM_REG_DM(3),  };  struct ice_priv_flag { @@ -487,7 +656,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)  	if (status)  		goto err_setup_rx_ring; -	status = ice_vsi_cfg(vsi); +	status = ice_vsi_cfg_lan(vsi);  	if (status)  		goto err_setup_rx_ring; @@ -495,7 +664,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)  	if (status)  		goto err_start_rx_ring; -	return status; +	return 0;  err_start_rx_ring:  	ice_vsi_free_rx_rings(vsi); @@ -1375,9 +1544,9 @@ __ice_get_ethtool_stats(struct net_device *netdev,  	ice_for_each_alloc_txq(vsi, j) {  		tx_ring = READ_ONCE(vsi->tx_rings[j]); -		if (tx_ring) { -			data[i++] = tx_ring->stats.pkts; -			data[i++] = tx_ring->stats.bytes; +		if (tx_ring && tx_ring->ring_stats) { +			data[i++] = tx_ring->ring_stats->stats.pkts; +			data[i++] = tx_ring->ring_stats->stats.bytes;  		} else {  			data[i++] = 0;  			data[i++] = 0; @@ -1386,9 +1555,9 @@ __ice_get_ethtool_stats(struct net_device *netdev,  	ice_for_each_alloc_rxq(vsi, j) {  		rx_ring = READ_ONCE(vsi->rx_rings[j]); -		if (rx_ring) { -			data[i++] = rx_ring->stats.pkts; -			data[i++] = rx_ring->stats.bytes; +		if (rx_ring && rx_ring->ring_stats) { +			data[i++] = rx_ring->ring_stats->stats.pkts; +			data[i++] = rx_ring->ring_stats->stats.bytes;  		} else {  			data[i++] = 0;  			
data[i++] = 0; @@ -1781,8 +1950,7 @@ ice_phy_type_to_ethtool(struct net_device *netdev,  			   ICE_PHY_TYPE_LOW_100G_CAUI4 |  			   ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC |  			   ICE_PHY_TYPE_LOW_100G_AUI4 | -			   ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | -			   ICE_PHY_TYPE_LOW_100GBASE_CP2; +			   ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4;  	phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |  			   ICE_PHY_TYPE_HIGH_100G_CAUI2 |  			   ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | @@ -1795,15 +1963,27 @@ ice_phy_type_to_ethtool(struct net_device *netdev,  						100000baseCR4_Full);  	} -	phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_SR4 | -			   ICE_PHY_TYPE_LOW_100GBASE_SR2; -	if (phy_types_low & phy_type_mask_lo) { +	if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) { +		ethtool_link_ksettings_add_link_mode(ks, supported, +						     100000baseCR2_Full); +		ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, +						100000baseCR2_Full); +	} + +	if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4) {  		ethtool_link_ksettings_add_link_mode(ks, supported,  						     100000baseSR4_Full);  		ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,  						100000baseSR4_Full);  	} +	if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) { +		ethtool_link_ksettings_add_link_mode(ks, supported, +						     100000baseSR2_Full); +		ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, +						100000baseSR2_Full); +	} +  	phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 |  			   ICE_PHY_TYPE_LOW_100GBASE_DR;  	if (phy_types_low & phy_type_mask_lo) { @@ -1815,14 +1995,20 @@ ice_phy_type_to_ethtool(struct net_device *netdev,  	phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 |  			   ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4; -	phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4; -	if (phy_types_low & phy_type_mask_lo || -	    phy_types_high & phy_type_mask_hi) { +	if (phy_types_low & phy_type_mask_lo) {  		ethtool_link_ksettings_add_link_mode(ks, supported,  						     100000baseKR4_Full);  		ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,  						100000baseKR4_Full);  	} + +	if (phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) { +		ethtool_link_ksettings_add_link_mode(ks, supported, +						     100000baseKR2_Full); +		ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, +						100000baseKR2_Full); +	} +  }  #define TEST_SET_BITS_TIMEOUT	50 @@ -2073,17 +2259,15 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)  						  100baseT_Full))  		adv_link_speed |= ICE_AQ_LINK_SPEED_100MB;  	if (ethtool_link_ksettings_test_link_mode(ks, advertising, -						  1000baseX_Full)) -		adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; -	if (ethtool_link_ksettings_test_link_mode(ks, advertising, +						  1000baseX_Full) || +	    ethtool_link_ksettings_test_link_mode(ks, advertising,  						  1000baseT_Full) ||  	    ethtool_link_ksettings_test_link_mode(ks, advertising,  						  1000baseKX_Full))  		adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;  	if (ethtool_link_ksettings_test_link_mode(ks, advertising, -						  2500baseT_Full)) -		adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; -	if (ethtool_link_ksettings_test_link_mode(ks, advertising, +						  2500baseT_Full) || +	    ethtool_link_ksettings_test_link_mode(ks, advertising,  						  2500baseX_Full))  		adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;  	if (ethtool_link_ksettings_test_link_mode(ks, advertising, @@ -2092,9 +2276,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)  	if (ethtool_link_ksettings_test_link_mode(ks, 
advertising,  						  10000baseT_Full) ||  	    ethtool_link_ksettings_test_link_mode(ks, advertising, -						  10000baseKR_Full)) -		adv_link_speed |= ICE_AQ_LINK_SPEED_10GB; -	if (ethtool_link_ksettings_test_link_mode(ks, advertising, +						  10000baseKR_Full) || +	    ethtool_link_ksettings_test_link_mode(ks, advertising,  						  10000baseSR_Full) ||  	    ethtool_link_ksettings_test_link_mode(ks, advertising,  						  10000baseLR_Full)) @@ -2118,9 +2301,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)  	if (ethtool_link_ksettings_test_link_mode(ks, advertising,  						  50000baseCR2_Full) ||  	    ethtool_link_ksettings_test_link_mode(ks, advertising, -						  50000baseKR2_Full)) -		adv_link_speed |= ICE_AQ_LINK_SPEED_50GB; -	if (ethtool_link_ksettings_test_link_mode(ks, advertising, +						  50000baseKR2_Full) || +	    ethtool_link_ksettings_test_link_mode(ks, advertising,  						  50000baseSR2_Full))  		adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;  	if (ethtool_link_ksettings_test_link_mode(ks, advertising, @@ -2130,7 +2312,13 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)  	    ethtool_link_ksettings_test_link_mode(ks, advertising,  						  100000baseLR4_ER4_Full) ||  	    ethtool_link_ksettings_test_link_mode(ks, advertising, -						  100000baseKR4_Full)) +						  100000baseKR4_Full) || +	    ethtool_link_ksettings_test_link_mode(ks, advertising, +						  100000baseCR2_Full) || +	    ethtool_link_ksettings_test_link_mode(ks, advertising, +						  100000baseSR2_Full) || +	    ethtool_link_ksettings_test_link_mode(ks, advertising, +						  100000baseKR2_Full))  		adv_link_speed |= ICE_AQ_LINK_SPEED_100GB;  	return adv_link_speed; @@ -2858,8 +3046,6 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,  		/* clone ring and setup updated count */  		xdp_rings[i] = *vsi->xdp_rings[i];  		xdp_rings[i].count = new_tx_cnt; -		xdp_rings[i].next_dd = ICE_RING_QUARTER(&xdp_rings[i]) - 1; -		xdp_rings[i].next_rs = ICE_RING_QUARTER(&xdp_rings[i]) - 1;  		xdp_rings[i].desc = NULL;  		xdp_rings[i].tx_buf = NULL;  		err = ice_setup_tx_ring(&xdp_rings[i]); @@ -2904,7 +3090,7 @@ process_rx:  		/* allocate Rx buffers */  		err = ice_alloc_rx_bufs(&rx_rings[i], -					ICE_DESC_UNUSED(&rx_rings[i])); +					ICE_RX_DESC_UNUSED(&rx_rings[i]));  rx_unwind:  		if (err) {  			while (i) { @@ -3472,7 +3658,9 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)  	struct ice_vsi *vsi = np->vsi;  	struct ice_pf *pf = vsi->back;  	int new_rx = 0, new_tx = 0; +	bool locked = false;  	u32 curr_combined; +	int ret = 0;  	/* do not support changing channels in Safe Mode */  	if (ice_is_safe_mode(pf)) { @@ -3536,15 +3724,33 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)  		return -EINVAL;  	} -	ice_vsi_recfg_qs(vsi, new_rx, new_tx); +	if (pf->adev) { +		mutex_lock(&pf->adev_mutex); +		device_lock(&pf->adev->dev); +		locked = true; +		if (pf->adev->dev.driver) { +			netdev_err(dev, "Cannot change channels when RDMA is active\n"); +			ret = -EBUSY; +			goto adev_unlock; +		} +	} + +	ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked); -	if (!netif_is_rxfh_configured(dev)) -		return ice_vsi_set_dflt_rss_lut(vsi, new_rx); +	if (!netif_is_rxfh_configured(dev)) { +		ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx); +		goto adev_unlock; +	}  	/* Update rss_size due to change in Rx queues */  	vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx); -	return 0; +adev_unlock: +	if 
(locked) { +		device_unlock(&pf->adev->dev); +		mutex_unlock(&pf->adev_mutex); +	} +	return ret;  }  /** diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 4b3bb19e1d06..5ce413965930 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -6,23 +6,6 @@  #include "ice_flow.h"  #include "ice.h" -/* For supporting double VLAN mode, it is necessary to enable or disable certain - * boost tcam entries. The metadata labels names that match the following - * prefixes will be saved to allow enabling double VLAN mode. - */ -#define ICE_DVM_PRE	"BOOST_MAC_VLAN_DVM"	/* enable these entries */ -#define ICE_SVM_PRE	"BOOST_MAC_VLAN_SVM"	/* disable these entries */ - -/* To support tunneling entries by PF, the package will append the PF number to - * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. - */ -#define ICE_TNL_PRE	"TNL_" -static const struct ice_tunnel_type_scan tnls[] = { -	{ TNL_VXLAN,		"TNL_VXLAN_PF" }, -	{ TNL_GENEVE,		"TNL_GENEVE_PF" }, -	{ TNL_LAST,		"" } -}; -  static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {  	/* SWITCH */  	{ @@ -104,225 +87,6 @@ static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)  }  /** - * ice_pkg_val_buf - * @buf: pointer to the ice buffer - * - * This helper function validates a buffer's header. - */ -static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) -{ -	struct ice_buf_hdr *hdr; -	u16 section_count; -	u16 data_end; - -	hdr = (struct ice_buf_hdr *)buf->buf; -	/* verify data */ -	section_count = le16_to_cpu(hdr->section_count); -	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) -		return NULL; - -	data_end = le16_to_cpu(hdr->data_end); -	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) -		return NULL; - -	return hdr; -} - -/** - * ice_find_buf_table - * @ice_seg: pointer to the ice segment - * - * Returns the address of the buffer table within the ice segment. - */ -static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) -{ -	struct ice_nvm_table *nvms; - -	nvms = (struct ice_nvm_table *) -		(ice_seg->device_table + -		 le32_to_cpu(ice_seg->device_table_count)); - -	return (__force struct ice_buf_table *) -		(nvms->vers + le32_to_cpu(nvms->table_count)); -} - -/** - * ice_pkg_enum_buf - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * - * This function will enumerate all the buffers in the ice segment. The first - * call is made with the ice_seg parameter non-NULL; on subsequent calls, - * ice_seg is set to NULL which continues the enumeration. When the function - * returns a NULL pointer, then the end of the buffers has been reached, or an - * unexpected value has been detected (for example an invalid section count or - * an invalid buffer end value). 
- */ -static struct ice_buf_hdr * -ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) -{ -	if (ice_seg) { -		state->buf_table = ice_find_buf_table(ice_seg); -		if (!state->buf_table) -			return NULL; - -		state->buf_idx = 0; -		return ice_pkg_val_buf(state->buf_table->buf_array); -	} - -	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) -		return ice_pkg_val_buf(state->buf_table->buf_array + -				       state->buf_idx); -	else -		return NULL; -} - -/** - * ice_pkg_advance_sect - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * - * This helper function will advance the section within the ice segment, - * also advancing the buffer if needed. - */ -static bool -ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) -{ -	if (!ice_seg && !state->buf) -		return false; - -	if (!ice_seg && state->buf) -		if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) -			return true; - -	state->buf = ice_pkg_enum_buf(ice_seg, state); -	if (!state->buf) -		return false; - -	/* start of new buffer, reset section index */ -	state->sect_idx = 0; -	return true; -} - -/** - * ice_pkg_enum_section - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * @sect_type: section type to enumerate - * - * This function will enumerate all the sections of a particular type in the - * ice segment. The first call is made with the ice_seg parameter non-NULL; - * on subsequent calls, ice_seg is set to NULL which continues the enumeration. - * When the function returns a NULL pointer, then the end of the matching - * sections has been reached. - */ -static void * -ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, -		     u32 sect_type) -{ -	u16 offset, size; - -	if (ice_seg) -		state->type = sect_type; - -	if (!ice_pkg_advance_sect(ice_seg, state)) -		return NULL; - -	/* scan for next matching section */ -	while (state->buf->section_entry[state->sect_idx].type != -	       cpu_to_le32(state->type)) -		if (!ice_pkg_advance_sect(NULL, state)) -			return NULL; - -	/* validate section */ -	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); -	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) -		return NULL; - -	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size); -	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) -		return NULL; - -	/* make sure the section fits in the buffer */ -	if (offset + size > ICE_PKG_BUF_SIZE) -		return NULL; - -	state->sect_type = -		le32_to_cpu(state->buf->section_entry[state->sect_idx].type); - -	/* calc pointer to this section */ -	state->sect = ((u8 *)state->buf) + -		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); - -	return state->sect; -} - -/** - * ice_pkg_enum_entry - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * @sect_type: section type to enumerate - * @offset: pointer to variable that receives the offset in the table (optional) - * @handler: function that handles access to the entries into the section type - * - * This function will enumerate all the entries in particular section type in - * the ice segment. The first call is made with the ice_seg parameter non-NULL; - * on subsequent calls, ice_seg is set to NULL which continues the enumeration. - * When the function returns a NULL pointer, then the end of the entries has - * been reached. 
- * - * Since each section may have a different header and entry size, the handler - * function is needed to determine the number and location of entries in each - * section. - * - * The offset parameter is optional, but should be used for sections that - * contain an offset for each section table. For such cases, the section handler - * function must return the appropriate offset + index to give the absolute - * offset for each entry. For example, if the base for a section's header - * indicates a base offset of 10, and the index for the entry is 2, then the - * section handler function should set the offset to 10 + 2 = 12. - */ -static void * -ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, -		   u32 sect_type, u32 *offset, -		   void *(*handler)(u32 sect_type, void *section, -				    u32 index, u32 *offset)) -{ -	void *entry; - -	if (ice_seg) { -		if (!handler) -			return NULL; - -		if (!ice_pkg_enum_section(ice_seg, state, sect_type)) -			return NULL; - -		state->entry_idx = 0; -		state->handler = handler; -	} else { -		state->entry_idx++; -	} - -	if (!state->handler) -		return NULL; - -	/* get entry */ -	entry = state->handler(state->sect_type, state->sect, state->entry_idx, -			       offset); -	if (!entry) { -		/* end of a section, look for another section of this type */ -		if (!ice_pkg_enum_section(NULL, state, 0)) -			return NULL; - -		state->entry_idx = 0; -		entry = state->handler(state->sect_type, state->sect, -				       state->entry_idx, offset); -	} - -	return entry; -} - -/**   * ice_hw_ptype_ena - check if the PTYPE is enabled or not   * @hw: pointer to the HW structure   * @ptype: the hardware PTYPE @@ -333,312 +97,6 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)  	       test_bit(ptype, hw->hw_ptype);  } -/** - * ice_marker_ptype_tcam_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the Marker PType TCAM entry to be returned - * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual Marker PType TCAM entries. 
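Editor's note: ice_pkg_enum_buf(), ice_pkg_enum_section() and ice_pkg_enum_entry() above all share one calling convention: the first call passes the segment, every continuation call passes NULL, and a NULL return ends the walk. A self-contained sketch of that convention, with toy types standing in for the package structures:

#include <stddef.h>
#include <stdio.h>

struct iter_state {
	const int *items;
	size_t count;
	size_t idx;
};

static const int *enum_item(const int *items, size_t count,
			    struct iter_state *st)
{
	if (items) {		/* first call: latch the collection */
		st->items = items;
		st->count = count;
		st->idx = 0;
	} else {		/* continuation call */
		st->idx++;
	}
	return st->idx < st->count ? &st->items[st->idx] : NULL;
}

int main(void)
{
	int data[] = { 10, 20, 30 };
	struct iter_state st = { 0 };

	for (const int *p = enum_item(data, 3, &st); p;
	     p = enum_item(NULL, 0, &st))	/* NULL continues the walk */
		printf("%d\n", *p);
	return 0;
}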
- */ -static void * -ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index, -			      u32 *offset) -{ -	struct ice_marker_ptype_tcam_section *marker_ptype; - -	if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE) -		return NULL; - -	if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF) -		return NULL; - -	if (offset) -		*offset = 0; - -	marker_ptype = section; -	if (index >= le16_to_cpu(marker_ptype->count)) -		return NULL; - -	return marker_ptype->tcam + index; -} - -/** - * ice_fill_hw_ptype - fill the enabled PTYPE bit information - * @hw: pointer to the HW structure - */ -static void ice_fill_hw_ptype(struct ice_hw *hw) -{ -	struct ice_marker_ptype_tcam_entry *tcam; -	struct ice_seg *seg = hw->seg; -	struct ice_pkg_enum state; - -	bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); -	if (!seg) -		return; - -	memset(&state, 0, sizeof(state)); - -	do { -		tcam = ice_pkg_enum_entry(seg, &state, -					  ICE_SID_RXPARSER_MARKER_PTYPE, NULL, -					  ice_marker_ptype_tcam_handler); -		if (tcam && -		    le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && -		    le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) -			set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); - -		seg = NULL; -	} while (tcam); -} - -/** - * ice_boost_tcam_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the boost TCAM entry to be returned - * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual boost TCAM entries. - */ -static void * -ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ -	struct ice_boost_tcam_section *boost; - -	if (!section) -		return NULL; - -	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) -		return NULL; - -	/* cppcheck-suppress nullPointer */ -	if (index > ICE_MAX_BST_TCAMS_IN_BUF) -		return NULL; - -	if (offset) -		*offset = 0; - -	boost = section; -	if (index >= le16_to_cpu(boost->count)) -		return NULL; - -	return boost->tcam + index; -} - -/** - * ice_find_boost_entry - * @ice_seg: pointer to the ice segment (non-NULL) - * @addr: Boost TCAM address of entry to search for - * @entry: returns pointer to the entry - * - * Finds a particular Boost TCAM entry and returns a pointer to that entry - * if it is found. The ice_seg parameter must not be NULL since the first call - * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. - */ -static int -ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, -		     struct ice_boost_tcam_entry **entry) -{ -	struct ice_boost_tcam_entry *tcam; -	struct ice_pkg_enum state; - -	memset(&state, 0, sizeof(state)); - -	if (!ice_seg) -		return -EINVAL; - -	do { -		tcam = ice_pkg_enum_entry(ice_seg, &state, -					  ICE_SID_RXPARSER_BOOST_TCAM, NULL, -					  ice_boost_tcam_handler); -		if (tcam && le16_to_cpu(tcam->addr) == addr) { -			*entry = tcam; -			return 0; -		} - -		ice_seg = NULL; -	} while (tcam); - -	*entry = NULL; -	return -EIO; -} - -/** - * ice_label_enum_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the label entry to be returned - * @offset: pointer to receive absolute offset, always zero for label sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual label entries. 
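Editor's note: the ice_*_handler() callbacks being removed here all have the same shape: reject a wrong section type, bound the index against both the per-buffer maximum and the section's own count, zero the optional offset, and return a pointer to the chosen entry. A compilable toy version of that shape, with made-up TOY_* names:

#include <stdint.h>

struct toy_section {
	uint16_t count;
	uint32_t entry[];
};

#define TOY_SECT_TYPE	42u
#define TOY_MAX_ENTRIES	100u

static void *toy_handler(uint32_t sect_type, void *section, uint32_t index,
			 uint32_t *offset)
{
	struct toy_section *s = section;

	if (!section || sect_type != TOY_SECT_TYPE)
		return NULL;
	if (index >= TOY_MAX_ENTRIES || index >= s->count)
		return NULL;
	if (offset)
		*offset = 0;	/* these sections carry no table offset */
	return &s->entry[index];
}

int main(void)
{
	struct { uint16_t count; uint32_t entry[3]; } sect = { 3, { 7, 8, 9 } };
	uint32_t off;
	uint32_t *e = toy_handler(TOY_SECT_TYPE, &sect, 1, &off);

	return (e && *e == 8) ? 0 : 1;
}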
- */ -static void * -ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index, -		       u32 *offset) -{ -	struct ice_label_section *labels; - -	if (!section) -		return NULL; - -	/* cppcheck-suppress nullPointer */ -	if (index > ICE_MAX_LABELS_IN_BUF) -		return NULL; - -	if (offset) -		*offset = 0; - -	labels = section; -	if (index >= le16_to_cpu(labels->count)) -		return NULL; - -	return labels->label + index; -} - -/** - * ice_enum_labels - * @ice_seg: pointer to the ice segment (NULL on subsequent calls) - * @type: the section type that will contain the label (0 on subsequent calls) - * @state: ice_pkg_enum structure that will hold the state of the enumeration - * @value: pointer to a value that will return the label's value if found - * - * Enumerates a list of labels in the package. The caller will call - * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call - * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL - * the end of the list has been reached. - */ -static char * -ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, -		u16 *value) -{ -	struct ice_label *label; - -	/* Check for valid label section on first call */ -	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) -		return NULL; - -	label = ice_pkg_enum_entry(ice_seg, state, type, NULL, -				   ice_label_enum_handler); -	if (!label) -		return NULL; - -	*value = le16_to_cpu(label->value); -	return label->name; -} - -/** - * ice_add_tunnel_hint - * @hw: pointer to the HW structure - * @label_name: label text - * @val: value of the tunnel port boost entry - */ -static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) -{ -	if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { -		u16 i; - -		for (i = 0; tnls[i].type != TNL_LAST; i++) { -			size_t len = strlen(tnls[i].label_prefix); - -			/* Look for matching label start, before continuing */ -			if (strncmp(label_name, tnls[i].label_prefix, len)) -				continue; - -			/* Make sure this label matches our PF. Note that the PF -			 * character ('0' - '7') will be located where our -			 * prefix string's null terminator is located. -			 */ -			if ((label_name[len] - '0') == hw->pf_id) { -				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; -				hw->tnl.tbl[hw->tnl.count].valid = false; -				hw->tnl.tbl[hw->tnl.count].boost_addr = val; -				hw->tnl.tbl[hw->tnl.count].port = 0; -				hw->tnl.count++; -				break; -			} -		} -	} -} - -/** - * ice_add_dvm_hint - * @hw: pointer to the HW structure - * @val: value of the boost entry - * @enable: true if entry needs to be enabled, or false if needs to be disabled - */ -static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable) -{ -	if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) { -		hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val; -		hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable; -		hw->dvm_upd.count++; -	} -} - -/** - * ice_init_pkg_hints - * @hw: pointer to the HW structure - * @ice_seg: pointer to the segment of the package scan (non-NULL) - * - * This function will scan the package and save off relevant information - * (hints or metadata) for driver use. The ice_seg parameter must not be NULL - * since the first call to ice_enum_labels requires a pointer to an actual - * ice_seg structure. 
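Editor's note: ice_add_tunnel_hint() above matches a label against a prefix such as TNL_VXLAN_PF and then reads the PF number from the character where the prefix's terminator would sit. A standalone sketch of just that check; the prefix and pf_id values here are illustrative:

#include <stdio.h>
#include <string.h>

static int label_matches_pf(const char *label, const char *prefix, int pf_id)
{
	size_t len = strlen(prefix);

	if (strncmp(label, prefix, len))
		return 0;	/* prefix does not match at all */
	/* the PF digit sits where the prefix's terminator would be */
	return (label[len] - '0') == pf_id;
}

int main(void)
{
	printf("%d\n", label_matches_pf("TNL_VXLAN_PF0", "TNL_VXLAN_PF", 0)); /* 1 */
	printf("%d\n", label_matches_pf("TNL_VXLAN_PF3", "TNL_VXLAN_PF", 0)); /* 0 */
	return 0;
}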
- */ -static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) -{ -	struct ice_pkg_enum state; -	char *label_name; -	u16 val; -	int i; - -	memset(&hw->tnl, 0, sizeof(hw->tnl)); -	memset(&state, 0, sizeof(state)); - -	if (!ice_seg) -		return; - -	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, -				     &val); - -	while (label_name) { -		if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) -			/* check for a tunnel entry */ -			ice_add_tunnel_hint(hw, label_name, val); - -		/* check for a dvm mode entry */ -		else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE))) -			ice_add_dvm_hint(hw, val, true); - -		/* check for a svm mode entry */ -		else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE))) -			ice_add_dvm_hint(hw, val, false); - -		label_name = ice_enum_labels(NULL, 0, &state, &val); -	} - -	/* Cache the appropriate boost TCAM entry pointers for tunnels */ -	for (i = 0; i < hw->tnl.count; i++) { -		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, -				     &hw->tnl.tbl[i].boost_entry); -		if (hw->tnl.tbl[i].boost_entry) { -			hw->tnl.tbl[i].valid = true; -			if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT) -				hw->tnl.valid_count[hw->tnl.tbl[i].type]++; -		} -	} - -	/* Cache the appropriate boost TCAM entry pointers for DVM and SVM */ -	for (i = 0; i < hw->dvm_upd.count; i++) -		ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr, -				     &hw->dvm_upd.tbl[i].boost_entry); -} -  /* Key creation */  #define ICE_DC_KEY	0x1	/* don't care */ @@ -810,51 +268,6 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,  }  /** - * ice_acquire_global_cfg_lock - * @hw: pointer to the HW structure - * @access: access type (read or write) - * - * This function will request ownership of the global config lock for reading - * or writing of the package. When attempting to obtain write access, the - * caller must check for the following two return values: - * - * 0         -  Means the caller has acquired the global config lock - *              and can perform writing of the package. - * -EALREADY - Indicates another driver has already written the - *             package or has found that no update was necessary; in - *             this case, the caller can just skip performing any - *             update of the package. - */ -static int -ice_acquire_global_cfg_lock(struct ice_hw *hw, -			    enum ice_aq_res_access_type access) -{ -	int status; - -	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, -				 ICE_GLOBAL_CFG_LOCK_TIMEOUT); - -	if (!status) -		mutex_lock(&ice_global_cfg_lock_sw); -	else if (status == -EALREADY) -		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); - -	return status; -} - -/** - * ice_release_global_cfg_lock - * @hw: pointer to the HW structure - * - * This function will release the global config lock. 
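Editor's note: ice_acquire_global_cfg_lock() pairs a firmware-owned resource with a host-side mutex. The mutex is taken only after firmware grants the resource, so release can unconditionally undo both in reverse order, and -EALREADY is treated as "nothing left to do" rather than a failure. A userspace model of the pairing, with fw_acquire_res() as a stand-in for the admin-queue call:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sw_lock = PTHREAD_MUTEX_INITIALIZER;

static int fw_acquire_res(void)
{
	return 0;	/* pretend firmware granted the lock */
}

static int acquire_global_cfg(void)
{
	int status = fw_acquire_res();

	if (!status)
		pthread_mutex_lock(&sw_lock);	/* SW lock only on success */
	else if (status == -EALREADY)
		fprintf(stderr, "package already written, nothing to do\n");
	return status;
}

static void release_global_cfg(void)
{
	pthread_mutex_unlock(&sw_lock);	/* reverse order of acquire */
	/* ...then the firmware resource would be released here */
}

int main(void)
{
	if (!acquire_global_cfg())
		release_global_cfg();
	return 0;
}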
- */ -static void ice_release_global_cfg_lock(struct ice_hw *hw) -{ -	mutex_unlock(&ice_global_cfg_lock_sw); -	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); -} - -/**   * ice_acquire_change_lock   * @hw: pointer to the HW structure   * @access: access type (read or write) @@ -880,1325 +293,6 @@ void ice_release_change_lock(struct ice_hw *hw)  }  /** - * ice_aq_download_pkg - * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer to transfer - * @buf_size: the size of the package buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL - * - * Download Package (0x0C40) - */ -static int -ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, -		    u16 buf_size, bool last_buf, u32 *error_offset, -		    u32 *error_info, struct ice_sq_cd *cd) -{ -	struct ice_aqc_download_pkg *cmd; -	struct ice_aq_desc desc; -	int status; - -	if (error_offset) -		*error_offset = 0; -	if (error_info) -		*error_info = 0; - -	cmd = &desc.params.download_pkg; -	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); -	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - -	if (last_buf) -		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; - -	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); -	if (status == -EIO) { -		/* Read error from buffer only when the FW returned an error */ -		struct ice_aqc_download_pkg_resp *resp; - -		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; -		if (error_offset) -			*error_offset = le32_to_cpu(resp->error_offset); -		if (error_info) -			*error_info = le32_to_cpu(resp->error_info); -	} - -	return status; -} - -/** - * ice_aq_upload_section - * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer which will receive the section - * @buf_size: the size of the package buffer - * @cd: pointer to command details structure or NULL - * - * Upload Section (0x0C41) - */ -int -ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, -		      u16 buf_size, struct ice_sq_cd *cd) -{ -	struct ice_aq_desc desc; - -	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); -	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - -	return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); -} - -/** - * ice_aq_update_pkg - * @hw: pointer to the hardware structure - * @pkg_buf: the package cmd buffer - * @buf_size: the size of the package cmd buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL - * - * Update Package (0x0C42) - */ -static int -ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, -		  bool last_buf, u32 *error_offset, u32 *error_info, -		  struct ice_sq_cd *cd) -{ -	struct ice_aqc_download_pkg *cmd; -	struct ice_aq_desc desc; -	int status; - -	if (error_offset) -		*error_offset = 0; -	if (error_info) -		*error_info = 0; - -	cmd = &desc.params.download_pkg; -	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); -	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - -	if (last_buf) -		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; - -	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); -	if (status == -EIO) { -		/* Read error from buffer only when the FW returned an error */ -		struct ice_aqc_download_pkg_resp *resp; - -		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; -		if (error_offset) -			*error_offset = 
le32_to_cpu(resp->error_offset); -		if (error_info) -			*error_info = le32_to_cpu(resp->error_info); -	} - -	return status; -} - -/** - * ice_find_seg_in_pkg - * @hw: pointer to the hardware structure - * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) - * @pkg_hdr: pointer to the package header to be searched - * - * This function searches a package file for a particular segment type. On - * success it returns a pointer to the segment header, otherwise it will - * return NULL. - */ -static struct ice_generic_seg_hdr * -ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, -		    struct ice_pkg_hdr *pkg_hdr) -{ -	u32 i; - -	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", -		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, -		  pkg_hdr->pkg_format_ver.update, -		  pkg_hdr->pkg_format_ver.draft); - -	/* Search all package segments for the requested segment type */ -	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { -		struct ice_generic_seg_hdr *seg; - -		seg = (struct ice_generic_seg_hdr *) -			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i])); - -		if (le32_to_cpu(seg->seg_type) == seg_type) -			return seg; -	} - -	return NULL; -} - -/** - * ice_update_pkg_no_lock - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - */ -static int -ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ -	int status = 0; -	u32 i; - -	for (i = 0; i < count; i++) { -		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); -		bool last = ((i + 1) == count); -		u32 offset, info; - -		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end), -					   last, &offset, &info, NULL); - -		if (status) { -			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", -				  status, offset, info); -			break; -		} -	} - -	return status; -} - -/** - * ice_update_pkg - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - * - * Obtains change lock and updates package. - */ -static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ -	int status; - -	status = ice_acquire_change_lock(hw, ICE_RES_WRITE); -	if (status) -		return status; - -	status = ice_update_pkg_no_lock(hw, bufs, count); - -	ice_release_change_lock(hw); - -	return status; -} - -static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) -{ -	switch (aq_err) { -	case ICE_AQ_RC_ENOSEC: -	case ICE_AQ_RC_EBADSIG: -		return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; -	case ICE_AQ_RC_ESVN: -		return ICE_DDP_PKG_FILE_REVISION_TOO_LOW; -	case ICE_AQ_RC_EBADMAN: -	case ICE_AQ_RC_EBADBUF: -		return ICE_DDP_PKG_LOAD_ERROR; -	default: -		return ICE_DDP_PKG_ERR; -	} -} - -/** - * ice_dwnld_cfg_bufs - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - * - * Obtains global config lock and downloads the package configuration buffers - * to the firmware. Metadata buffers are skipped, and the first metadata buffer - * found indicates that the rest of the buffers are all metadata buffers. 
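Editor's note: ice_find_seg_in_pkg() above walks an offset table at the head of the flat package image and casts each offset into a segment header. The toy below reproduces that walk; endianness conversion and the real constants are dropped, and 0x10 is used only because SEGMENT_TYPE_ICE happens to have that value.

#include <stdint.h>
#include <stdio.h>

struct seg_hdr {
	uint32_t type;
	uint32_t size;
};

struct pkg_hdr {
	uint32_t seg_count;
	uint32_t seg_offset[];	/* byte offsets from the package start */
};

static struct seg_hdr *find_seg(struct pkg_hdr *pkg, uint32_t type)
{
	for (uint32_t i = 0; i < pkg->seg_count; i++) {
		struct seg_hdr *seg = (struct seg_hdr *)
			((uint8_t *)pkg + pkg->seg_offset[i]);

		if (seg->type == type)
			return seg;
	}
	return NULL;
}

int main(void)
{
	static uint32_t img[16];
	struct pkg_hdr *pkg = (struct pkg_hdr *)img;
	struct seg_hdr *seg = (struct seg_hdr *)(img + 4);	/* byte 16 */

	pkg->seg_count = 1;
	pkg->seg_offset[0] = 16;
	seg->type = 0x10;
	printf("%s\n", find_seg(pkg, 0x10) ? "found" : "missing");
	return 0;
}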
- */ -static enum ice_ddp_state -ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ -	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; -	struct ice_buf_hdr *bh; -	enum ice_aq_err err; -	u32 offset, info, i; -	int status; - -	if (!bufs || !count) -		return ICE_DDP_PKG_ERR; - -	/* If the first buffer's first section has its metadata bit set -	 * then there are no buffers to be downloaded, and the operation is -	 * considered a success. -	 */ -	bh = (struct ice_buf_hdr *)bufs; -	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) -		return ICE_DDP_PKG_SUCCESS; - -	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); -	if (status) { -		if (status == -EALREADY) -			return ICE_DDP_PKG_ALREADY_LOADED; -		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); -	} - -	for (i = 0; i < count; i++) { -		bool last = ((i + 1) == count); - -		if (!last) { -			/* check next buffer for metadata flag */ -			bh = (struct ice_buf_hdr *)(bufs + i + 1); - -			/* A set metadata flag in the next buffer will signal -			 * that the current buffer will be the last buffer -			 * downloaded -			 */ -			if (le16_to_cpu(bh->section_count)) -				if (le32_to_cpu(bh->section_entry[0].type) & -				    ICE_METADATA_BUF) -					last = true; -		} - -		bh = (struct ice_buf_hdr *)(bufs + i); - -		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, -					     &offset, &info, NULL); - -		/* Save AQ status from download package */ -		if (status) { -			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", -				  status, offset, info); -			err = hw->adminq.sq_last_status; -			state = ice_map_aq_err_to_ddp_state(err); -			break; -		} - -		if (last) -			break; -	} - -	if (!status) { -		status = ice_set_vlan_mode(hw); -		if (status) -			ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", -				  status); -	} - -	ice_release_global_cfg_lock(hw); - -	return state; -} - -/** - * ice_aq_get_pkg_info_list - * @hw: pointer to the hardware structure - * @pkg_info: the buffer which will receive the information list - * @buf_size: the size of the pkg_info information buffer - * @cd: pointer to command details structure or NULL - * - * Get Package Info List (0x0C43) - */ -static int -ice_aq_get_pkg_info_list(struct ice_hw *hw, -			 struct ice_aqc_get_pkg_info_resp *pkg_info, -			 u16 buf_size, struct ice_sq_cd *cd) -{ -	struct ice_aq_desc desc; - -	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); - -	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); -} - -/** - * ice_download_pkg - * @hw: pointer to the hardware structure - * @ice_seg: pointer to the segment of the package to be downloaded - * - * Handles the download of a complete package. 
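Editor's note: the download loop in ice_dwnld_cfg_bufs() above decides "last buffer" by peeking ahead: the buffer immediately before the first metadata buffer is sent with the last-buffer flag set, and a package that opens with metadata downloads nothing at all. A reduced model of that loop, with download() as a placeholder for the admin-queue command:

#include <stdbool.h>
#include <stdio.h>

#define METADATA_FLAG 0x80000000u

struct buf {
	unsigned int first_section_type;
};

static int download(const struct buf *b, bool last)
{
	printf("download type=%#x last=%d\n", b->first_section_type, last);
	return 0;
}

static int download_all(const struct buf *bufs, unsigned int count)
{
	/* a package that starts with metadata has nothing to download */
	if (count && (bufs[0].first_section_type & METADATA_FLAG))
		return 0;

	for (unsigned int i = 0; i < count; i++) {
		bool last = (i + 1 == count);

		/* metadata next means the current buffer is the last real one */
		if (!last && (bufs[i + 1].first_section_type & METADATA_FLAG))
			last = true;

		if (download(&bufs[i], last))
			return -1;
		if (last)
			break;
	}
	return 0;
}

int main(void)
{
	struct buf bufs[] = { { 0x10 }, { 0x20 }, { METADATA_FLAG } };

	return download_all(bufs, 3);
}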
- */ -static enum ice_ddp_state -ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) -{ -	struct ice_buf_table *ice_buf_tbl; -	int status; - -	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", -		  ice_seg->hdr.seg_format_ver.major, -		  ice_seg->hdr.seg_format_ver.minor, -		  ice_seg->hdr.seg_format_ver.update, -		  ice_seg->hdr.seg_format_ver.draft); - -	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", -		  le32_to_cpu(ice_seg->hdr.seg_type), -		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); - -	ice_buf_tbl = ice_find_buf_table(ice_seg); - -	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", -		  le32_to_cpu(ice_buf_tbl->buf_count)); - -	status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, -				    le32_to_cpu(ice_buf_tbl->buf_count)); - -	ice_post_pkg_dwnld_vlan_mode_cfg(hw); - -	return status; -} - -/** - * ice_init_pkg_info - * @hw: pointer to the hardware structure - * @pkg_hdr: pointer to the driver's package hdr - * - * Saves off the package details into the HW structure. - */ -static enum ice_ddp_state -ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) -{ -	struct ice_generic_seg_hdr *seg_hdr; - -	if (!pkg_hdr) -		return ICE_DDP_PKG_ERR; - -	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); -	if (seg_hdr) { -		struct ice_meta_sect *meta; -		struct ice_pkg_enum state; - -		memset(&state, 0, sizeof(state)); - -		/* Get package information from the Metadata Section */ -		meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, -					    ICE_SID_METADATA); -		if (!meta) { -			ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); -			return ICE_DDP_PKG_INVALID_FILE; -		} - -		hw->pkg_ver = meta->ver; -		memcpy(hw->pkg_name, meta->name, sizeof(meta->name)); - -		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", -			  meta->ver.major, meta->ver.minor, meta->ver.update, -			  meta->ver.draft, meta->name); - -		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; -		memcpy(hw->ice_seg_id, seg_hdr->seg_id, -		       sizeof(hw->ice_seg_id)); - -		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", -			  seg_hdr->seg_format_ver.major, -			  seg_hdr->seg_format_ver.minor, -			  seg_hdr->seg_format_ver.update, -			  seg_hdr->seg_format_ver.draft, -			  seg_hdr->seg_id); -	} else { -		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); -		return ICE_DDP_PKG_INVALID_FILE; -	} - -	return ICE_DDP_PKG_SUCCESS; -} - -/** - * ice_get_pkg_info - * @hw: pointer to the hardware structure - * - * Store details of the package currently loaded in HW into the HW structure. 
- */ -static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) -{ -	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; -	struct ice_aqc_get_pkg_info_resp *pkg_info; -	u16 size; -	u32 i; - -	size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); -	pkg_info = kzalloc(size, GFP_KERNEL); -	if (!pkg_info) -		return ICE_DDP_PKG_ERR; - -	if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { -		state = ICE_DDP_PKG_ERR; -		goto init_pkg_free_alloc; -	} - -	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { -#define ICE_PKG_FLAG_COUNT	4 -		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; -		u8 place = 0; - -		if (pkg_info->pkg_info[i].is_active) { -			flags[place++] = 'A'; -			hw->active_pkg_ver = pkg_info->pkg_info[i].ver; -			hw->active_track_id = -				le32_to_cpu(pkg_info->pkg_info[i].track_id); -			memcpy(hw->active_pkg_name, -			       pkg_info->pkg_info[i].name, -			       sizeof(pkg_info->pkg_info[i].name)); -			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; -		} -		if (pkg_info->pkg_info[i].is_active_at_boot) -			flags[place++] = 'B'; -		if (pkg_info->pkg_info[i].is_modified) -			flags[place++] = 'M'; -		if (pkg_info->pkg_info[i].is_in_nvm) -			flags[place++] = 'N'; - -		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", -			  i, pkg_info->pkg_info[i].ver.major, -			  pkg_info->pkg_info[i].ver.minor, -			  pkg_info->pkg_info[i].ver.update, -			  pkg_info->pkg_info[i].ver.draft, -			  pkg_info->pkg_info[i].name, flags); -	} - -init_pkg_free_alloc: -	kfree(pkg_info); - -	return state; -} - -/** - * ice_verify_pkg - verify package - * @pkg: pointer to the package buffer - * @len: size of the package buffer - * - * Verifies various attributes of the package file, including length, format - * version, and the requirement of at least one segment. - */ -static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) -{ -	u32 seg_count; -	u32 i; - -	if (len < struct_size(pkg, seg_offset, 1)) -		return ICE_DDP_PKG_INVALID_FILE; - -	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || -	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || -	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || -	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) -		return ICE_DDP_PKG_INVALID_FILE; - -	/* pkg must have at least one segment */ -	seg_count = le32_to_cpu(pkg->seg_count); -	if (seg_count < 1) -		return ICE_DDP_PKG_INVALID_FILE; - -	/* make sure segment array fits in package length */ -	if (len < struct_size(pkg, seg_offset, seg_count)) -		return ICE_DDP_PKG_INVALID_FILE; - -	/* all segments must fit within length */ -	for (i = 0; i < seg_count; i++) { -		u32 off = le32_to_cpu(pkg->seg_offset[i]); -		struct ice_generic_seg_hdr *seg; - -		/* segment header must fit */ -		if (len < off + sizeof(*seg)) -			return ICE_DDP_PKG_INVALID_FILE; - -		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); - -		/* segment body must fit */ -		if (len < off + le32_to_cpu(seg->seg_size)) -			return ICE_DDP_PKG_INVALID_FILE; -	} - -	return ICE_DDP_PKG_SUCCESS; -} - -/** - * ice_free_seg - free package segment pointer - * @hw: pointer to the hardware structure - * - * Frees the package segment pointer in the proper manner, depending on if the - * segment was allocated or just the passed in pointer was stored. 
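Editor's note: ice_verify_pkg() above is a pure bounds-checking pass: the header, the offset table, every segment header and every segment body must fit inside the stated length before anything dereferences into the image. A simplified, non-endian-aware version of the same checks (overflow hardening omitted for brevity):

#include <stdint.h>

struct generic_seg {
	uint32_t type;
	uint32_t seg_size;
};

struct pkg {
	uint32_t seg_count;
	uint32_t seg_offset[];
};

static int verify_pkg(const struct pkg *p, uint32_t len)
{
	if (len < sizeof(*p) || p->seg_count < 1)
		return -1;
	/* the offset table itself must fit */
	if (len < sizeof(*p) + p->seg_count * sizeof(uint32_t))
		return -1;

	for (uint32_t i = 0; i < p->seg_count; i++) {
		uint32_t off = p->seg_offset[i];
		const struct generic_seg *seg;

		if (len < off + sizeof(*seg))
			return -1;	/* segment header must fit */
		seg = (const struct generic_seg *)((const uint8_t *)p + off);
		if (len < off + seg->seg_size)
			return -1;	/* segment body must fit */
	}
	return 0;
}

int main(void)
{
	/* one segment claimed at byte 32 of a 32-byte image: must fail */
	uint32_t img[8] = { 1, 32 };

	return verify_pkg((const struct pkg *)img, sizeof(img)) ? 0 : 1;
}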
- */ -void ice_free_seg(struct ice_hw *hw) -{ -	if (hw->pkg_copy) { -		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); -		hw->pkg_copy = NULL; -		hw->pkg_size = 0; -	} -	hw->seg = NULL; -} - -/** - * ice_init_pkg_regs - initialize additional package registers - * @hw: pointer to the hardware structure - */ -static void ice_init_pkg_regs(struct ice_hw *hw) -{ -#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF -#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF -#define ICE_SW_BLK_IDX	0 - -	/* setup Switch block input mask, which is 48-bits in two parts */ -	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); -	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); -} - -/** - * ice_chk_pkg_version - check package version for compatibility with driver - * @pkg_ver: pointer to a version structure to check - * - * Check to make sure that the package about to be downloaded is compatible with - * the driver. To be compatible, the major and minor components of the package - * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR - * definitions. - */ -static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) -{ -	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || -	    (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && -	     pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) -		return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; -	else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || -		 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && -		  pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) -		return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; - -	return ICE_DDP_PKG_SUCCESS; -} - -/** - * ice_chk_pkg_compat - * @hw: pointer to the hardware structure - * @ospkg: pointer to the package hdr - * @seg: pointer to the package segment hdr - * - * This function checks the package version compatibility with driver and NVM - */ -static enum ice_ddp_state -ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, -		   struct ice_seg **seg) -{ -	struct ice_aqc_get_pkg_info_resp *pkg; -	enum ice_ddp_state state; -	u16 size; -	u32 i; - -	/* Check package version compatibility */ -	state = ice_chk_pkg_version(&hw->pkg_ver); -	if (state) { -		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); -		return state; -	} - -	/* find ICE segment in given package */ -	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, -						     ospkg); -	if (!*seg) { -		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); -		return ICE_DDP_PKG_INVALID_FILE; -	} - -	/* Check if FW is compatible with the OS package */ -	size = struct_size(pkg, pkg_info, ICE_PKG_CNT); -	pkg = kzalloc(size, GFP_KERNEL); -	if (!pkg) -		return ICE_DDP_PKG_ERR; - -	if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { -		state = ICE_DDP_PKG_LOAD_ERROR; -		goto fw_ddp_compat_free_alloc; -	} - -	for (i = 0; i < le32_to_cpu(pkg->count); i++) { -		/* loop till we find the NVM package */ -		if (!pkg->pkg_info[i].is_in_nvm) -			continue; -		if ((*seg)->hdr.seg_format_ver.major != -			pkg->pkg_info[i].ver.major || -		    (*seg)->hdr.seg_format_ver.minor > -			pkg->pkg_info[i].ver.minor) { -			state = ICE_DDP_PKG_FW_MISMATCH; -			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); -		} -		/* done processing NVM package so break */ -		break; -	} -fw_ddp_compat_free_alloc: -	kfree(pkg); -	return state; -} - -/** - * ice_sw_fv_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the field vector entry to be returned - * @offset: ptr to variable that receives the offset in the field vector 
table - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * This function treats the given section as of type ice_sw_fv_section and - * enumerates offset field. "offset" is an index into the field vector table. - */ -static void * -ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ -	struct ice_sw_fv_section *fv_section = section; - -	if (!section || sect_type != ICE_SID_FLD_VEC_SW) -		return NULL; -	if (index >= le16_to_cpu(fv_section->count)) -		return NULL; -	if (offset) -		/* "index" passed in to this function is relative to a given -		 * 4k block. To get to the true index into the field vector -		 * table need to add the relative index to the base_offset -		 * field of this section -		 */ -		*offset = le16_to_cpu(fv_section->base_offset) + index; -	return fv_section->fv + index; -} - -/** - * ice_get_prof_index_max - get the max profile index for used profile - * @hw: pointer to the HW struct - * - * Calling this function will get the max profile index for used profile - * and store the index number in struct ice_switch_info *switch_info - * in HW for following use. - */ -static int ice_get_prof_index_max(struct ice_hw *hw) -{ -	u16 prof_index = 0, j, max_prof_index = 0; -	struct ice_pkg_enum state; -	struct ice_seg *ice_seg; -	bool flag = false; -	struct ice_fv *fv; -	u32 offset; - -	memset(&state, 0, sizeof(state)); - -	if (!hw->seg) -		return -EINVAL; - -	ice_seg = hw->seg; - -	do { -		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, -					&offset, ice_sw_fv_handler); -		if (!fv) -			break; -		ice_seg = NULL; - -		/* in the profile that not be used, the prot_id is set to 0xff -		 * and the off is set to 0x1ff for all the field vectors. -		 */ -		for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) -			if (fv->ew[j].prot_id != ICE_PROT_INVALID || -			    fv->ew[j].off != ICE_FV_OFFSET_INVAL) -				flag = true; -		if (flag && prof_index > max_prof_index) -			max_prof_index = prof_index; - -		prof_index++; -		flag = false; -	} while (fv); - -	hw->switch_info->max_used_prof_index = max_prof_index; - -	return 0; -} - -/** - * ice_get_ddp_pkg_state - get DDP pkg state after download - * @hw: pointer to the HW struct - * @already_loaded: indicates if pkg was already loaded onto the device - */ -static enum ice_ddp_state -ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded) -{ -	if (hw->pkg_ver.major == hw->active_pkg_ver.major && -	    hw->pkg_ver.minor == hw->active_pkg_ver.minor && -	    hw->pkg_ver.update == hw->active_pkg_ver.update && -	    hw->pkg_ver.draft == hw->active_pkg_ver.draft && -	    !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { -		if (already_loaded) -			return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED; -		else -			return ICE_DDP_PKG_SUCCESS; -	} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || -		   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { -		return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; -	} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && -		   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { -		return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED; -	} else { -		return ICE_DDP_PKG_ERR; -	} -} - -/** - * ice_init_pkg - initialize/download package - * @hw: pointer to the hardware structure - * @buf: pointer to the package buffer - * @len: size of the package buffer - * - * This function initializes a package. The package contains HW tables - * required to do packet processing. First, the function extracts package - * information such as version. 
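Editor's note: ice_get_ddp_pkg_state() above triages the already-loaded case into "same version", "unsupported", or "compatible but different". The sketch below loosely follows that logic with invented enum names and the 1.3 supported version taken from the removed ICE_PKG_SUPP_VER_* defines; the real function also compares the package name, which is omitted here.

#include <stdbool.h>
#include <string.h>

struct ver {
	int major, minor, update, draft;
};

enum pkg_state { PKG_OK, PKG_SAME_ALREADY, PKG_UNSUPPORTED, PKG_COMPAT };

#define SUPP_MAJ 1
#define SUPP_MNR 3

static enum pkg_state pkg_state(const struct ver *want,
				const struct ver *active, bool already_loaded)
{
	if (!memcmp(want, active, sizeof(*want)))
		return already_loaded ? PKG_SAME_ALREADY : PKG_OK;
	if (active->major != SUPP_MAJ || active->minor != SUPP_MNR)
		return PKG_UNSUPPORTED;	/* loaded pkg not usable by driver */
	return PKG_COMPAT;	/* supported, but not the one we asked for */
}

int main(void)
{
	struct ver want = { 1, 3, 2, 0 }, active = { 1, 3, 0, 0 };

	return pkg_state(&want, &active, false) == PKG_COMPAT ? 0 : 1;
}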
Then it finds the ice configuration segment - * within the package; this function then saves a copy of the segment pointer - * within the supplied package buffer. Next, the function will cache any hints - * from the package, followed by downloading the package itself. Note, that if - * a previous PF driver has already downloaded the package successfully, then - * the current driver will not have to download the package again. - * - * The local package contents will be used to query default behavior and to - * update specific sections of the HW's version of the package (e.g. to update - * the parse graph to understand new protocols). - * - * This function stores a pointer to the package buffer memory, and it is - * expected that the supplied buffer will not be freed immediately. If the - * package buffer needs to be freed, such as when read from a file, use - * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this - * case. - */ -enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) -{ -	bool already_loaded = false; -	enum ice_ddp_state state; -	struct ice_pkg_hdr *pkg; -	struct ice_seg *seg; - -	if (!buf || !len) -		return ICE_DDP_PKG_ERR; - -	pkg = (struct ice_pkg_hdr *)buf; -	state = ice_verify_pkg(pkg, len); -	if (state) { -		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", -			  state); -		return state; -	} - -	/* initialize package info */ -	state = ice_init_pkg_info(hw, pkg); -	if (state) -		return state; - -	/* before downloading the package, check package version for -	 * compatibility with driver -	 */ -	state = ice_chk_pkg_compat(hw, pkg, &seg); -	if (state) -		return state; - -	/* initialize package hints and then download package */ -	ice_init_pkg_hints(hw, seg); -	state = ice_download_pkg(hw, seg); -	if (state == ICE_DDP_PKG_ALREADY_LOADED) { -		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); -		already_loaded = true; -	} - -	/* Get information on the package currently loaded in HW, then make sure -	 * the driver is compatible with this version. -	 */ -	if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) { -		state = ice_get_pkg_info(hw); -		if (!state) -			state = ice_get_ddp_pkg_state(hw, already_loaded); -	} - -	if (ice_is_init_pkg_successful(state)) { -		hw->seg = seg; -		/* on successful package download update other required -		 * registers to support the package and fill HW tables -		 * with package content. -		 */ -		ice_init_pkg_regs(hw); -		ice_fill_blk_tbls(hw); -		ice_fill_hw_ptype(hw); -		ice_get_prof_index_max(hw); -	} else { -		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", -			  state); -	} - -	return state; -} - -/** - * ice_copy_and_init_pkg - initialize/download a copy of the package - * @hw: pointer to the hardware structure - * @buf: pointer to the package buffer - * @len: size of the package buffer - * - * This function copies the package buffer, and then calls ice_init_pkg() to - * initialize the copied package contents. - * - * The copying is necessary if the package buffer supplied is constant, or if - * the memory may disappear shortly after calling this function. - * - * If the package buffer resides in the data segment and can be modified, the - * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). - * - * However, if the package buffer needs to be copied first, such as when being - * read from a file, the caller should use ice_copy_and_init_pkg(). - * - * This function will first copy the package buffer, before calling - * ice_init_pkg(). 
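Editor's note: as the surrounding comment explains, ice_copy_and_init_pkg() exists for callers whose buffer may go away: duplicate first, initialize from the copy, free the copy on failure, otherwise record it for teardown. A userspace model of that ownership pattern, with init_pkg() as a placeholder for the real initialization:

#include <stdlib.h>
#include <string.h>

static void *pkg_copy;	/* tracked for teardown, like hw->pkg_copy */
static size_t pkg_size;

static int init_pkg(void *buf, size_t len)
{
	return (buf && len) ? 0 : -1;
}

static int copy_and_init_pkg(const void *buf, size_t len)
{
	void *copy;
	int ret;

	if (!buf || !len)
		return -1;

	copy = malloc(len);
	if (!copy)
		return -1;
	memcpy(copy, buf, len);

	ret = init_pkg(copy, len);
	if (ret) {
		free(copy);		/* failed: the copy is not kept */
	} else {
		pkg_copy = copy;	/* success: keep for later teardown */
		pkg_size = len;
	}
	return ret;
}

int main(void)
{
	const char blob[] = "ddp image";

	return copy_and_init_pkg(blob, sizeof(blob));
}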
The caller is free to immediately destroy the original - * package buffer, as the new copy will be managed by this function and - * related routines. - */ -enum ice_ddp_state -ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) -{ -	enum ice_ddp_state state; -	u8 *buf_copy; - -	if (!buf || !len) -		return ICE_DDP_PKG_ERR; - -	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); - -	state = ice_init_pkg(hw, buf_copy, len); -	if (!ice_is_init_pkg_successful(state)) { -		/* Free the copy, since we failed to initialize the package */ -		devm_kfree(ice_hw_to_dev(hw), buf_copy); -	} else { -		/* Track the copied pkg so we can free it later */ -		hw->pkg_copy = buf_copy; -		hw->pkg_size = len; -	} - -	return state; -} - -/** - * ice_is_init_pkg_successful - check if DDP init was successful - * @state: state of the DDP pkg after download - */ -bool ice_is_init_pkg_successful(enum ice_ddp_state state) -{ -	switch (state) { -	case ICE_DDP_PKG_SUCCESS: -	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: -	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: -		return true; -	default: -		return false; -	} -} - -/** - * ice_pkg_buf_alloc - * @hw: pointer to the HW structure - * - * Allocates a package buffer and returns a pointer to the buffer header. - * Note: all package contents must be in Little Endian form. - */ -static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) -{ -	struct ice_buf_build *bld; -	struct ice_buf_hdr *buf; - -	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL); -	if (!bld) -		return NULL; - -	buf = (struct ice_buf_hdr *)bld; -	buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr, -					     section_entry)); -	return bld; -} - -static bool ice_is_gtp_u_profile(u16 prof_idx) -{ -	return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID && -		prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) || -	       prof_idx == ICE_PROFID_IPV4_GTPU_TEID; -} - -static bool ice_is_gtp_c_profile(u16 prof_idx) -{ -	switch (prof_idx) { -	case ICE_PROFID_IPV4_GTPC_TEID: -	case ICE_PROFID_IPV4_GTPC_NO_TEID: -	case ICE_PROFID_IPV6_GTPC_TEID: -	case ICE_PROFID_IPV6_GTPC_NO_TEID: -		return true; -	default: -		return false; -	} -} - -/** - * ice_get_sw_prof_type - determine switch profile type - * @hw: pointer to the HW structure - * @fv: pointer to the switch field vector - * @prof_idx: profile index to check - */ -static enum ice_prof_type -ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx) -{ -	u16 i; - -	if (ice_is_gtp_c_profile(prof_idx)) -		return ICE_PROF_TUN_GTPC; - -	if (ice_is_gtp_u_profile(prof_idx)) -		return ICE_PROF_TUN_GTPU; - -	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { -		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */ -		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && -		    fv->ew[i].off == ICE_VNI_OFFSET) -			return ICE_PROF_TUN_UDP; - -		/* GRE tunnel will have GRE protocol */ -		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) -			return ICE_PROF_TUN_GRE; -	} - -	return ICE_PROF_NON_TUN; -} - -/** - * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type - * @hw: pointer to hardware structure - * @req_profs: type of profiles requested - * @bm: pointer to memory for returning the bitmap of field vectors - */ -void -ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, -		     unsigned long *bm) -{ -	struct ice_pkg_enum state; -	struct ice_seg *ice_seg; -	struct ice_fv *fv; - -	if (req_profs == ICE_PROF_ALL) { -		bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); -		return; -	} - -	
memset(&state, 0, sizeof(state)); -	bitmap_zero(bm, ICE_MAX_NUM_PROFILES); -	ice_seg = hw->seg; -	do { -		enum ice_prof_type prof_type; -		u32 offset; - -		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, -					&offset, ice_sw_fv_handler); -		ice_seg = NULL; - -		if (fv) { -			/* Determine field vector type */ -			prof_type = ice_get_sw_prof_type(hw, fv, offset); - -			if (req_profs & prof_type) -				set_bit((u16)offset, bm); -		} -	} while (fv); -} - -/** - * ice_get_sw_fv_list - * @hw: pointer to the HW structure - * @lkups: list of protocol types - * @bm: bitmap of field vectors to consider - * @fv_list: Head of a list - * - * Finds all the field vector entries from switch block that contain - * a given protocol ID and offset and returns a list of structures of type - * "ice_sw_fv_list_entry". Every structure in the list has a field vector - * definition and profile ID information - * NOTE: The caller of the function is responsible for freeing the memory - * allocated for every list entry. - */ -int -ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, -		   unsigned long *bm, struct list_head *fv_list) -{ -	struct ice_sw_fv_list_entry *fvl; -	struct ice_sw_fv_list_entry *tmp; -	struct ice_pkg_enum state; -	struct ice_seg *ice_seg; -	struct ice_fv *fv; -	u32 offset; - -	memset(&state, 0, sizeof(state)); - -	if (!lkups->n_val_words || !hw->seg) -		return -EINVAL; - -	ice_seg = hw->seg; -	do { -		u16 i; - -		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, -					&offset, ice_sw_fv_handler); -		if (!fv) -			break; -		ice_seg = NULL; - -		/* If field vector is not in the bitmap list, then skip this -		 * profile. -		 */ -		if (!test_bit((u16)offset, bm)) -			continue; - -		for (i = 0; i < lkups->n_val_words; i++) { -			int j; - -			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) -				if (fv->ew[j].prot_id == -				    lkups->fv_words[i].prot_id && -				    fv->ew[j].off == lkups->fv_words[i].off) -					break; -			if (j >= hw->blk[ICE_BLK_SW].es.fvw) -				break; -			if (i + 1 == lkups->n_val_words) { -				fvl = devm_kzalloc(ice_hw_to_dev(hw), -						   sizeof(*fvl), GFP_KERNEL); -				if (!fvl) -					goto err; -				fvl->fv_ptr = fv; -				fvl->profile_id = offset; -				list_add(&fvl->list_entry, fv_list); -				break; -			} -		} -	} while (fv); -	if (list_empty(fv_list)) { -		dev_warn(ice_hw_to_dev(hw), "Required profiles not found in currently loaded DDP package"); -		return -EIO; -	} - -	return 0; - -err: -	list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) { -		list_del(&fvl->list_entry); -		devm_kfree(ice_hw_to_dev(hw), fvl); -	} - -	return -ENOMEM; -} - -/** - * ice_init_prof_result_bm - Initialize the profile result index bitmap - * @hw: pointer to hardware structure - */ -void ice_init_prof_result_bm(struct ice_hw *hw) -{ -	struct ice_pkg_enum state; -	struct ice_seg *ice_seg; -	struct ice_fv *fv; - -	memset(&state, 0, sizeof(state)); - -	if (!hw->seg) -		return; - -	ice_seg = hw->seg; -	do { -		u32 off; -		u16 i; - -		fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, -					&off, ice_sw_fv_handler); -		ice_seg = NULL; -		if (!fv) -			break; - -		bitmap_zero(hw->switch_info->prof_res_bm[off], -			    ICE_MAX_FV_WORDS); - -		/* Determine empty field vector indices, these can be -		 * used for recipe results. Skip index 0, since it is -		 * always used for Switch ID. 
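Editor's note: the matching loop in ice_get_sw_fv_list() above accepts a profile only if every requested (protocol ID, offset) word appears somewhere in its field vector. The sketch below reproduces that nested "all words must match" test with simplified types:

#include <stdbool.h>
#include <stdint.h>

struct fv_word {
	uint8_t prot_id;
	uint16_t off;
};

static bool fv_has_word(const struct fv_word *fv, int fvw,
			const struct fv_word *want)
{
	for (int j = 0; j < fvw; j++)
		if (fv[j].prot_id == want->prot_id && fv[j].off == want->off)
			return true;
	return false;
}

static bool fv_matches_all(const struct fv_word *fv, int fvw,
			   const struct fv_word *words, int n_words)
{
	for (int i = 0; i < n_words; i++)
		if (!fv_has_word(fv, fvw, &words[i]))
			return false;	/* one missing word disqualifies */
	return true;
}

int main(void)
{
	struct fv_word fv[] = { { 17, 14 }, { 0xff, 0x1ff } };
	struct fv_word want[] = { { 17, 14 } };

	return fv_matches_all(fv, 2, want, 1) ? 0 : 1;
}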
-		 */ -		for (i = 1; i < ICE_MAX_FV_WORDS; i++) -			if (fv->ew[i].prot_id == ICE_PROT_INVALID && -			    fv->ew[i].off == ICE_FV_OFFSET_INVAL) -				set_bit(i, hw->switch_info->prof_res_bm[off]); -	} while (fv); -} - -/** - * ice_pkg_buf_free - * @hw: pointer to the HW structure - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Frees a package buffer - */ -void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) -{ -	devm_kfree(ice_hw_to_dev(hw), bld); -} - -/** - * ice_pkg_buf_reserve_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @count: the number of sections to reserve - * - * Reserves one or more section table entries in a package buffer. This routine - * can be called multiple times as long as they are made before calling - * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() - * is called once, the number of sections that can be allocated will not be able - * to be increased; not using all reserved sections is fine, but this will - * result in some wasted space in the buffer. - * Note: all package contents must be in Little Endian form. - */ -static int -ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) -{ -	struct ice_buf_hdr *buf; -	u16 section_count; -	u16 data_end; - -	if (!bld) -		return -EINVAL; - -	buf = (struct ice_buf_hdr *)&bld->buf; - -	/* already an active section, can't increase table size */ -	section_count = le16_to_cpu(buf->section_count); -	if (section_count > 0) -		return -EIO; - -	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) -		return -EIO; -	bld->reserved_section_table_entries += count; - -	data_end = le16_to_cpu(buf->data_end) + -		flex_array_size(buf, section_entry, count); -	buf->data_end = cpu_to_le16(data_end); - -	return 0; -} - -/** - * ice_pkg_buf_alloc_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @type: the section type value - * @size: the size of the section to reserve (in bytes) - * - * Reserves memory in the buffer for a section's content and updates the - * buffers' status accordingly. This routine returns a pointer to the first - * byte of the section start within the buffer, which is used to fill in the - * section contents. - * Note: all package contents must be in Little Endian form. 
- */ -static void * -ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) -{ -	struct ice_buf_hdr *buf; -	u16 sect_count; -	u16 data_end; - -	if (!bld || !type || !size) -		return NULL; - -	buf = (struct ice_buf_hdr *)&bld->buf; - -	/* check for enough space left in buffer */ -	data_end = le16_to_cpu(buf->data_end); - -	/* section start must align on 4 byte boundary */ -	data_end = ALIGN(data_end, 4); - -	if ((data_end + size) > ICE_MAX_S_DATA_END) -		return NULL; - -	/* check for more available section table entries */ -	sect_count = le16_to_cpu(buf->section_count); -	if (sect_count < bld->reserved_section_table_entries) { -		void *section_ptr = ((u8 *)buf) + data_end; - -		buf->section_entry[sect_count].offset = cpu_to_le16(data_end); -		buf->section_entry[sect_count].size = cpu_to_le16(size); -		buf->section_entry[sect_count].type = cpu_to_le32(type); - -		data_end += size; -		buf->data_end = cpu_to_le16(data_end); - -		buf->section_count = cpu_to_le16(sect_count + 1); -		return section_ptr; -	} - -	/* no free section table entries */ -	return NULL; -} - -/** - * ice_pkg_buf_alloc_single_section - * @hw: pointer to the HW structure - * @type: the section type value - * @size: the size of the section to reserve (in bytes) - * @section: returns pointer to the section - * - * Allocates a package buffer with a single section. - * Note: all package contents must be in Little Endian form. - */ -struct ice_buf_build * -ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, -				 void **section) -{ -	struct ice_buf_build *buf; - -	if (!section) -		return NULL; - -	buf = ice_pkg_buf_alloc(hw); -	if (!buf) -		return NULL; - -	if (ice_pkg_buf_reserve_section(buf, 1)) -		goto ice_pkg_buf_alloc_single_section_err; - -	*section = ice_pkg_buf_alloc_section(buf, type, size); -	if (!*section) -		goto ice_pkg_buf_alloc_single_section_err; - -	return buf; - -ice_pkg_buf_alloc_single_section_err: -	ice_pkg_buf_free(hw, buf); -	return NULL; -} - -/** - * ice_pkg_buf_get_active_sections - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Returns the number of active sections. Before using the package buffer - * in an update package command, the caller should make sure that there is at - * least one active section - otherwise, the buffer is not legal and should - * not be used. - * Note: all package contents must be in Little Endian form. 
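Editor's note: ice_pkg_buf_reserve_section() and ice_pkg_buf_alloc_section() above implement two-phase buffer building: table entries must be reserved before the first section is carved, section payloads start on 4-byte boundaries, and data_end tracks the first free byte. A toy builder showing the same invariants; the 8-byte table entry and 4-byte fixed header are assumptions, not the real layout:

#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE 4096

struct builder {
	uint16_t section_count;
	uint16_t reserved;	/* reserved table entries */
	uint16_t data_end;	/* first free byte in the buffer */
};

static int reserve_sections(struct builder *b, uint16_t count)
{
	if (b->section_count)	/* too late once a section exists */
		return -1;
	b->reserved += count;
	b->data_end += count * 8;	/* grow past the entry table */
	return 0;
}

static int alloc_section(struct builder *b, uint16_t size)
{
	uint16_t start = (b->data_end + 3) & ~3;	/* 4-byte alignment */

	if (start + size > BUF_SIZE || b->section_count >= b->reserved)
		return -1;
	b->section_count++;
	b->data_end = start + size;
	return start;	/* offset of the new section's payload */
}

int main(void)
{
	struct builder b = { 0, 0, 4 };	/* assume a 4-byte fixed header */

	if (reserve_sections(&b, 2))
		return 1;
	printf("first section at %d\n", alloc_section(&b, 100));	/* 20 */
	printf("second section at %d\n", alloc_section(&b, 60));	/* 120 */
	return 0;
}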
- */ -static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) -{ -	struct ice_buf_hdr *buf; - -	if (!bld) -		return 0; - -	buf = (struct ice_buf_hdr *)&bld->buf; -	return le16_to_cpu(buf->section_count); -} - -/** - * ice_pkg_buf - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Return a pointer to the buffer's header - */ -struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) -{ -	if (!bld) -		return NULL; - -	return &bld->buf; -} - -/**   * ice_get_open_tunnel_port - retrieve an open tunnel port   * @hw: pointer to the HW structure   * @port: returns open port @@ -2297,10 +391,11 @@ ice_upd_dvm_boost_entry_err:   */  int ice_set_dvm_boost_entries(struct ice_hw *hw)  { -	int status;  	u16 i;  	for (i = 0; i < hw->dvm_upd.count; i++) { +		int status; +  		status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);  		if (status)  			return status; @@ -2757,7 +852,6 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2)  		count++;  	list_for_each_entry(tmp2, list2, list)  		chk_count++; -	/* cppcheck-suppress knownConditionTrueFalse */  	if (!count || count != chk_count)  		return false; @@ -5102,12 +3196,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,  	u16 idx = vsig & ICE_VSIG_IDX_M;  	struct ice_vsig_vsi *vsi_cur;  	struct ice_vsig_prof *d, *t; -	int status;  	/* remove TCAM entries */  	list_for_each_entry_safe(d, t,  				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,  				 list) { +		int status; +  		status = ice_rem_prof_id(hw, blk, d);  		if (status)  			return status; @@ -5158,12 +3253,13 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,  {  	u16 idx = vsig & ICE_VSIG_IDX_M;  	struct ice_vsig_prof *p, *t; -	int status;  	list_for_each_entry_safe(p, t,  				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,  				 list)  		if (p->profile_cookie == hdl) { +			int status; +  			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)  				/* this is the last profile, remove the VSIG */  				return ice_rem_vsig(hw, blk, vsig, chg); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 9c530c86703e..7af7c8e9aa4e 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -6,75 +6,6 @@  #include "ice_type.h" -/* Package minimal version supported */ -#define ICE_PKG_SUPP_VER_MAJ	1 -#define ICE_PKG_SUPP_VER_MNR	3 - -/* Package format version */ -#define ICE_PKG_FMT_VER_MAJ	1 -#define ICE_PKG_FMT_VER_MNR	0 -#define ICE_PKG_FMT_VER_UPD	0 -#define ICE_PKG_FMT_VER_DFT	0 - -#define ICE_PKG_CNT 4 - -enum ice_ddp_state { -	/* Indicates that this call to ice_init_pkg -	 * successfully loaded the requested DDP package -	 */ -	ICE_DDP_PKG_SUCCESS			= 0, - -	/* Generic error for already loaded errors, it is mapped later to -	 * the more specific one (one of the next 3) -	 */ -	ICE_DDP_PKG_ALREADY_LOADED			= -1, - -	/* Indicates that a DDP package of the same version has already been -	 * loaded onto the device by a previous call or by another PF -	 */ -	ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED		= -2, - -	/* The device has a DDP package that is not supported by the driver */ -	ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED	= -3, - -	/* The device has a compatible package -	 * (but different from the request) already loaded -	 */ -	ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED		= -4, - -	/* The firmware loaded on the device is not compatible with -	 * the DDP package loaded -	 */ -	ICE_DDP_PKG_FW_MISMATCH				= -5, - 
-	/* The DDP package file is invalid */ -	ICE_DDP_PKG_INVALID_FILE			= -6, - -	/* The version of the DDP package provided is higher than -	 * the driver supports -	 */ -	ICE_DDP_PKG_FILE_VERSION_TOO_HIGH		= -7, - -	/* The version of the DDP package provided is lower than the -	 * driver supports -	 */ -	ICE_DDP_PKG_FILE_VERSION_TOO_LOW		= -8, - -	/* The signature of the DDP package file provided is invalid */ -	ICE_DDP_PKG_FILE_SIGNATURE_INVALID		= -9, - -	/* The DDP package file security revision is too low and not -	 * supported by firmware -	 */ -	ICE_DDP_PKG_FILE_REVISION_TOO_LOW		= -10, - -	/* An error occurred in firmware while loading the DDP package */ -	ICE_DDP_PKG_LOAD_ERROR				= -11, - -	/* Other errors */ -	ICE_DDP_PKG_ERR					= -12 -}; -  int  ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);  void ice_release_change_lock(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 974d14a83b2e..4f42e14ed3ae 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -3,205 +3,7 @@  #ifndef _ICE_FLEX_TYPE_H_  #define _ICE_FLEX_TYPE_H_ - -#define ICE_FV_OFFSET_INVAL	0x1FF - -/* Extraction Sequence (Field Vector) Table */ -struct ice_fv_word { -	u8 prot_id; -	u16 off;		/* Offset within the protocol header */ -	u8 resvrd; -} __packed; - -#define ICE_MAX_NUM_PROFILES 256 - -#define ICE_MAX_FV_WORDS 48 -struct ice_fv { -	struct ice_fv_word ew[ICE_MAX_FV_WORDS]; -}; - -/* Package and segment headers and tables */ -struct ice_pkg_hdr { -	struct ice_pkg_ver pkg_format_ver; -	__le32 seg_count; -	__le32 seg_offset[]; -}; - -/* generic segment */ -struct ice_generic_seg_hdr { -#define SEGMENT_TYPE_METADATA	0x00000001 -#define SEGMENT_TYPE_ICE	0x00000010 -	__le32 seg_type; -	struct ice_pkg_ver seg_format_ver; -	__le32 seg_size; -	char seg_id[ICE_PKG_NAME_SIZE]; -}; - -/* ice specific segment */ - -union ice_device_id { -	struct { -		__le16 device_id; -		__le16 vendor_id; -	} dev_vend_id; -	__le32 id; -}; - -struct ice_device_id_entry { -	union ice_device_id device; -	union ice_device_id sub_device; -}; - -struct ice_seg { -	struct ice_generic_seg_hdr hdr; -	__le32 device_table_count; -	struct ice_device_id_entry device_table[]; -}; - -struct ice_nvm_table { -	__le32 table_count; -	__le32 vers[]; -}; - -struct ice_buf { -#define ICE_PKG_BUF_SIZE	4096 -	u8 buf[ICE_PKG_BUF_SIZE]; -}; - -struct ice_buf_table { -	__le32 buf_count; -	struct ice_buf buf_array[]; -}; - -/* global metadata specific segment */ -struct ice_global_metadata_seg { -	struct ice_generic_seg_hdr hdr; -	struct ice_pkg_ver pkg_ver; -	__le32 rsvd; -	char pkg_name[ICE_PKG_NAME_SIZE]; -}; - -#define ICE_MIN_S_OFF		12 -#define ICE_MAX_S_OFF		4095 -#define ICE_MIN_S_SZ		1 -#define ICE_MAX_S_SZ		4084 - -/* section information */ -struct ice_section_entry { -	__le32 type; -	__le16 offset; -	__le16 size; -}; - -#define ICE_MIN_S_COUNT		1 -#define ICE_MAX_S_COUNT		511 -#define ICE_MIN_S_DATA_END	12 -#define ICE_MAX_S_DATA_END	4096 - -#define ICE_METADATA_BUF	0x80000000 - -struct ice_buf_hdr { -	__le16 section_count; -	__le16 data_end; -	struct ice_section_entry section_entry[]; -}; - -#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ -	struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\ -	(ent_sz)) - -/* ice package section IDs */ -#define ICE_SID_METADATA		1 -#define ICE_SID_XLT0_SW			10 -#define ICE_SID_XLT_KEY_BUILDER_SW	11 -#define 
ICE_SID_XLT1_SW			12 -#define ICE_SID_XLT2_SW			13 -#define ICE_SID_PROFID_TCAM_SW		14 -#define ICE_SID_PROFID_REDIR_SW		15 -#define ICE_SID_FLD_VEC_SW		16 -#define ICE_SID_CDID_KEY_BUILDER_SW	17 - -struct ice_meta_sect { -	struct ice_pkg_ver ver; -#define ICE_META_SECT_NAME_SIZE	28 -	char name[ICE_META_SECT_NAME_SIZE]; -	__le32 track_id; -}; - -#define ICE_SID_CDID_REDIR_SW		18 - -#define ICE_SID_XLT0_ACL		20 -#define ICE_SID_XLT_KEY_BUILDER_ACL	21 -#define ICE_SID_XLT1_ACL		22 -#define ICE_SID_XLT2_ACL		23 -#define ICE_SID_PROFID_TCAM_ACL		24 -#define ICE_SID_PROFID_REDIR_ACL	25 -#define ICE_SID_FLD_VEC_ACL		26 -#define ICE_SID_CDID_KEY_BUILDER_ACL	27 -#define ICE_SID_CDID_REDIR_ACL		28 - -#define ICE_SID_XLT0_FD			30 -#define ICE_SID_XLT_KEY_BUILDER_FD	31 -#define ICE_SID_XLT1_FD			32 -#define ICE_SID_XLT2_FD			33 -#define ICE_SID_PROFID_TCAM_FD		34 -#define ICE_SID_PROFID_REDIR_FD		35 -#define ICE_SID_FLD_VEC_FD		36 -#define ICE_SID_CDID_KEY_BUILDER_FD	37 -#define ICE_SID_CDID_REDIR_FD		38 - -#define ICE_SID_XLT0_RSS		40 -#define ICE_SID_XLT_KEY_BUILDER_RSS	41 -#define ICE_SID_XLT1_RSS		42 -#define ICE_SID_XLT2_RSS		43 -#define ICE_SID_PROFID_TCAM_RSS		44 -#define ICE_SID_PROFID_REDIR_RSS	45 -#define ICE_SID_FLD_VEC_RSS		46 -#define ICE_SID_CDID_KEY_BUILDER_RSS	47 -#define ICE_SID_CDID_REDIR_RSS		48 - -#define ICE_SID_RXPARSER_MARKER_PTYPE	55 -#define ICE_SID_RXPARSER_BOOST_TCAM	56 -#define ICE_SID_RXPARSER_METADATA_INIT	58 -#define ICE_SID_TXPARSER_BOOST_TCAM	66 - -#define ICE_SID_XLT0_PE			80 -#define ICE_SID_XLT_KEY_BUILDER_PE	81 -#define ICE_SID_XLT1_PE			82 -#define ICE_SID_XLT2_PE			83 -#define ICE_SID_PROFID_TCAM_PE		84 -#define ICE_SID_PROFID_REDIR_PE		85 -#define ICE_SID_FLD_VEC_PE		86 -#define ICE_SID_CDID_KEY_BUILDER_PE	87 -#define ICE_SID_CDID_REDIR_PE		88 - -/* Label Metadata section IDs */ -#define ICE_SID_LBL_FIRST		0x80000010 -#define ICE_SID_LBL_RXPARSER_TMEM	0x80000018 -/* The following define MUST be updated to reflect the last label section ID */ -#define ICE_SID_LBL_LAST		0x80000038 - -enum ice_block { -	ICE_BLK_SW = 0, -	ICE_BLK_ACL, -	ICE_BLK_FD, -	ICE_BLK_RSS, -	ICE_BLK_PE, -	ICE_BLK_COUNT -}; - -enum ice_sect { -	ICE_XLT0 = 0, -	ICE_XLT_KB, -	ICE_XLT1, -	ICE_XLT2, -	ICE_PROF_TCAM, -	ICE_PROF_REDIR, -	ICE_VEC_TBL, -	ICE_CDID_KB, -	ICE_CDID_REDIR, -	ICE_SECT_COUNT -}; +#include "ice_ddp.h"  /* Packet Type (PTYPE) values */  #define ICE_PTYPE_MAC_PAY		1 @@ -283,134 +85,6 @@ struct ice_ptype_attributes {  	enum ice_ptype_attrib_type attrib;  }; -/* package labels */ -struct ice_label { -	__le16 value; -#define ICE_PKG_LABEL_SIZE	64 -	char name[ICE_PKG_LABEL_SIZE]; -}; - -struct ice_label_section { -	__le16 count; -	struct ice_label label[]; -}; - -#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ -	struct_size((struct ice_label_section *)0, label, 1) - \ -	sizeof(struct ice_label), sizeof(struct ice_label)) - -struct ice_sw_fv_section { -	__le16 count; -	__le16 base_offset; -	struct ice_fv fv[]; -}; - -struct ice_sw_fv_list_entry { -	struct list_head list_entry; -	u32 profile_id; -	struct ice_fv *fv_ptr; -}; - -/* The BOOST TCAM stores the match packet header in reverse order, meaning - * the fields are reversed; in addition, this means that the normally big endian - * fields of the packet are now little endian. 
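[Editor's illustrative aside, not part of the patch.] The ICE_MAX_*_IN_BUF capacity macros being moved out of this header are dense; they compute how many fixed-size entries fit in one 4096-byte ice_buf. struct_size() accounts for the buffer header (which already embeds one section_entry), hd_sz subtracts the per-section header, and the remainder is divided by the entry size. The same arithmetic spelled out, using only structures defined in these hunks:

	/* capacity of one package buffer for label entries (illustrative) */
	size_t hdr  = struct_size((struct ice_buf_hdr *)0, section_entry, 1);
	size_t shdr = struct_size((struct ice_label_section *)0, label, 1) -
		      sizeof(struct ice_label);	/* section header alone */
	size_t max_labels = (ICE_PKG_BUF_SIZE - hdr - shdr) /
			    sizeof(struct ice_label); /* == ICE_MAX_LABELS_IN_BUF */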
- */ -struct ice_boost_key_value { -#define ICE_BOOST_REMAINING_HV_KEY	15 -	u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; -	__le16 hv_dst_port_key; -	__le16 hv_src_port_key; -	u8 tcam_search_key; -} __packed; - -struct ice_boost_key { -	struct ice_boost_key_value key; -	struct ice_boost_key_value key2; -}; - -/* package Boost TCAM entry */ -struct ice_boost_tcam_entry { -	__le16 addr; -	__le16 reserved; -	/* break up the 40 bytes of key into different fields */ -	struct ice_boost_key key; -	u8 boost_hit_index_group; -	/* The following contains bitfields which are not on byte boundaries. -	 * These fields are currently unused by driver software. -	 */ -#define ICE_BOOST_BIT_FIELDS		43 -	u8 bit_fields[ICE_BOOST_BIT_FIELDS]; -}; - -struct ice_boost_tcam_section { -	__le16 count; -	__le16 reserved; -	struct ice_boost_tcam_entry tcam[]; -}; - -#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ -	struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \ -	sizeof(struct ice_boost_tcam_entry), \ -	sizeof(struct ice_boost_tcam_entry)) - -/* package Marker Ptype TCAM entry */ -struct ice_marker_ptype_tcam_entry { -#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX	1024 -	__le16 addr; -	__le16 ptype; -	u8 keys[20]; -}; - -struct ice_marker_ptype_tcam_section { -	__le16 count; -	__le16 reserved; -	struct ice_marker_ptype_tcam_entry tcam[]; -}; - -#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF	\ -	ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \ -	sizeof(struct ice_marker_ptype_tcam_entry), \ -	sizeof(struct ice_marker_ptype_tcam_entry)) - -struct ice_xlt1_section { -	__le16 count; -	__le16 offset; -	u8 value[]; -}; - -struct ice_xlt2_section { -	__le16 count; -	__le16 offset; -	__le16 value[]; -}; - -struct ice_prof_redir_section { -	__le16 count; -	__le16 offset; -	u8 redir_value[]; -}; - -/* package buffer building */ - -struct ice_buf_build { -	struct ice_buf buf; -	u16 reserved_section_table_entries; -}; - -struct ice_pkg_enum { -	struct ice_buf_table *buf_table; -	u32 buf_idx; - -	u32 type; -	struct ice_buf_hdr *buf; -	u32 sect_idx; -	void *sect; -	u32 sect_type; - -	u32 entry_idx; -	void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); -}; -  /* Tunnel enabling */  enum ice_tunnel_type { diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c index 40e678cfb507..aff7a141c30d 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.c +++ b/drivers/net/ethernet/intel/ice/ice_fltr.c @@ -208,6 +208,11 @@ static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)  void ice_fltr_remove_all(struct ice_vsi *vsi)  {  	ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx); +	/* sync netdev filters if exist */ +	if (vsi->netdev) { +		__dev_uc_unsync(vsi->netdev, NULL); +		__dev_mc_unsync(vsi->netdev, NULL); +	}  }  /** diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index b5a7f246d230..8dec748bb53a 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -3,15 +3,18 @@  #include "ice.h"  #include "ice_lib.h" -#include <linux/tty_driver.h>  /** - * ice_gnss_do_write - Write data to internal GNSS + * ice_gnss_do_write - Write data to internal GNSS receiver   * @pf: board private structure   * @buf: command buffer   * @size: command buffer size   *   * Write UBX command data to the GNSS receiver + * + * Return: + * * number of bytes written - success + * * negative - error code   */  static unsigned int 
 ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size) @@ -82,6 +85,12 @@ static void ice_gnss_write_pending(struct kthread_work *work)  						write_work);  	struct ice_pf *pf = gnss->back; +	if (!pf) +		return; + +	if (!test_bit(ICE_FLAG_GNSS, pf->flags)) +		return; +  	if (!list_empty(&gnss->queue)) {  		struct gnss_write_buf *write_buf = NULL;  		unsigned int bytes; @@ -102,16 +111,14 @@ static void ice_gnss_write_pending(struct kthread_work *work)   * ice_gnss_read - Read data from internal GNSS module   * @work: GNSS read work structure   * - * Read the data from internal GNSS receiver, number of bytes read will be - * returned in *read_data parameter. + * Read the data from internal GNSS receiver, write it to gnss_dev.   */  static void ice_gnss_read(struct kthread_work *work)  {  	struct gnss_serial *gnss = container_of(work, struct gnss_serial,  						read_work.work); +	unsigned int i, bytes_read, data_len, count;  	struct ice_aqc_link_topo_addr link_topo; -	unsigned int i, bytes_read, data_len; -	struct tty_port *port;  	struct ice_pf *pf;  	struct ice_hw *hw;  	__be16 data_len_b; @@ -120,14 +127,15 @@ static void ice_gnss_read(struct kthread_work *work)  	int err = 0;  	pf = gnss->back; -	if (!pf || !gnss->tty || !gnss->tty->port) { +	if (!pf) {  		err = -EFAULT;  		goto exit;  	} -	hw = &pf->hw; -	port = gnss->tty->port; +	if (!test_bit(ICE_FLAG_GNSS, pf->flags)) +		return; +	hw = &pf->hw;  	buf = (char *)get_zeroed_page(GFP_KERNEL);  	if (!buf) {  		err = -ENOMEM; @@ -159,7 +167,6 @@ static void ice_gnss_read(struct kthread_work *work)  	}  	data_len = min_t(typeof(data_len), data_len, PAGE_SIZE); -	data_len = tty_buffer_request_room(port, data_len);  	if (!data_len) {  		err = -ENOMEM;  		goto exit_buf; @@ -179,12 +186,11 @@ static void ice_gnss_read(struct kthread_work *work)  			goto exit_buf;  	} -	/* Send the data to the tty layer for users to read. This doesn't -	 * actually push the data through unless tty->low_latency is set. -	 */ -	tty_insert_flip_string(port, buf, i); -	tty_flip_buffer_push(port); - +	count = gnss_insert_raw(pf->gnss_dev, buf, i); +	if (count != i) +		dev_warn(ice_pf_to_dev(pf), +			 "gnss_insert_raw ret=%d size=%d\n", +			 count, i);  exit_buf:  	free_page((unsigned long)buf);  	kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, @@ -195,11 +201,16 @@ exit:  }  /** - * ice_gnss_struct_init - Initialize GNSS structure for the TTY + * ice_gnss_struct_init - Initialize GNSS receiver   * @pf: Board private structure - * @index: TTY device index + * + * Initialize GNSS structures and workers. + * + * Return: + * * pointer to initialized gnss_serial struct - success + * * NULL - error   */ -static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index) +static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)  {  	struct device *dev = ice_pf_to_dev(pf);  	struct kthread_worker *kworker; @@ -209,17 +220,12 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index)  	if (!gnss)  		return NULL; -	mutex_init(&gnss->gnss_mutex); -	gnss->open_count = 0;  	gnss->back = pf; -	pf->gnss_serial[index] = gnss; +	pf->gnss_serial = gnss;  	kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);  	INIT_LIST_HEAD(&gnss->queue);  	kthread_init_work(&gnss->write_work, ice_gnss_write_pending); -	/* Allocate a kworker for handling work required for the GNSS TTY -	 * writes. 
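[Editor's illustrative aside, not part of the patch.] The hunks above replace the tty_insert_flip_string()/tty_flip_buffer_push() path with gnss_insert_raw(), which feeds the raw receiver bytes straight to the /dev/gnssN character device. A minimal sketch of the resulting self-rearming read worker, assuming only the GNSS subsystem API used in this patch; example_read_from_receiver() is a hypothetical stand-in for the driver's I2C/admin-queue read sequence:

/* hypothetical helper standing in for the driver's I2C read */
static int example_read_from_receiver(struct gnss_serial *gnss,
				      u8 *buf, size_t len);

static void example_gnss_read(struct kthread_work *work)
{
	struct gnss_serial *gnss = container_of(work, struct gnss_serial,
						read_work.work);
	struct ice_pf *pf = gnss->back;
	u8 data[128];
	int len;

	len = example_read_from_receiver(gnss, data, sizeof(data));
	if (len > 0)
		gnss_insert_raw(pf->gnss_dev, data, len); /* feed /dev/gnssN */

	/* self re-arm: poll the receiver again after ~0.1 s */
	kthread_queue_delayed_work(gnss->kworker, &gnss->read_work,
				   ICE_GNSS_TIMER_DELAY_TIME);
}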
-	 */  	kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));  	if (IS_ERR(kworker)) {  		kfree(gnss); @@ -232,139 +238,100 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index)  }  /** - * ice_gnss_tty_open - Initialize GNSS structures on TTY device open - * @tty: pointer to the tty_struct - * @filp: pointer to the file + * ice_gnss_open - Open GNSS device + * @gdev: pointer to the gnss device struct   * - * This routine is mandatory. If this routine is not filled in, the attempted - * open will fail with ENODEV. + * Open GNSS device and start filling the read buffer for consumer. + * + * Return: + * * 0 - success + * * negative - error code   */ -static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp) +static int ice_gnss_open(struct gnss_device *gdev)  { +	struct ice_pf *pf = gnss_get_drvdata(gdev);  	struct gnss_serial *gnss; -	struct ice_pf *pf; -	pf = (struct ice_pf *)tty->driver->driver_state;  	if (!pf)  		return -EFAULT; -	/* Clear the pointer in case something fails */ -	tty->driver_data = NULL; - -	/* Get the serial object associated with this tty pointer */ -	gnss = pf->gnss_serial[tty->index]; -	if (!gnss) { -		/* Initialize GNSS struct on the first device open */ -		gnss = ice_gnss_struct_init(pf, tty->index); -		if (!gnss) -			return -ENOMEM; -	} +	if (!test_bit(ICE_FLAG_GNSS, pf->flags)) +		return -EFAULT; -	mutex_lock(&gnss->gnss_mutex); +	gnss = pf->gnss_serial; +	if (!gnss) +		return -ENODEV; -	/* Save our structure within the tty structure */ -	tty->driver_data = gnss; -	gnss->tty = tty; -	gnss->open_count++;  	kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, 0); -	mutex_unlock(&gnss->gnss_mutex); -  	return 0;  }  /** - * ice_gnss_tty_close - Cleanup GNSS structures on tty device close - * @tty: pointer to the tty_struct - * @filp: pointer to the file + * ice_gnss_close - Close GNSS device + * @gdev: pointer to the gnss device struct + * + * Close GNSS device, cancel worker, stop filling the read buffer.   */ -static void ice_gnss_tty_close(struct tty_struct *tty, struct file *filp) +static void ice_gnss_close(struct gnss_device *gdev)  { -	struct gnss_serial *gnss = tty->driver_data; -	struct ice_pf *pf; - -	if (!gnss) -		return; +	struct ice_pf *pf = gnss_get_drvdata(gdev); +	struct gnss_serial *gnss; -	pf = (struct ice_pf *)tty->driver->driver_state;  	if (!pf)  		return; -	mutex_lock(&gnss->gnss_mutex); - -	if (!gnss->open_count) { -		/* Port was never opened */ -		dev_err(ice_pf_to_dev(pf), "GNSS port not opened\n"); -		goto exit; -	} +	gnss = pf->gnss_serial; +	if (!gnss) +		return; -	gnss->open_count--; -	if (gnss->open_count <= 0) { -		/* Port is in shutdown state */ -		kthread_cancel_delayed_work_sync(&gnss->read_work); -	} -exit: -	mutex_unlock(&gnss->gnss_mutex); +	kthread_cancel_work_sync(&gnss->write_work); +	kthread_cancel_delayed_work_sync(&gnss->read_work);  }  /** - * ice_gnss_tty_write - Write GNSS data - * @tty: pointer to the tty_struct + * ice_gnss_write - Write to GNSS device + * @gdev: pointer to the gnss device struct   * @buf: pointer to the user data - * @count: the number of characters queued to be sent to the HW + * @count: size of the buffer to be sent to the GNSS device   * - * The write function call is called by the user when there is data to be sent - * to the hardware. First the tty core receives the call, and then it passes the - * data on to the tty driver's write function. The tty core also tells the tty - * driver the size of the data being sent. 
- * If any errors happen during the write call, a negative error value should be - * returned instead of the number of characters queued to be written. + * Return: + * * number of written bytes - success + * * negative - error code   */  static int -ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf, +	       size_t count)  { +	struct ice_pf *pf = gnss_get_drvdata(gdev);  	struct gnss_write_buf *write_buf;  	struct gnss_serial *gnss;  	unsigned char *cmd_buf; -	struct ice_pf *pf;  	int err = count;  	/* We cannot write a single byte using our I2C implementation. */  	if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)  		return -EINVAL; -	gnss = tty->driver_data; -	if (!gnss) -		return -EFAULT; - -	pf = (struct ice_pf *)tty->driver->driver_state;  	if (!pf)  		return -EFAULT; -	/* Only allow to write on TTY 0 */ -	if (gnss != pf->gnss_serial[0]) -		return -EIO; - -	mutex_lock(&gnss->gnss_mutex); +	if (!test_bit(ICE_FLAG_GNSS, pf->flags)) +		return -EFAULT; -	if (!gnss->open_count) { -		err = -EINVAL; -		goto exit; -	} +	gnss = pf->gnss_serial; +	if (!gnss) +		return -ENODEV;  	cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL); -	if (!cmd_buf) { -		err = -ENOMEM; -		goto exit; -	} +	if (!cmd_buf) +		return -ENOMEM;  	memcpy(cmd_buf, buf, count); - -	/* Send the data out to a hardware port */  	write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);  	if (!write_buf) { -		err = -ENOMEM; -		goto exit; +		kfree(cmd_buf); +		return -ENOMEM;  	}  	write_buf->buf = cmd_buf; @@ -372,136 +339,89 @@ ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)  	INIT_LIST_HEAD(&write_buf->queue);  	list_add_tail(&write_buf->queue, &gnss->queue);  	kthread_queue_work(gnss->kworker, &gnss->write_work); -exit: -	mutex_unlock(&gnss->gnss_mutex); +  	return err;  } +static const struct gnss_operations ice_gnss_ops = { +	.open = ice_gnss_open, +	.close = ice_gnss_close, +	.write_raw = ice_gnss_write, +}; +  /** - * ice_gnss_tty_write_room - Returns the numbers of characters to be written. - * @tty: pointer to the tty_struct + * ice_gnss_register - Register GNSS receiver + * @pf: Board private structure + * + * Allocate and register GNSS receiver in the Linux GNSS subsystem.   * - * This routine returns the numbers of characters the tty driver will accept - * for queuing to be written or 0 if either the TTY is not open or user - * tries to write to the TTY other than the first. 
+ * Return: + * * 0 - success + * * negative - error code   */ -static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty) +static int ice_gnss_register(struct ice_pf *pf)  { -	struct gnss_serial *gnss = tty->driver_data; - -	/* Only allow to write on TTY 0 */ -	if (!gnss || gnss != gnss->back->gnss_serial[0]) -		return 0; - -	mutex_lock(&gnss->gnss_mutex); +	struct gnss_device *gdev; +	int ret; + +	gdev = gnss_allocate_device(ice_pf_to_dev(pf)); +	if (!gdev) { +		dev_err(ice_pf_to_dev(pf), +			"gnss_allocate_device returns NULL\n"); +		return -ENOMEM; +	} -	if (!gnss->open_count) { -		mutex_unlock(&gnss->gnss_mutex); -		return 0; +	gdev->ops = &ice_gnss_ops; +	gdev->type = GNSS_TYPE_UBX; +	gnss_set_drvdata(gdev, pf); +	ret = gnss_register_device(gdev); +	if (ret) { +		dev_err(ice_pf_to_dev(pf), "gnss_register_device err=%d\n", +			ret); +		gnss_put_device(gdev); +	} else { +		pf->gnss_dev = gdev;  	} -	mutex_unlock(&gnss->gnss_mutex); -	return ICE_GNSS_TTY_WRITE_BUF; +	return ret;  } -static const struct tty_operations tty_gps_ops = { -	.open =		ice_gnss_tty_open, -	.close =	ice_gnss_tty_close, -	.write =	ice_gnss_tty_write, -	.write_room =	ice_gnss_tty_write_room, -}; -  /** - * ice_gnss_create_tty_driver - Create a TTY driver for GNSS + * ice_gnss_deregister - Deregister GNSS receiver   * @pf: Board private structure + * + * Deregister GNSS receiver from the Linux GNSS subsystem, + * release its resources.   */ -static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf) +static void ice_gnss_deregister(struct ice_pf *pf)  { -	struct device *dev = ice_pf_to_dev(pf); -	const int ICE_TTYDRV_NAME_MAX = 14; -	struct tty_driver *tty_driver; -	char *ttydrv_name; -	unsigned int i; -	int err; - -	tty_driver = tty_alloc_driver(ICE_GNSS_TTY_MINOR_DEVICES, -				      TTY_DRIVER_REAL_RAW); -	if (IS_ERR(tty_driver)) { -		dev_err(dev, "Failed to allocate memory for GNSS TTY\n"); -		return NULL; -	} - -	ttydrv_name = kzalloc(ICE_TTYDRV_NAME_MAX, GFP_KERNEL); -	if (!ttydrv_name) { -		tty_driver_kref_put(tty_driver); -		return NULL; -	} - -	snprintf(ttydrv_name, ICE_TTYDRV_NAME_MAX, "ttyGNSS_%02x%02x_", -		 (u8)pf->pdev->bus->number, (u8)PCI_SLOT(pf->pdev->devfn)); - -	/* Initialize the tty driver*/ -	tty_driver->owner = THIS_MODULE; -	tty_driver->driver_name = dev_driver_string(dev); -	tty_driver->name = (const char *)ttydrv_name; -	tty_driver->type = TTY_DRIVER_TYPE_SERIAL; -	tty_driver->subtype = SERIAL_TYPE_NORMAL; -	tty_driver->init_termios = tty_std_termios; -	tty_driver->init_termios.c_iflag &= ~INLCR; -	tty_driver->init_termios.c_iflag |= IGNCR; -	tty_driver->init_termios.c_oflag &= ~OPOST; -	tty_driver->init_termios.c_lflag &= ~ICANON; -	tty_driver->init_termios.c_cflag &= ~(CSIZE | CBAUD | CBAUDEX); -	/* baud rate 9600 */ -	tty_termios_encode_baud_rate(&tty_driver->init_termios, 9600, 9600); -	tty_driver->driver_state = pf; -	tty_set_operations(tty_driver, &tty_gps_ops); - -	for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) { -		pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]), -					       GFP_KERNEL); -		pf->gnss_serial[i] = NULL; - -		tty_port_init(pf->gnss_tty_port[i]); -		tty_port_link_device(pf->gnss_tty_port[i], tty_driver, i); -	} - -	err = tty_register_driver(tty_driver); -	if (err) { -		dev_err(dev, "Failed to register TTY driver err=%d\n", err); - -		for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) { -			tty_port_destroy(pf->gnss_tty_port[i]); -			kfree(pf->gnss_tty_port[i]); -		} -		kfree(ttydrv_name); -		tty_driver_kref_put(pf->ice_gnss_tty_driver); 
- -		return NULL; +	if (pf->gnss_dev) { +		gnss_deregister_device(pf->gnss_dev); +		gnss_put_device(pf->gnss_dev); +		pf->gnss_dev = NULL;  	} - -	for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) -		dev_info(dev, "%s%d registered\n", ttydrv_name, i); - -	return tty_driver;  }  /** - * ice_gnss_init - Initialize GNSS TTY support + * ice_gnss_init - Initialize GNSS support   * @pf: Board private structure   */  void ice_gnss_init(struct ice_pf *pf)  { -	struct tty_driver *tty_driver; +	int ret; -	tty_driver = ice_gnss_create_tty_driver(pf); -	if (!tty_driver) +	pf->gnss_serial = ice_gnss_struct_init(pf); +	if (!pf->gnss_serial)  		return; -	pf->ice_gnss_tty_driver = tty_driver; - -	set_bit(ICE_FLAG_GNSS, pf->flags); -	dev_info(ice_pf_to_dev(pf), "GNSS TTY init successful\n"); +	ret = ice_gnss_register(pf); +	if (!ret) { +		set_bit(ICE_FLAG_GNSS, pf->flags); +		dev_info(ice_pf_to_dev(pf), "GNSS init successful\n"); +	} else { +		ice_gnss_exit(pf); +		dev_err(ice_pf_to_dev(pf), "GNSS init failure\n"); +	}  }  /** @@ -510,31 +430,20 @@ void ice_gnss_init(struct ice_pf *pf)   */  void ice_gnss_exit(struct ice_pf *pf)  { -	unsigned int i; +	ice_gnss_deregister(pf); +	clear_bit(ICE_FLAG_GNSS, pf->flags); -	if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver) -		return; - -	for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) { -		if (pf->gnss_tty_port[i]) { -			tty_port_destroy(pf->gnss_tty_port[i]); -			kfree(pf->gnss_tty_port[i]); -		} +	if (pf->gnss_serial) { +		struct gnss_serial *gnss = pf->gnss_serial; -		if (pf->gnss_serial[i]) { -			struct gnss_serial *gnss = pf->gnss_serial[i]; +		kthread_cancel_work_sync(&gnss->write_work); +		kthread_cancel_delayed_work_sync(&gnss->read_work); +		kthread_destroy_worker(gnss->kworker); +		gnss->kworker = NULL; -			kthread_cancel_work_sync(&gnss->write_work); -			kthread_cancel_delayed_work_sync(&gnss->read_work); -			kfree(gnss); -			pf->gnss_serial[i] = NULL; -		} +		kfree(gnss); +		pf->gnss_serial = NULL;  	} - -	tty_unregister_driver(pf->ice_gnss_tty_driver); -	kfree(pf->ice_gnss_tty_driver->name); -	tty_driver_kref_put(pf->ice_gnss_tty_driver); -	pf->ice_gnss_tty_driver = NULL;  }  /** diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h index f454dd1d9285..31db0701d13f 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.h +++ b/drivers/net/ethernet/intel/ice/ice_gnss.h @@ -4,15 +4,8 @@  #ifndef _ICE_GNSS_H_  #define _ICE_GNSS_H_ -#include <linux/tty.h> -#include <linux/tty_flip.h> -  #define ICE_E810T_GNSS_I2C_BUS		0x2  #define ICE_GNSS_TIMER_DELAY_TIME	(HZ / 10) /* 0.1 second per message */ -/* Create 2 minor devices, both using the same GNSS module. First one is RW, - * second one RO. 
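[Editor's illustrative aside, not part of the patch.] Taken together, the new ice_gnss.c code above follows the standard GNSS-framework lifecycle: allocate, fill in ops/type/drvdata, register; on teardown, deregister and drop the reference. A condensed sketch of that pairing, using only calls that appear in this patch or in <linux/gnss.h>:

static int example_register(struct ice_pf *pf)
{
	struct gnss_device *gdev = gnss_allocate_device(ice_pf_to_dev(pf));

	if (!gdev)
		return -ENOMEM;

	gdev->ops = &ice_gnss_ops;	/* open/close/write_raw */
	gdev->type = GNSS_TYPE_UBX;	/* receiver speaks u-blox UBX */
	gnss_set_drvdata(gdev, pf);

	if (gnss_register_device(gdev)) {
		gnss_put_device(gdev);	/* drop the allocation reference */
		return -ENODEV;
	}
	pf->gnss_dev = gdev;		/* /dev/gnssN now exists */
	return 0;
}

static void example_unregister(struct ice_pf *pf)
{
	gnss_deregister_device(pf->gnss_dev);
	gnss_put_device(pf->gnss_dev);
	pf->gnss_dev = NULL;
}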
- */ -#define ICE_GNSS_TTY_MINOR_DEVICES	2  #define ICE_GNSS_TTY_WRITE_BUF		250  #define ICE_MAX_I2C_DATA_SIZE		FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M)  #define ICE_MAX_I2C_WRITE_BYTES		4 @@ -36,13 +29,9 @@ struct gnss_write_buf {  	unsigned char *buf;  }; -  /**   * struct gnss_serial - data used to initialize GNSS TTY port   * @back: back pointer to PF - * @tty: pointer to the tty for this device - * @open_count: number of times this port has been opened - * @gnss_mutex: gnss_mutex used to protect GNSS serial operations   * @kworker: kwork thread for handling periodic work   * @read_work: read_work function for handling GNSS reads   * @write_work: write_work function for handling GNSS writes @@ -50,16 +39,13 @@ struct gnss_write_buf {   */  struct gnss_serial {  	struct ice_pf *back; -	struct tty_struct *tty; -	int open_count; -	struct mutex gnss_mutex; /* protects GNSS serial structure */  	struct kthread_worker *kworker;  	struct kthread_delayed_work read_work;  	struct kthread_work write_work;  	struct list_head queue;  }; -#if IS_ENABLED(CONFIG_TTY) +#if IS_ENABLED(CONFIG_ICE_GNSS)  void ice_gnss_init(struct ice_pf *pf);  void ice_gnss_exit(struct ice_pf *pf);  bool ice_gnss_is_gps_present(struct ice_hw *hw); @@ -70,5 +56,5 @@ static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)  {  	return false;  } -#endif /* IS_ENABLED(CONFIG_TTY) */ +#endif /* IS_ENABLED(CONFIG_ICE_GNSS) */  #endif /* _ICE_GNSS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index d16738a3d3a7..a92dc9a16035 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -110,6 +110,9 @@  #define PRTDCB_TUP2TC				0x001D26C0  #define GL_PREEXT_L2_PMASK0(_i)			(0x0020F0FC + ((_i) * 4))  #define GL_PREEXT_L2_PMASK1(_i)			(0x0020F108 + ((_i) * 4)) +#define GLFLXP_RXDID_FLAGS(_i, _j)              (0x0045D000 + ((_i) * 4 + (_j) * 256)) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S       0 +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M       ICE_M(0x3F, 0)  #define GLFLXP_RXDID_FLX_WRD_0(_i)		(0x0045c800 + ((_i) * 4))  #define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S	0  #define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M	ICE_M(0xFF, 0) diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index 895c32bcc8b5..e6bc2285071e 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -6,6 +6,8 @@  #include "ice_lib.h"  #include "ice_dcb_lib.h" +static DEFINE_XARRAY_ALLOC1(ice_aux_id); +  /**   * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct   * @pf: pointer to PF struct @@ -246,6 +248,17 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf)  }  /** + * ice_free_rdma_qvector - free vector resources reserved for RDMA driver + * @pf: board private structure to initialize + */ +static void ice_free_rdma_qvector(struct ice_pf *pf) +{ +	pf->num_avail_sw_msix -= pf->num_rdma_msix; +	ice_free_res(pf->irq_tracker, pf->rdma_base_vector, +		     ICE_RES_RDMA_VEC_ID); +} + +/**   * ice_adev_release - function to be mapped to AUX dev's release op   * @dev: pointer to device to free   */ @@ -331,12 +344,48 @@ int ice_init_rdma(struct ice_pf *pf)  	struct device *dev = &pf->pdev->dev;  	int ret; +	if (!ice_is_rdma_ena(pf)) { +		dev_warn(dev, "RDMA is not supported on this device\n"); +		return 0; +	} + +	ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX), +		       GFP_KERNEL); +	if (ret) { +		dev_err(dev, "Failed to 
allocate device ID for AUX driver\n"); +		return -ENOMEM; +	} +  	/* Reserve vector resources */  	ret = ice_reserve_rdma_qvector(pf);  	if (ret < 0) {  		dev_err(dev, "failed to reserve vectors for RDMA\n"); -		return ret; +		goto err_reserve_rdma_qvector;  	}  	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; -	return ice_plug_aux_dev(pf); +	ret = ice_plug_aux_dev(pf); +	if (ret) +		goto err_plug_aux_dev; +	return 0; + +err_plug_aux_dev: +	ice_free_rdma_qvector(pf); +err_reserve_rdma_qvector: +	pf->adev = NULL; +	xa_erase(&ice_aux_id, pf->aux_idx); +	return ret; +} + +/** + * ice_deinit_rdma - deinitialize RDMA on PF + * @pf: ptr to ice_pf + */ +void ice_deinit_rdma(struct ice_pf *pf) +{ +	if (!ice_is_rdma_ena(pf)) +		return; + +	ice_unplug_aux_dev(pf); +	ice_free_rdma_qvector(pf); +	xa_erase(&ice_aux_id, pf->aux_idx);  } diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index b3baf7c3f910..89f986a75cc8 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -908,17 +908,5 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)  	return ice_ptype_lkup[ptype];  } -#define ICE_LINK_SPEED_UNKNOWN		0 -#define ICE_LINK_SPEED_10MBPS		10 -#define ICE_LINK_SPEED_100MBPS		100 -#define ICE_LINK_SPEED_1000MBPS		1000 -#define ICE_LINK_SPEED_2500MBPS		2500 -#define ICE_LINK_SPEED_5000MBPS		5000 -#define ICE_LINK_SPEED_10000MBPS	10000 -#define ICE_LINK_SPEED_20000MBPS	20000 -#define ICE_LINK_SPEED_25000MBPS	25000 -#define ICE_LINK_SPEED_40000MBPS	40000 -#define ICE_LINK_SPEED_50000MBPS	50000 -#define ICE_LINK_SPEED_100000MBPS	100000  #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 7276badfa19e..781475480ff2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -166,14 +166,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)  /**   * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI   * @vsi: the VSI being configured - * @vf: the VF associated with this VSI, if any   *   * Return 0 on success and a negative value on error   */ -static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf) +static void ice_vsi_set_num_qs(struct ice_vsi *vsi)  {  	enum ice_vsi_type vsi_type = vsi->type;  	struct ice_pf *pf = vsi->back; +	struct ice_vf *vf = vsi->vf;  	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))  		return; @@ -282,10 +282,10 @@ static int ice_get_free_slot(void *array, int size, int curr)  }  /** - * ice_vsi_delete - delete a VSI from the switch + * ice_vsi_delete_from_hw - delete a VSI from the switch   * @vsi: pointer to VSI being removed   */ -void ice_vsi_delete(struct ice_vsi *vsi) +static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)  {  	struct ice_pf *pf = vsi->back;  	struct ice_vsi_ctx *ctxt; @@ -348,47 +348,144 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)  }  /** - * ice_vsi_clear - clean up and deallocate the provided VSI + * ice_vsi_free_stats - Free the ring statistics structures + * @vsi: VSI pointer + */ +static void ice_vsi_free_stats(struct ice_vsi *vsi) +{ +	struct ice_vsi_stats *vsi_stat; +	struct ice_pf *pf = vsi->back; +	int i; + +	if (vsi->type == ICE_VSI_CHNL) +		return; +	if (!pf->vsi_stats) +		return; + +	vsi_stat = pf->vsi_stats[vsi->idx]; +	if (!vsi_stat) +		return; + +	ice_for_each_alloc_txq(vsi, i) { +		if (vsi_stat->tx_ring_stats[i]) { +			
kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); +			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); +		} +	} + +	ice_for_each_alloc_rxq(vsi, i) { +		if (vsi_stat->rx_ring_stats[i]) { +			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); +			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); +		} +	} + +	kfree(vsi_stat->tx_ring_stats); +	kfree(vsi_stat->rx_ring_stats); +	kfree(vsi_stat); +	pf->vsi_stats[vsi->idx] = NULL; +} + +/** + * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI + * @vsi: VSI which is having stats allocated + */ +static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi) +{ +	struct ice_ring_stats **tx_ring_stats; +	struct ice_ring_stats **rx_ring_stats; +	struct ice_vsi_stats *vsi_stats; +	struct ice_pf *pf = vsi->back; +	u16 i; + +	vsi_stats = pf->vsi_stats[vsi->idx]; +	tx_ring_stats = vsi_stats->tx_ring_stats; +	rx_ring_stats = vsi_stats->rx_ring_stats; + +	/* Allocate Tx ring stats */ +	ice_for_each_alloc_txq(vsi, i) { +		struct ice_ring_stats *ring_stats; +		struct ice_tx_ring *ring; + +		ring = vsi->tx_rings[i]; +		ring_stats = tx_ring_stats[i]; + +		if (!ring_stats) { +			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); +			if (!ring_stats) +				goto err_out; + +			WRITE_ONCE(tx_ring_stats[i], ring_stats); +		} + +		ring->ring_stats = ring_stats; +	} + +	/* Allocate Rx ring stats */ +	ice_for_each_alloc_rxq(vsi, i) { +		struct ice_ring_stats *ring_stats; +		struct ice_rx_ring *ring; + +		ring = vsi->rx_rings[i]; +		ring_stats = rx_ring_stats[i]; + +		if (!ring_stats) { +			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); +			if (!ring_stats) +				goto err_out; + +			WRITE_ONCE(rx_ring_stats[i], ring_stats); +		} + +		ring->ring_stats = ring_stats; +	} + +	return 0; + +err_out: +	ice_vsi_free_stats(vsi); +	return -ENOMEM; +} + +/** + * ice_vsi_free - clean up and deallocate the provided VSI   * @vsi: pointer to VSI being cleared   *   * This deallocates the VSI's queue resources, removes it from the PF's   * VSI array if necessary, and deallocates the VSI - * - * Returns 0 on success, negative on failure   */ -int ice_vsi_clear(struct ice_vsi *vsi) +static void ice_vsi_free(struct ice_vsi *vsi)  {  	struct ice_pf *pf = NULL;  	struct device *dev; -	if (!vsi) -		return 0; - -	if (!vsi->back) -		return -EINVAL; +	if (!vsi || !vsi->back) +		return;  	pf = vsi->back;  	dev = ice_pf_to_dev(pf);  	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {  		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx); -		return -EINVAL; +		return;  	}  	mutex_lock(&pf->sw_mutex);  	/* updates the PF for this cleared VSI */  	pf->vsi[vsi->idx] = NULL; -	if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL) -		pf->next_vsi = vsi->idx; -	if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf) -		pf->next_vsi = vsi->idx; +	pf->next_vsi = vsi->idx; +	ice_vsi_free_stats(vsi);  	ice_vsi_free_arrays(vsi);  	mutex_unlock(&pf->sw_mutex);  	devm_kfree(dev, vsi); +} -	return 0; +void ice_vsi_delete(struct ice_vsi *vsi) +{ +	ice_vsi_delete_from_hw(vsi); +	ice_vsi_free(vsi);  }  /** @@ -448,123 +545,140 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d  }  /** - * ice_vsi_alloc - Allocates the next available struct VSI in the PF - * @pf: board private structure - * @vsi_type: type of VSI - * @ch: ptr to channel - * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL - * - * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. For ICE_VSI_CTRL, - * it may be NULL in the case there is no association with a VF. 
For - * ICE_VSI_VF the VF pointer *must not* be NULL. - * - * returns a pointer to a VSI on success, NULL on failure. + * ice_vsi_alloc_stat_arrays - Allocate statistics arrays + * @vsi: VSI pointer   */ -static struct ice_vsi * -ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, -	      struct ice_channel *ch, struct ice_vf *vf) +static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)  { -	struct device *dev = ice_pf_to_dev(pf); -	struct ice_vsi *vsi = NULL; +	struct ice_vsi_stats *vsi_stat; +	struct ice_pf *pf = vsi->back; -	if (WARN_ON(vsi_type == ICE_VSI_VF && !vf)) -		return NULL; +	if (vsi->type == ICE_VSI_CHNL) +		return 0; +	if (!pf->vsi_stats) +		return -ENOENT; -	/* Need to protect the allocation of the VSIs at the PF level */ -	mutex_lock(&pf->sw_mutex); +	if (pf->vsi_stats[vsi->idx]) +	/* realloc will happen in rebuild path */ +		return 0; -	/* If we have already allocated our maximum number of VSIs, -	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index -	 * is available to be populated -	 */ -	if (pf->next_vsi == ICE_NO_VSI) { -		dev_dbg(dev, "out of VSI slots!\n"); -		goto unlock_pf; -	} +	vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL); +	if (!vsi_stat) +		return -ENOMEM; -	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); -	if (!vsi) -		goto unlock_pf; +	vsi_stat->tx_ring_stats = +		kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats), +			GFP_KERNEL); +	if (!vsi_stat->tx_ring_stats) +		goto err_alloc_tx; -	vsi->type = vsi_type; -	vsi->back = pf; -	set_bit(ICE_VSI_DOWN, vsi->state); +	vsi_stat->rx_ring_stats = +		kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats), +			GFP_KERNEL); +	if (!vsi_stat->rx_ring_stats) +		goto err_alloc_rx; -	if (vsi_type == ICE_VSI_VF) -		ice_vsi_set_num_qs(vsi, vf); -	else if (vsi_type != ICE_VSI_CHNL) -		ice_vsi_set_num_qs(vsi, NULL); +	pf->vsi_stats[vsi->idx] = vsi_stat; -	switch (vsi->type) { -	case ICE_VSI_SWITCHDEV_CTRL: +	return 0; + +err_alloc_rx: +	kfree(vsi_stat->rx_ring_stats); +err_alloc_tx: +	kfree(vsi_stat->tx_ring_stats); +	kfree(vsi_stat); +	pf->vsi_stats[vsi->idx] = NULL; +	return -ENOMEM; +} + +/** + * ice_vsi_alloc_def - set default values for already allocated VSI + * @vsi: ptr to VSI + * @ch: ptr to channel + */ +static int +ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) +{ +	if (vsi->type != ICE_VSI_CHNL) { +		ice_vsi_set_num_qs(vsi);  		if (ice_vsi_alloc_arrays(vsi)) -			goto err_rings; +			return -ENOMEM; +	} +	switch (vsi->type) { +	case ICE_VSI_SWITCHDEV_CTRL:  		/* Setup eswitch MSIX irq handler for VSI */  		vsi->irq_handler = ice_eswitch_msix_clean_rings;  		break;  	case ICE_VSI_PF: -		if (ice_vsi_alloc_arrays(vsi)) -			goto err_rings; -  		/* Setup default MSIX irq handler for VSI */  		vsi->irq_handler = ice_msix_clean_rings;  		break;  	case ICE_VSI_CTRL: -		if (ice_vsi_alloc_arrays(vsi)) -			goto err_rings; -  		/* Setup ctrl VSI MSIX irq handler */  		vsi->irq_handler = ice_msix_clean_ctrl_vsi; - -		/* For the PF control VSI this is NULL, for the VF control VSI -		 * this will be the first VF to allocate it. 
-		 */ -		vsi->vf = vf; -		break; -	case ICE_VSI_VF: -		if (ice_vsi_alloc_arrays(vsi)) -			goto err_rings; -		vsi->vf = vf;  		break;  	case ICE_VSI_CHNL:  		if (!ch) -			goto err_rings; +			return -EINVAL; +  		vsi->num_rxq = ch->num_rxq;  		vsi->num_txq = ch->num_txq;  		vsi->next_base_q = ch->base_q;  		break; +	case ICE_VSI_VF:  	case ICE_VSI_LB: -		if (ice_vsi_alloc_arrays(vsi)) -			goto err_rings;  		break;  	default: -		dev_warn(dev, "Unknown VSI type %d\n", vsi->type); -		goto unlock_pf; +		ice_vsi_free_arrays(vsi); +		return -EINVAL;  	} -	if (vsi->type == ICE_VSI_CTRL && !vf) { -		/* Use the last VSI slot as the index for PF control VSI */ -		vsi->idx = pf->num_alloc_vsi - 1; -		pf->ctrl_vsi_idx = vsi->idx; -		pf->vsi[vsi->idx] = vsi; -	} else { -		/* fill slot and make note of the index */ -		vsi->idx = pf->next_vsi; -		pf->vsi[pf->next_vsi] = vsi; +	return 0; +} -		/* prepare pf->next_vsi for next use */ -		pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, -						 pf->next_vsi); +/** + * ice_vsi_alloc - Allocates the next available struct VSI in the PF + * @pf: board private structure + * + * Reserves a VSI index from the PF and allocates an empty VSI structure + * without a type. The VSI structure must later be initialized by calling + * ice_vsi_cfg(). + * + * returns a pointer to a VSI on success, NULL on failure. + */ +static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf) +{ +	struct device *dev = ice_pf_to_dev(pf); +	struct ice_vsi *vsi = NULL; + +	/* Need to protect the allocation of the VSIs at the PF level */ +	mutex_lock(&pf->sw_mutex); + +	/* If we have already allocated our maximum number of VSIs, +	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index +	 * is available to be populated +	 */ +	if (pf->next_vsi == ICE_NO_VSI) { +		dev_dbg(dev, "out of VSI slots!\n"); +		goto unlock_pf;  	} -	if (vsi->type == ICE_VSI_CTRL && vf) -		vf->ctrl_vsi_idx = vsi->idx; -	goto unlock_pf; +	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); +	if (!vsi) +		goto unlock_pf; + +	vsi->back = pf; +	set_bit(ICE_VSI_DOWN, vsi->state); + +	/* fill slot and make note of the index */ +	vsi->idx = pf->next_vsi; +	pf->vsi[pf->next_vsi] = vsi; + +	/* prepare pf->next_vsi for next use */ +	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, +					 pf->next_vsi); -err_rings: -	devm_kfree(dev, vsi); -	vsi = NULL;  unlock_pf:  	mutex_unlock(&pf->sw_mutex);  	return vsi; @@ -1129,12 +1243,15 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)  /**   * ice_vsi_init - Create and initialize a VSI   * @vsi: the VSI being configured - * @init_vsi: is this call creating a VSI + * @vsi_flags: VSI configuration flags + * + * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to + * reconfigure an existing context.   *   * This initializes a VSI context depending on the VSI type to be added and   * passes it down to the add_vsi aq command to create a new VSI.   
*/ -static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) +static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)  {  	struct ice_pf *pf = vsi->back;  	struct ice_hw *hw = &pf->hw; @@ -1196,7 +1313,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)  		/* if updating VSI context, make sure to set valid_section:  		 * to indicate which section of VSI context being updated  		 */ -		if (!init_vsi) +		if (!(vsi_flags & ICE_VSI_FLAG_INIT))  			ctxt->info.valid_sections |=  				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);  	} @@ -1209,7 +1326,8 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)  		if (ret)  			goto out; -		if (!init_vsi) /* means VSI being updated */ +		if (!(vsi_flags & ICE_VSI_FLAG_INIT)) +			/* means VSI being updated */  			/* must to indicate which section of VSI context are  			 * being modified  			 */ @@ -1224,7 +1342,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)  			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);  	} -	if (init_vsi) { +	if (vsi_flags & ICE_VSI_FLAG_INIT) {  		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);  		if (ret) {  			dev_err(dev, "Add VSI failed, err %d\n", ret); @@ -1388,7 +1506,7 @@ static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)   * ice_vsi_setup_vector_base - Set up the base vector for the given VSI   * @vsi: ptr to the VSI   * - * This should only be called after ice_vsi_alloc() which allocates the + * This should only be called after ice_vsi_alloc_def() which allocates the   * corresponding SW VSI structure and initializes num_queue_pairs for the   * newly allocated VSI.   * @@ -1795,11 +1913,15 @@ void ice_update_eth_stats(struct ice_vsi *vsi)  {  	struct ice_eth_stats *prev_es, *cur_es;  	struct ice_hw *hw = &vsi->back->hw; +	struct ice_pf *pf = vsi->back;  	u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */  	prev_es = &vsi->eth_stats_prev;  	cur_es = &vsi->eth_stats; +	if (ice_is_reset_in_progress(pf->state)) +		vsi->stat_offsets_loaded = false; +  	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,  			  &prev_es->rx_bytes, &cur_es->rx_bytes); @@ -1840,8 +1962,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)  void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)  {  	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { -		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; -		vsi->rx_buf_len = ICE_RXBUF_2048; +		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; +		vsi->rx_buf_len = ICE_RXBUF_1664;  #if (PAGE_SIZE < 8192)  	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&  		   (vsi->netdev->mtu <= ETH_DATA_LEN)) { @@ -1850,11 +1972,7 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)  #endif  	} else {  		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; -#if (PAGE_SIZE < 8192)  		vsi->rx_buf_len = ICE_RXBUF_3072; -#else -		vsi->rx_buf_len = ICE_RXBUF_2048; -#endif  	}  } @@ -2493,54 +2611,97 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)  }  /** - * ice_vsi_setup - Set up a VSI by a given type - * @pf: board private structure - * @pi: pointer to the port_info instance - * @vsi_type: VSI type - * @vf: pointer to VF to which this VSI connects. This field is used primarily - *      for the ICE_VSI_VF type. Other VSI types should pass NULL. - * @ch: ptr to channel - * - * This allocates the sw VSI structure and its queue resources. 
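[Editor's illustrative aside, not part of the patch.] The ice_vsi_init() hunks above turn the old init_vsi bool into a flags word. A condensed sketch of the branch ICE_VSI_FLAG_INIT drives: the create path is visible in the hunk, while ice_update_vsi() as the reconfigure path is an assumption based on the driver's existing context-update helper, not shown here:

	if (vsi_flags & ICE_VSI_FLAG_INIT) {
		/* first-time creation: program the whole context in FW */
		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
	} else {
		/* reconfiguration: valid_sections must mark what changed */
		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	}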
+ * ice_free_vf_ctrl_res - Free the VF control VSI resource + * @pf: pointer to PF structure + * @vsi: the VSI to free resources for   * - * Returns pointer to the successfully allocated and configured VSI sw struct on - * success, NULL on failure. + * Check if the VF control VSI resource is still in use. If no VF is using it + * any more, release the VSI resource. Otherwise, leave it to be cleaned up + * once no other VF uses it.   */ -struct ice_vsi * -ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, -	      enum ice_vsi_type vsi_type, struct ice_vf *vf, -	      struct ice_channel *ch) +static void ice_free_vf_ctrl_res(struct ice_pf *pf,  struct ice_vsi *vsi) +{ +	struct ice_vf *vf; +	unsigned int bkt; + +	rcu_read_lock(); +	ice_for_each_vf_rcu(pf, bkt, vf) { +		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { +			rcu_read_unlock(); +			return; +		} +	} +	rcu_read_unlock(); + +	/* No other VFs left that have control VSI. It is now safe to reclaim +	 * SW interrupts back to the common pool. +	 */ +	ice_free_res(pf->irq_tracker, vsi->base_vector, +		     ICE_RES_VF_CTRL_VEC_ID); +	pf->num_avail_sw_msix += vsi->num_q_vectors; +} + +static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)  {  	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };  	struct device *dev = ice_pf_to_dev(pf); -	struct ice_vsi *vsi;  	int ret, i; -	if (vsi_type == ICE_VSI_CHNL) -		vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL); -	else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL) -		vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf); -	else -		vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL); +	/* configure VSI nodes based on number of queues and TC's */ +	ice_for_each_traffic_class(i) { +		if (!(vsi->tc_cfg.ena_tc & BIT(i))) +			continue; -	if (!vsi) { -		dev_err(dev, "could not allocate VSI\n"); -		return NULL; +		if (vsi->type == ICE_VSI_CHNL) { +			if (!vsi->alloc_txq && vsi->num_txq) +				max_txqs[i] = vsi->num_txq; +			else +				max_txqs[i] = pf->num_lan_tx; +		} else { +			max_txqs[i] = vsi->alloc_txq; +		} +	} + +	dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); +	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, +			      max_txqs); +	if (ret) { +		dev_err(dev, "VSI %d failed lan queue config, error %d\n", +			vsi->vsi_num, ret); +		return ret;  	} -	vsi->port_info = pi; +	return 0; +} + +/** + * ice_vsi_cfg_def - configure default VSI based on the type + * @vsi: pointer to VSI + * @params: the parameters to configure this VSI with + */ +static int +ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) +{ +	struct device *dev = ice_pf_to_dev(vsi->back); +	struct ice_pf *pf = vsi->back; +	int ret; +  	vsi->vsw = pf->first_sw; -	if (vsi->type == ICE_VSI_PF) -		vsi->ethtype = ETH_P_PAUSE; + +	ret = ice_vsi_alloc_def(vsi, params->ch); +	if (ret) +		return ret; + +	/* allocate memory for Tx/Rx ring stat pointers */ +	if (ice_vsi_alloc_stat_arrays(vsi)) +		goto unroll_vsi_alloc;  	ice_alloc_fd_res(vsi); -	if (vsi_type != ICE_VSI_CHNL) { -		if (ice_vsi_get_qs(vsi)) { -			dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", -				vsi->idx); -			goto unroll_vsi_alloc; -		} +	if (ice_vsi_get_qs(vsi)) { +		dev_err(dev, "Failed to allocate queues. 
vsi->idx = %d\n", +			vsi->idx); +		goto unroll_vsi_alloc_stat;  	}  	/* set RSS capabilities */ @@ -2550,7 +2711,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,  	ice_vsi_set_tc_cfg(vsi);  	/* create the VSI */ -	ret = ice_vsi_init(vsi, true); +	ret = ice_vsi_init(vsi, params->flags);  	if (ret)  		goto unroll_get_qs; @@ -2576,7 +2737,19 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,  		if (ret)  			goto unroll_vector_base; +		ret = ice_vsi_alloc_ring_stats(vsi); +		if (ret) +			goto unroll_vector_base; +  		ice_vsi_map_rings_to_vectors(vsi); +		if (ice_is_xdp_ena_vsi(vsi)) { +			ret = ice_vsi_determine_xdp_res(vsi); +			if (ret) +				goto unroll_vector_base; +			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); +			if (ret) +				goto unroll_vector_base; +		}  		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */  		if (vsi->type != ICE_VSI_CTRL) @@ -2614,6 +2787,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,  		if (ret)  			goto unroll_vector_base; +		ret = ice_vsi_alloc_ring_stats(vsi); +		if (ret) +			goto unroll_vector_base;  		/* Do not exit if configuring RSS had an issue, at least  		 * receive traffic on first queue. Hence no need to capture  		 * return value @@ -2627,36 +2803,167 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,  		ret = ice_vsi_alloc_rings(vsi);  		if (ret)  			goto unroll_vsi_init; + +		ret = ice_vsi_alloc_ring_stats(vsi); +		if (ret) +			goto unroll_vector_base; +  		break;  	default:  		/* clean up the resources and exit */  		goto unroll_vsi_init;  	} -	/* configure VSI nodes based on number of queues and TC's */ -	ice_for_each_traffic_class(i) { -		if (!(vsi->tc_cfg.ena_tc & BIT(i))) -			continue; +	return 0; -		if (vsi->type == ICE_VSI_CHNL) { -			if (!vsi->alloc_txq && vsi->num_txq) -				max_txqs[i] = vsi->num_txq; -			else -				max_txqs[i] = pf->num_lan_tx; +unroll_vector_base: +	/* reclaim SW interrupts back to the common pool */ +	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); +	pf->num_avail_sw_msix += vsi->num_q_vectors; +unroll_alloc_q_vector: +	ice_vsi_free_q_vectors(vsi); +unroll_vsi_init: +	ice_vsi_delete_from_hw(vsi); +unroll_get_qs: +	ice_vsi_put_qs(vsi); +unroll_vsi_alloc_stat: +	ice_vsi_free_stats(vsi); +unroll_vsi_alloc: +	ice_vsi_free_arrays(vsi); +	return ret; +} + +/** + * ice_vsi_cfg - configure a previously allocated VSI + * @vsi: pointer to VSI + * @params: parameters used to configure this VSI + */ +int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) +{ +	struct ice_pf *pf = vsi->back; +	int ret; + +	if (WARN_ON(params->type == ICE_VSI_VF && !params->vf)) +		return -EINVAL; + +	vsi->type = params->type; +	vsi->port_info = params->pi; + +	/* For VSIs which don't have a connected VF, this will be NULL */ +	vsi->vf = params->vf; + +	ret = ice_vsi_cfg_def(vsi, params); +	if (ret) +		return ret; + +	ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); +	if (ret) +		ice_vsi_decfg(vsi); + +	if (vsi->type == ICE_VSI_CTRL) { +		if (vsi->vf) { +			WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI); +			vsi->vf->ctrl_vsi_idx = vsi->idx;  		} else { -			max_txqs[i] = vsi->alloc_txq; +			WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI); +			pf->ctrl_vsi_idx = vsi->idx;  		}  	} -	dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); -	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, -			      max_txqs); -	if (ret) { -		dev_err(dev, "VSI %d failed lan queue config, error %d\n", -			vsi->vsi_num, ret); -		goto unroll_clear_rings; +	return ret; +} + 
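[Editor's illustrative aside, not part of the patch.] With the allocation/configuration split above, creating a VSI reduces to filling a parameter block and calling ice_vsi_setup(). A minimal sketch for a PF VSI, using only the ice_vsi_cfg_params fields this patch reads (type, pi, vf, ch, flags):

static struct ice_vsi *example_create_pf_vsi(struct ice_pf *pf)
{
	struct ice_vsi_cfg_params params = {
		.type	= ICE_VSI_PF,
		.pi	= pf->hw.port_info,	/* required by ice_vsi_setup() */
		.flags	= ICE_VSI_FLAG_INIT,	/* create the FW context */
		/* .vf and .ch stay NULL for a PF VSI */
	};

	return ice_vsi_setup(pf, &params);
}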
+/** + * ice_vsi_decfg - remove all VSI configuration + * @vsi: pointer to VSI + */ +void ice_vsi_decfg(struct ice_vsi *vsi) +{ +	struct ice_pf *pf = vsi->back; +	int err; + +	/* The Rx rule will only exist to remove if the LLDP FW +	 * engine is currently stopped +	 */ +	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && +	    !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) +		ice_cfg_sw_lldp(vsi, false, false); + +	ice_fltr_remove_all(vsi); +	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); +	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); +	if (err) +		dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", +			vsi->vsi_num, err); + +	if (ice_is_xdp_ena_vsi(vsi)) +		/* return value check can be skipped here, it always returns +		 * 0 if reset is in progress +		 */ +		ice_destroy_xdp_rings(vsi); + +	ice_vsi_clear_rings(vsi); +	ice_vsi_free_q_vectors(vsi); +	ice_vsi_put_qs(vsi); +	ice_vsi_free_arrays(vsi); + +	/* SR-IOV determines needed MSIX resources all at once instead of per +	 * VSI since when VFs are spawned we know how many VFs there are and how +	 * many interrupts each VF needs. SR-IOV MSIX resources are also +	 * cleared in the same manner. +	 */ +	if (vsi->type == ICE_VSI_CTRL && vsi->vf) { +		ice_free_vf_ctrl_res(pf, vsi); +	} else if (vsi->type != ICE_VSI_VF) { +		/* reclaim SW interrupts back to the common pool */ +		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); +		pf->num_avail_sw_msix += vsi->num_q_vectors; +		vsi->base_vector = 0; +	} + +	if (vsi->type == ICE_VSI_VF && +	    vsi->agg_node && vsi->agg_node->valid) +		vsi->agg_node->num_vsis--; +	if (vsi->agg_node) { +		vsi->agg_node->valid = false; +		vsi->agg_node->agg_id = 0; +	} +} + +/** + * ice_vsi_setup - Set up a VSI by a given type + * @pf: board private structure + * @params: parameters to use when creating the VSI + * + * This allocates the sw VSI structure and its queue resources. + * + * Returns pointer to the successfully allocated and configured VSI sw struct on + * success, NULL on failure. + */ +struct ice_vsi * +ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) +{ +	struct device *dev = ice_pf_to_dev(pf); +	struct ice_vsi *vsi; +	int ret; + +	/* ice_vsi_setup can only initialize a new VSI, and we must have +	 * a port_info structure for it. +	 */ +	if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || +	    WARN_ON(!params->pi)) +		return NULL; + +	vsi = ice_vsi_alloc(pf); +	if (!vsi) { +		dev_err(dev, "could not allocate VSI\n"); +		return NULL;  	} +	ret = ice_vsi_cfg(vsi, params); +	if (ret) +		goto err_vsi_cfg; +  	/* Add switch rule to drop all Tx Flow Control Frames, of look up  	 * type ETHERTYPE from VSIs, and restrict malicious VF from sending  	 * out PAUSE or PFC frames. If enabled, FW can still send FC frames. @@ -2666,33 +2973,21 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,  	 * be dropped so that VFs cannot send LLDP packets to reconfig DCB  	 * settings in the HW.  	 
*/ -	if (!ice_is_safe_mode(pf)) -		if (vsi->type == ICE_VSI_PF) { -			ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, -					 ICE_DROP_PACKET); -			ice_cfg_sw_lldp(vsi, true, true); -		} +	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { +		ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, +				 ICE_DROP_PACKET); +		ice_cfg_sw_lldp(vsi, true, true); +	}  	if (!vsi->agg_node)  		ice_set_agg_vsi(vsi); +  	return vsi; -unroll_clear_rings: -	ice_vsi_clear_rings(vsi); -unroll_vector_base: -	/* reclaim SW interrupts back to the common pool */ -	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); -	pf->num_avail_sw_msix += vsi->num_q_vectors; -unroll_alloc_q_vector: -	ice_vsi_free_q_vectors(vsi); -unroll_vsi_init: -	ice_vsi_delete(vsi); -unroll_get_qs: -	ice_vsi_put_qs(vsi); -unroll_vsi_alloc: -	if (vsi_type == ICE_VSI_VF) +err_vsi_cfg: +	if (params->type == ICE_VSI_VF)  		ice_enable_lag(pf->lag); -	ice_vsi_clear(vsi); +	ice_vsi_free(vsi);  	return NULL;  } @@ -2956,37 +3251,6 @@ void ice_napi_del(struct ice_vsi *vsi)  }  /** - * ice_free_vf_ctrl_res - Free the VF control VSI resource - * @pf: pointer to PF structure - * @vsi: the VSI to free resources for - * - * Check if the VF control VSI resource is still in use. If no VF is using it - * any more, release the VSI resource. Otherwise, leave it to be cleaned up - * once no other VF uses it. - */ -static void ice_free_vf_ctrl_res(struct ice_pf *pf,  struct ice_vsi *vsi) -{ -	struct ice_vf *vf; -	unsigned int bkt; - -	rcu_read_lock(); -	ice_for_each_vf_rcu(pf, bkt, vf) { -		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) { -			rcu_read_unlock(); -			return; -		} -	} -	rcu_read_unlock(); - -	/* No other VFs left that have control VSI. It is now safe to reclaim -	 * SW interrupts back to the common pool. -	 */ -	ice_free_res(pf->irq_tracker, vsi->base_vector, -		     ICE_RES_VF_CTRL_VEC_ID); -	pf->num_avail_sw_msix += vsi->num_q_vectors; -} - -/**   * ice_vsi_release - Delete a VSI and free its resources   * @vsi: the VSI being removed   * @@ -2995,7 +3259,6 @@ static void ice_free_vf_ctrl_res(struct ice_pf *pf,  struct ice_vsi *vsi)  int ice_vsi_release(struct ice_vsi *vsi)  {  	struct ice_pf *pf; -	int err;  	if (!vsi->back)  		return -ENODEV; @@ -3013,50 +3276,14 @@ int ice_vsi_release(struct ice_vsi *vsi)  		clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);  	} +	if (vsi->type == ICE_VSI_PF) +		ice_devlink_destroy_pf_port(pf); +  	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))  		ice_rss_clean(vsi); -	/* Disable VSI and free resources */ -	if (vsi->type != ICE_VSI_LB) -		ice_vsi_dis_irq(vsi);  	ice_vsi_close(vsi); - -	/* SR-IOV determines needed MSIX resources all at once instead of per -	 * VSI since when VFs are spawned we know how many VFs there are and how -	 * many interrupts each VF needs. SR-IOV MSIX resources are also -	 * cleared in the same manner. 
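[Editor's illustrative aside, not part of the patch.] ice_vsi_decfg() above strips everything ice_vsi_cfg_def() set up, and the rewritten ice_vsi_release() (continuing below) collapses the old ad hoc teardown into a close/decfg/delete sequence. A condensed sketch of that pairing, with the reset-recovery special case (where the SW struct is retained) omitted:

static void example_release(struct ice_vsi *vsi)
{
	ice_vsi_close(vsi);	/* stop queues and IRQs */
	ice_vsi_decfg(vsi);	/* filters, scheduler cfg, rings, vectors */
	ice_vsi_delete(vsi);	/* ice_vsi_delete_from_hw() + ice_vsi_free() */
}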
-	 */ -	if (vsi->type == ICE_VSI_CTRL && vsi->vf) { -		ice_free_vf_ctrl_res(pf, vsi); -	} else if (vsi->type != ICE_VSI_VF) { -		/* reclaim SW interrupts back to the common pool */ -		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); -		pf->num_avail_sw_msix += vsi->num_q_vectors; -	} - -	if (!ice_is_safe_mode(pf)) { -		if (vsi->type == ICE_VSI_PF) { -			ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, -					    ICE_DROP_PACKET); -			ice_cfg_sw_lldp(vsi, true, false); -			/* The Rx rule will only exist to remove if the LLDP FW -			 * engine is currently stopped -			 */ -			if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) -				ice_cfg_sw_lldp(vsi, false, false); -		} -	} - -	if (ice_is_vsi_dflt_vsi(vsi)) -		ice_clear_dflt_vsi(vsi); -	ice_fltr_remove_all(vsi); -	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); -	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); -	if (err) -		dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", -			vsi->vsi_num, err); -	ice_vsi_delete(vsi); -	ice_vsi_free_q_vectors(vsi); +	ice_vsi_decfg(vsi);  	if (vsi->netdev) {  		if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) { @@ -3070,22 +3297,12 @@ int ice_vsi_release(struct ice_vsi *vsi)  		}  	} -	if (vsi->type == ICE_VSI_PF) -		ice_devlink_destroy_pf_port(pf); - -	if (vsi->type == ICE_VSI_VF && -	    vsi->agg_node && vsi->agg_node->valid) -		vsi->agg_node->num_vsis--; -	ice_vsi_clear_rings(vsi); - -	ice_vsi_put_qs(vsi); -  	/* retain SW VSI data structure since it is needed to unregister and  	 * free VSI netdev when PF is not in reset recovery pending state,\  	 * for ex: during rmmod.  	 */  	if (!ice_is_reset_in_progress(pf->state)) -		ice_vsi_clear(vsi); +		ice_vsi_delete(vsi);  	return 0;  } @@ -3205,31 +3422,72 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,  }  /** + * ice_vsi_realloc_stat_arrays - Frees unused stat structures + * @vsi: VSI pointer + * @prev_txq: Number of Tx rings before ring reallocation + * @prev_rxq: Number of Rx rings before ring reallocation + */ +static void +ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) +{ +	struct ice_vsi_stats *vsi_stat; +	struct ice_pf *pf = vsi->back; +	int i; + +	if (!prev_txq || !prev_rxq) +		return; +	if (vsi->type == ICE_VSI_CHNL) +		return; + +	vsi_stat = pf->vsi_stats[vsi->idx]; + +	if (vsi->num_txq < prev_txq) { +		for (i = vsi->num_txq; i < prev_txq; i++) { +			if (vsi_stat->tx_ring_stats[i]) { +				kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); +				WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); +			} +		} +	} + +	if (vsi->num_rxq < prev_rxq) { +		for (i = vsi->num_rxq; i < prev_rxq; i++) { +			if (vsi_stat->rx_ring_stats[i]) { +				kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); +				WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); +			} +		} +	} +} + +/**   * ice_vsi_rebuild - Rebuild VSI after reset   * @vsi: VSI to be rebuild - * @init_vsi: is this an initialization or a reconfigure of the VSI + * @vsi_flags: flags used for VSI rebuild flow + * + * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or + * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.   
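[Editor's illustrative aside, not part of the patch.] ice_vsi_realloc_stat_arrays() above retires per-ring stats objects with the usual RCU pattern: unpublish the pointer, then let kfree_rcu() reclaim the memory after a grace period, so a concurrent stats reader never touches freed data. The shape of that pattern in isolation ('rcu' is the rcu_head member of ice_ring_stats, as the kfree_rcu() calls above imply):

static void example_retire_stats(struct ice_vsi_stats *vsi_stat, int i)
{
	struct ice_ring_stats *old = vsi_stat->tx_ring_stats[i];

	if (!old)
		return;

	WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);	/* unpublish */
	kfree_rcu(old, rcu);	/* freed only after all RCU readers finish */
}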
*   * Returns 0 on success and negative value on failure   */ -int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) +int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)  { -	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; +	struct ice_vsi_cfg_params params = {};  	struct ice_coalesce_stored *coalesce; +	int ret, prev_txq, prev_rxq;  	int prev_num_q_vectors = 0; -	enum ice_vsi_type vtype;  	struct ice_pf *pf; -	int ret, i;  	if (!vsi)  		return -EINVAL; +	params = ice_vsi_to_params(vsi); +	params.flags = vsi_flags; +  	pf = vsi->back; -	vtype = vsi->type; -	if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf)) +	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))  		return -EINVAL; -	ice_vsi_init_vlan_ops(vsi); -  	coalesce = kcalloc(vsi->num_q_vectors,  			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);  	if (!coalesce) @@ -3237,173 +3495,35 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)  	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); -	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); -	ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); -	if (ret) -		dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", -			vsi->vsi_num, ret); -	ice_vsi_free_q_vectors(vsi); - -	/* SR-IOV determines needed MSIX resources all at once instead of per -	 * VSI since when VFs are spawned we know how many VFs there are and how -	 * many interrupts each VF needs. SR-IOV MSIX resources are also -	 * cleared in the same manner. -	 */ -	if (vtype != ICE_VSI_VF) { -		/* reclaim SW interrupts back to the common pool */ -		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); -		pf->num_avail_sw_msix += vsi->num_q_vectors; -		vsi->base_vector = 0; -	} - -	if (ice_is_xdp_ena_vsi(vsi)) -		/* return value check can be skipped here, it always returns -		 * 0 if reset is in progress -		 */ -		ice_destroy_xdp_rings(vsi); -	ice_vsi_put_qs(vsi); -	ice_vsi_clear_rings(vsi); -	ice_vsi_free_arrays(vsi); -	if (vtype == ICE_VSI_VF) -		ice_vsi_set_num_qs(vsi, vsi->vf); -	else -		ice_vsi_set_num_qs(vsi, NULL); - -	ret = ice_vsi_alloc_arrays(vsi); -	if (ret < 0) -		goto err_vsi; +	prev_txq = vsi->num_txq; +	prev_rxq = vsi->num_rxq; -	ice_vsi_get_qs(vsi); - -	ice_alloc_fd_res(vsi); -	ice_vsi_set_tc_cfg(vsi); - -	/* Initialize VSI struct elements and create VSI in FW */ -	ret = ice_vsi_init(vsi, init_vsi); -	if (ret < 0) -		goto err_vsi; - -	switch (vtype) { -	case ICE_VSI_CTRL: -	case ICE_VSI_SWITCHDEV_CTRL: -	case ICE_VSI_PF: -		ret = ice_vsi_alloc_q_vectors(vsi); -		if (ret) -			goto err_rings; - -		ret = ice_vsi_setup_vector_base(vsi); -		if (ret) -			goto err_vectors; - -		ret = ice_vsi_set_q_vectors_reg_idx(vsi); -		if (ret) -			goto err_vectors; - -		ret = ice_vsi_alloc_rings(vsi); -		if (ret) -			goto err_vectors; - -		ice_vsi_map_rings_to_vectors(vsi); -		if (ice_is_xdp_ena_vsi(vsi)) { -			ret = ice_vsi_determine_xdp_res(vsi); -			if (ret) -				goto err_vectors; -			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); -			if (ret) -				goto err_vectors; -		} -		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */ -		if (vtype != ICE_VSI_CTRL) -			/* Do not exit if configuring RSS had an issue, at -			 * least receive traffic on first queue. 
Hence no -			 * need to capture return value -			 */ -			if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) -				ice_vsi_cfg_rss_lut_key(vsi); - -		/* disable or enable CRC stripping */ -		if (vsi->netdev) -			ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features & -					      NETIF_F_RXFCS)); - -		break; -	case ICE_VSI_VF: -		ret = ice_vsi_alloc_q_vectors(vsi); -		if (ret) -			goto err_rings; - -		ret = ice_vsi_set_q_vectors_reg_idx(vsi); -		if (ret) -			goto err_vectors; - -		ret = ice_vsi_alloc_rings(vsi); -		if (ret) -			goto err_vectors; - -		break; -	case ICE_VSI_CHNL: -		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { -			ice_vsi_cfg_rss_lut_key(vsi); -			ice_vsi_set_rss_flow_fld(vsi); -		} -		break; -	default: -		break; -	} - -	/* configure VSI nodes based on number of queues and TC's */ -	for (i = 0; i < vsi->tc_cfg.numtc; i++) { -		/* configure VSI nodes based on number of queues and TC's. -		 * ADQ creates VSIs for each TC/Channel but doesn't -		 * allocate queues instead it reconfigures the PF queues -		 * as per the TC command. So max_txqs should point to the -		 * PF Tx queues. -		 */ -		if (vtype == ICE_VSI_CHNL) -			max_txqs[i] = pf->num_lan_tx; -		else -			max_txqs[i] = vsi->alloc_txq; - -		if (ice_is_xdp_ena_vsi(vsi)) -			max_txqs[i] += vsi->num_xdp_txq; -	} - -	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) -		/* If MQPRIO is set, means channel code path, hence for main -		 * VSI's, use TC as 1 -		 */ -		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); -	else -		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, -				      vsi->tc_cfg.ena_tc, max_txqs); +	ice_vsi_decfg(vsi); +	ret = ice_vsi_cfg_def(vsi, ¶ms); +	if (ret) +		goto err_vsi_cfg; +	ret = ice_vsi_cfg_tc_lan(pf, vsi);  	if (ret) { -		dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n", -			vsi->vsi_num, ret); -		if (init_vsi) { +		if (vsi_flags & ICE_VSI_FLAG_INIT) {  			ret = -EIO; -			goto err_vectors; +			goto err_vsi_cfg_tc_lan;  		} else { +			kfree(coalesce);  			return ice_schedule_reset(pf, ICE_RESET_PFR);  		}  	} + +	ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq); +  	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);  	kfree(coalesce);  	return 0; -err_vectors: -	ice_vsi_free_q_vectors(vsi); -err_rings: -	if (vsi->netdev) { -		vsi->current_netdev_flags = 0; -		unregister_netdev(vsi->netdev); -		free_netdev(vsi->netdev); -		vsi->netdev = NULL; -	} -err_vsi: -	ice_vsi_clear(vsi); -	set_bit(ICE_RESET_FAILED, pf->state); +err_vsi_cfg_tc_lan: +	ice_vsi_decfg(vsi); +err_vsi_cfg:  	kfree(coalesce);  	return ret;  } @@ -3728,9 +3848,9 @@ static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes   */  void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)  { -	u64_stats_update_begin(&tx_ring->syncp); -	ice_update_ring_stats(&tx_ring->stats, pkts, bytes); -	u64_stats_update_end(&tx_ring->syncp); +	u64_stats_update_begin(&tx_ring->ring_stats->syncp); +	ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes); +	u64_stats_update_end(&tx_ring->ring_stats->syncp);  }  /** @@ -3741,9 +3861,9 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)   */  void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)  { -	u64_stats_update_begin(&rx_ring->syncp); -	ice_update_ring_stats(&rx_ring->stats, pkts, bytes); -	u64_stats_update_end(&rx_ring->syncp); +	u64_stats_update_begin(&rx_ring->ring_stats->syncp); +	ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes); +	
u64_stats_update_end(&rx_ring->ring_stats->syncp);  }  /** @@ -3850,33 +3970,11 @@ int ice_clear_dflt_vsi(struct ice_vsi *vsi)   */  int ice_get_link_speed_mbps(struct ice_vsi *vsi)  { -	switch (vsi->port_info->phy.link_info.link_speed) { -	case ICE_AQ_LINK_SPEED_100GB: -		return SPEED_100000; -	case ICE_AQ_LINK_SPEED_50GB: -		return SPEED_50000; -	case ICE_AQ_LINK_SPEED_40GB: -		return SPEED_40000; -	case ICE_AQ_LINK_SPEED_25GB: -		return SPEED_25000; -	case ICE_AQ_LINK_SPEED_20GB: -		return SPEED_20000; -	case ICE_AQ_LINK_SPEED_10GB: -		return SPEED_10000; -	case ICE_AQ_LINK_SPEED_5GB: -		return SPEED_5000; -	case ICE_AQ_LINK_SPEED_2500MB: -		return SPEED_2500; -	case ICE_AQ_LINK_SPEED_1000MB: -		return SPEED_1000; -	case ICE_AQ_LINK_SPEED_100MB: -		return SPEED_100; -	case ICE_AQ_LINK_SPEED_10MB: -		return SPEED_10; -	case ICE_AQ_LINK_SPEED_UNKNOWN: -	default: -		return 0; -	} +	unsigned int link_speed; + +	link_speed = vsi->port_info->phy.link_info.link_speed; + +	return (int)ice_get_link_speed(fls(link_speed) - 1);  }  /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index dcdf69a693e9..75221478f2dc 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -7,6 +7,47 @@  #include "ice.h"  #include "ice_vlan.h" +/* Flags used for VSI configuration and rebuild */ +#define ICE_VSI_FLAG_INIT	BIT(0) +#define ICE_VSI_FLAG_NO_INIT	0 + +/** + * struct ice_vsi_cfg_params - VSI configuration parameters + * @pi: pointer to the port_info instance for the VSI + * @ch: pointer to the channel structure for the VSI, may be NULL + * @vf: pointer to the VF associated with this VSI, may be NULL + * @type: the type of VSI to configure + * @flags: VSI flags used for rebuild and configuration + * + * Parameter structure used when configuring a new VSI. + */ +struct ice_vsi_cfg_params { +	struct ice_port_info *pi; +	struct ice_channel *ch; +	struct ice_vf *vf; +	enum ice_vsi_type type; +	u32 flags; +}; + +/** + * ice_vsi_to_params - Get parameters for an existing VSI + * @vsi: the VSI to get parameters for + * + * Fill a parameter structure for reconfiguring a VSI with its current + * parameters, such as during a rebuild operation. 
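+ *
+ * Callers typically override @flags after filling the structure; for
+ * example, ice_vsi_rebuild() does:
+ *
+ *	params = ice_vsi_to_params(vsi);
+ *	params.flags = vsi_flags;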
+ */ +static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi) +{ +	struct ice_vsi_cfg_params params = {}; + +	params.pi = vsi->port_info; +	params.ch = vsi->ch; +	params.vf = vsi->vf; +	params.type = vsi->type; + +	return params; +} +  const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);  bool ice_pf_state_is_nominal(struct ice_pf *pf); @@ -42,7 +83,6 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);  int ice_set_link(struct ice_vsi *vsi, bool ena);  void ice_vsi_delete(struct ice_vsi *vsi); -int ice_vsi_clear(struct ice_vsi *vsi);  int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc); @@ -51,9 +91,7 @@ int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi);  void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);  struct ice_vsi * -ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, -	      enum ice_vsi_type vsi_type, struct ice_vf *vf, -	      struct ice_channel *ch); +ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);  void ice_napi_del(struct ice_vsi *vsi); @@ -63,6 +101,7 @@ void ice_vsi_close(struct ice_vsi *vsi);  int ice_ena_vsi(struct ice_vsi *vsi, bool locked); +void ice_vsi_decfg(struct ice_vsi *vsi);  void ice_dis_vsi(struct ice_vsi *vsi, bool locked);  int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id); @@ -70,7 +109,8 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);  int  ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id); -int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi); +int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags); +int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params);  bool ice_is_reset_in_progress(unsigned long *state);  int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index ca2898467dcb..567694bf098b 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -22,6 +22,7 @@  #include "ice_eswitch.h"  #include "ice_tc_lib.h"  #include "ice_vsi_vlan_ops.h" +#include <net/xdp_sock_drv.h>  #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"  static const char ice_driver_string[] = DRV_SUMMARY; @@ -44,7 +45,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX  MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");  #endif /* !CONFIG_DYNAMIC_DEBUG */ -static DEFINE_IDA(ice_aux_ida);  DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);  EXPORT_SYMBOL(ice_xdp_locking_key); @@ -130,12 +130,17 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)  	ice_for_each_txq(vsi, i) {  		struct ice_tx_ring *tx_ring = vsi->tx_rings[i]; +		struct ice_ring_stats *ring_stats;  		if (!tx_ring)  			continue;  		if (ice_ring_ch_enabled(tx_ring))  			continue; +		ring_stats = tx_ring->ring_stats; +		if (!ring_stats) +			continue; +  		if (tx_ring->desc) {  			/* If packet counter has not changed the queue is  			 * likely stalled, so force an interrupt for this @@ -144,8 +149,8 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)  			 * prev_pkt would be negative if there was no  			 * pending work.  			 
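+			 *
+			 * In other words, if stats.pkts has not advanced
+			 * since the previous service task pass while work
+			 * was still pending, the queue is treated as stalled
+			 * and a software interrupt is fired to revive it.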
*/ -			packets = tx_ring->stats.pkts & INT_MAX; -			if (tx_ring->tx_stats.prev_pkt == packets) { +			packets = ring_stats->stats.pkts & INT_MAX; +			if (ring_stats->tx_stats.prev_pkt == packets) {  				/* Trigger sw interrupt to revive the queue */  				ice_trigger_sw_intr(hw, tx_ring->q_vector);  				continue; @@ -155,7 +160,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)  			 * to ice_get_tx_pending()  			 */  			smp_rmb(); -			tx_ring->tx_stats.prev_pkt = +			ring_stats->tx_stats.prev_pkt =  			    ice_get_tx_pending(tx_ring) ? packets : -1;  		}  	} @@ -270,6 +275,8 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)  	if (status && status != -EEXIST)  		return status; +	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n", +		   vsi->vsi_num, promisc_m);  	return 0;  } @@ -295,24 +302,12 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)  						    promisc_m, 0);  	} +	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n", +		   vsi->vsi_num, promisc_m);  	return status;  }  /** - * ice_get_devlink_port - Get devlink port from netdev - * @netdev: the netdevice structure - */ -static struct devlink_port *ice_get_devlink_port(struct net_device *netdev) -{ -	struct ice_pf *pf = ice_netdev_to_pf(netdev); - -	if (!ice_is_switchdev_running(pf)) -		return NULL; - -	return &pf->devlink_port; -} - -/**   * ice_vsi_sync_fltr - Update the VSI filter list to the HW   * @vsi: ptr to the VSI   * @@ -423,6 +418,16 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)  				}  				err = 0;  				vlan_ops->dis_rx_filtering(vsi); + +				/* promiscuous mode implies allmulticast so +				 * that VSIs that are in promiscuous mode are +				 * subscribed to multicast packets coming to +				 * the port +				 */ +				err = ice_set_promisc(vsi, +						      ICE_MCAST_PROMISC_BITS); +				if (err) +					goto out_promisc;  			}  		} else {  			/* Clear Rx filter to remove traffic from wire */ @@ -439,6 +444,18 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)  				    NETIF_F_HW_VLAN_CTAG_FILTER)  					vlan_ops->ena_rx_filtering(vsi);  			} + +			/* disable allmulti here, but only if allmulti is not +			 * still enabled for the netdev +			 */ +			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) { +				err = ice_clear_promisc(vsi, +							ICE_MCAST_PROMISC_BITS); +				if (err) { +					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n", +						   err, vsi->vsi_num); +				} +			}  		}  	}  	goto exit; @@ -547,7 +564,7 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)  	/* Disable VFs until reset is completed */  	mutex_lock(&pf->vfs.table_lock);  	ice_for_each_vf(pf, bkt, vf) -		ice_set_vf_state_qs_dis(vf); +		ice_set_vf_state_dis(vf);  	mutex_unlock(&pf->vfs.table_lock);  	if (ice_is_eswitch_mode_switchdev(pf)) { @@ -1120,8 +1137,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,  	if (link_up == old_link && link_speed == old_link_speed)  		return 0; -	if (!ice_is_e810(&pf->hw)) -		ice_ptp_link_change(pf, pf->hw.pf_id, link_up); +	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);  	if (ice_is_dcb_active(pf)) {  		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) @@ -2560,21 +2576,26 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)  	ice_for_each_xdp_txq(vsi, i) {  		u16 xdp_q_idx = vsi->alloc_txq + i; +		struct ice_ring_stats *ring_stats;  		struct ice_tx_ring *xdp_ring;  		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); -  		if (!xdp_ring)  			goto 
free_xdp_rings; +		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); +		if (!ring_stats) { +			ice_free_tx_ring(xdp_ring); +			goto free_xdp_rings; +		} + +		xdp_ring->ring_stats = ring_stats;  		xdp_ring->q_index = xdp_q_idx;  		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];  		xdp_ring->vsi = vsi;  		xdp_ring->netdev = NULL;  		xdp_ring->dev = dev;  		xdp_ring->count = vsi->num_tx_desc; -		xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1; -		xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;  		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);  		if (ice_setup_tx_ring(xdp_ring))  			goto free_xdp_rings; @@ -2589,9 +2610,13 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)  	return 0;  free_xdp_rings: -	for (; i >= 0; i--) -		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) +	for (; i >= 0; i--) { +		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) { +			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); +			vsi->xdp_rings[i]->ring_stats = NULL;  			ice_free_tx_ring(vsi->xdp_rings[i]); +		} +	}  	return -ENOMEM;  } @@ -2792,6 +2817,8 @@ free_qmap:  				synchronize_rcu();  				ice_free_tx_ring(vsi->xdp_rings[i]);  			} +			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); +			vsi->xdp_rings[i]->ring_stats = NULL;  			kfree_rcu(vsi->xdp_rings[i], rcu);  			vsi->xdp_rings[i] = NULL;  		} @@ -2860,6 +2887,18 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)  }  /** + * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP + * @vsi: Pointer to VSI structure + */ +static int ice_max_xdp_frame_size(struct ice_vsi *vsi) +{ +	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) +		return ICE_RXBUF_1664; +	else +		return ICE_RXBUF_3072; +} + +/**   * ice_xdp_setup_prog - Add or remove XDP eBPF program   * @vsi: VSI to setup XDP for   * @prog: XDP program @@ -2869,13 +2908,16 @@ static int  ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,  		   struct netlink_ext_ack *extack)  { -	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; +	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;  	bool if_running = netif_running(vsi->netdev);  	int ret = 0, xdp_ring_err = 0; -	if (frame_size > vsi->rx_buf_len) { -		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); -		return -EOPNOTSUPP; +	if (prog && !prog->aux->xdp_has_frags) { +		if (frame_size > ice_max_xdp_frame_size(vsi)) { +			NL_SET_ERR_MSG_MOD(extack, +					   "MTU is too large for linear frames and XDP prog does not support frags"); +			return -EOPNOTSUPP; +		}  	}  	/* need to stop netdev while setting up the program for Rx rings */ @@ -2896,11 +2938,13 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,  			if (xdp_ring_err)  				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");  		} +		xdp_features_set_redirect_target(vsi->netdev, true);  		/* reallocate Rx queues that are used for zero-copy */  		xdp_ring_err = ice_realloc_zc_buf(vsi, true);  		if (xdp_ring_err)  			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");  	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) { +		xdp_features_clear_redirect_target(vsi->netdev);  		xdp_ring_err = ice_destroy_xdp_rings(vsi);  		if (xdp_ring_err)  			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); @@ -3315,10 +3359,11 @@ static void ice_napi_add(struct ice_vsi *vsi)  /**   * ice_set_ops - set netdev and ethtools ops for the given netdev - * @netdev: netdev instance + * @vsi: the VSI associated with the new netdev   */ -static void ice_set_ops(struct net_device *netdev) +static void 
ice_set_ops(struct ice_vsi *vsi)  { +	struct net_device *netdev = vsi->netdev;  	struct ice_pf *pf = ice_netdev_to_pf(netdev);  	if (ice_is_safe_mode(pf)) { @@ -3330,6 +3375,13 @@ static void ice_set_ops(struct net_device *netdev)  	netdev->netdev_ops = &ice_netdev_ops;  	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;  	ice_set_ethtool_ops(netdev); + +	if (vsi->type != ICE_VSI_PF) +		return; + +	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | +			       NETDEV_XDP_ACT_XSK_ZEROCOPY | +			       NETDEV_XDP_ACT_RX_SG;  }  /** @@ -3418,53 +3470,8 @@ static void ice_set_netdev_features(struct net_device *netdev)  	 * be changed at runtime  	 */  	netdev->hw_features |= NETIF_F_RXFCS; -} -/** - * ice_cfg_netdev - Allocate, configure and register a netdev - * @vsi: the VSI associated with the new netdev - * - * Returns 0 on success, negative value on failure - */ -static int ice_cfg_netdev(struct ice_vsi *vsi) -{ -	struct ice_netdev_priv *np; -	struct net_device *netdev; -	u8 mac_addr[ETH_ALEN]; - -	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, -				    vsi->alloc_rxq); -	if (!netdev) -		return -ENOMEM; - -	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); -	vsi->netdev = netdev; -	np = netdev_priv(netdev); -	np->vsi = vsi; - -	ice_set_netdev_features(netdev); - -	ice_set_ops(netdev); - -	if (vsi->type == ICE_VSI_PF) { -		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); -		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); -		eth_hw_addr_set(netdev, mac_addr); -		ether_addr_copy(netdev->perm_addr, mac_addr); -	} - -	netdev->priv_flags |= IFF_UNICAST_FLT; - -	/* Setup netdev TC information */ -	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); - -	/* setup watchdog timeout value to be 5 second */ -	netdev->watchdog_timeo = 5 * HZ; - -	netdev->min_mtu = ETH_MIN_MTU; -	netdev->max_mtu = ICE_MAX_MTU; - -	return 0; +	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);  }  /** @@ -3492,14 +3499,27 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)  static struct ice_vsi *  ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)  { -	return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL); +	struct ice_vsi_cfg_params params = {}; + +	params.type = ICE_VSI_PF; +	params.pi = pi; +	params.flags = ICE_VSI_FLAG_INIT; + +	return ice_vsi_setup(pf, ¶ms);  }  static struct ice_vsi *  ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,  		   struct ice_channel *ch)  { -	return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch); +	struct ice_vsi_cfg_params params = {}; + +	params.type = ICE_VSI_CHNL; +	params.pi = pi; +	params.ch = ch; +	params.flags = ICE_VSI_FLAG_INIT; + +	return ice_vsi_setup(pf, ¶ms);  }  /** @@ -3513,7 +3533,13 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,  static struct ice_vsi *  ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)  { -	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL); +	struct ice_vsi_cfg_params params = {}; + +	params.type = ICE_VSI_CTRL; +	params.pi = pi; +	params.flags = ICE_VSI_FLAG_INIT; + +	return ice_vsi_setup(pf, ¶ms);  }  /** @@ -3527,7 +3553,13 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)  struct ice_vsi *  ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)  { -	return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL); +	struct ice_vsi_cfg_params params = {}; + +	params.type = ICE_VSI_LB; +	params.pi = pi; +	params.flags = ICE_VSI_FLAG_INIT; + +	return ice_vsi_setup(pf, ¶ms);  }  /** @@ -3687,20 +3719,6 @@ static void 
ice_tc_indir_block_unregister(struct ice_vsi *vsi)  }  /** - * ice_tc_indir_block_remove - clean indirect TC block notifications - * @pf: PF structure - */ -static void ice_tc_indir_block_remove(struct ice_pf *pf) -{ -	struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); - -	if (!pf_vsi) -		return; - -	ice_tc_indir_block_unregister(pf_vsi); -} - -/**   * ice_tc_indir_block_register - Register TC indirect block notifications   * @vsi: VSI struct which has the netdev   * @@ -3720,78 +3738,6 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi)  }  /** - * ice_setup_pf_sw - Setup the HW switch on startup or after reset - * @pf: board private structure - * - * Returns 0 on success, negative value on failure - */ -static int ice_setup_pf_sw(struct ice_pf *pf) -{ -	struct device *dev = ice_pf_to_dev(pf); -	bool dvm = ice_is_dvm_ena(&pf->hw); -	struct ice_vsi *vsi; -	int status; - -	if (ice_is_reset_in_progress(pf->state)) -		return -EBUSY; - -	status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); -	if (status) -		return -EIO; - -	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); -	if (!vsi) -		return -ENOMEM; - -	/* init channel list */ -	INIT_LIST_HEAD(&vsi->ch_list); - -	status = ice_cfg_netdev(vsi); -	if (status) -		goto unroll_vsi_setup; -	/* netdev has to be configured before setting frame size */ -	ice_vsi_cfg_frame_size(vsi); - -	/* init indirect block notifications */ -	status = ice_tc_indir_block_register(vsi); -	if (status) { -		dev_err(dev, "Failed to register netdev notifier\n"); -		goto unroll_cfg_netdev; -	} - -	/* Setup DCB netlink interface */ -	ice_dcbnl_setup(vsi); - -	/* registering the NAPI handler requires both the queues and -	 * netdev to be created, which are done in ice_pf_vsi_setup() -	 * and ice_cfg_netdev() respectively -	 */ -	ice_napi_add(vsi); - -	status = ice_init_mac_fltr(pf); -	if (status) -		goto unroll_napi_add; - -	return 0; - -unroll_napi_add: -	ice_tc_indir_block_unregister(vsi); -unroll_cfg_netdev: -	if (vsi) { -		ice_napi_del(vsi); -		if (vsi->netdev) { -			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); -			free_netdev(vsi->netdev); -			vsi->netdev = NULL; -		} -	} - -unroll_vsi_setup: -	ice_vsi_release(vsi); -	return status; -} - -/**   * ice_get_avail_q_count - Get count of queues in use   * @pf_qmap: bitmap to get queue use count from   * @lock: pointer to a mutex that protects access to pf_qmap @@ -4192,12 +4138,13 @@ bool ice_is_wol_supported(struct ice_hw *hw)   * @vsi: VSI being changed   * @new_rx: new number of Rx queues   * @new_tx: new number of Tx queues + * @locked: is adev device_lock held   *   * Only change the number of queues if new_tx, or new_rx is non-0.   *   * Returns 0 on success.   
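+ *
+ * Note: @locked indicates whether the caller already holds the auxiliary
+ * device lock; it is passed through to ice_pf_dcb_recfg() so the DCB
+ * reconfiguration path can avoid taking that lock again.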
*/ -int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) +int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)  {  	struct ice_pf *pf = vsi->back;  	int err = 0, timeout = 50; @@ -4219,14 +4166,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)  	/* set for the next time the netdev is started */  	if (!netif_running(vsi->netdev)) { -		ice_vsi_rebuild(vsi, false); +		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);  		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");  		goto done;  	}  	ice_vsi_close(vsi); -	ice_vsi_rebuild(vsi, false); -	ice_pf_dcb_recfg(pf); +	ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); +	ice_pf_dcb_recfg(pf, locked);  	ice_vsi_open(vsi);  done:  	clear_bit(ICE_CFG_BUSY, pf->state); @@ -4488,6 +4435,23 @@ err_vsi_open:  	return err;  } +static void ice_deinit_fdir(struct ice_pf *pf) +{ +	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf); + +	if (!vsi) +		return; + +	ice_vsi_manage_fdir(vsi, false); +	ice_vsi_release(vsi); +	if (pf->ctrl_vsi_idx != ICE_NO_VSI) { +		pf->vsi[pf->ctrl_vsi_idx] = NULL; +		pf->ctrl_vsi_idx = ICE_NO_VSI; +	} + +	mutex_destroy(&(&pf->hw)->fdir_fltr_lock); +} +  /**   * ice_get_opt_fw_name - return optional firmware file name or NULL   * @pf: pointer to the PF instance @@ -4587,124 +4551,172 @@ static void ice_print_wake_reason(struct ice_pf *pf)  }  /** - * ice_register_netdev - register netdev and devlink port - * @pf: pointer to the PF struct + * ice_register_netdev - register netdev + * @vsi: pointer to the VSI struct   */ -static int ice_register_netdev(struct ice_pf *pf) +static int ice_register_netdev(struct ice_vsi *vsi)  { -	struct ice_vsi *vsi; -	int err = 0; +	int err; -	vsi = ice_get_main_vsi(pf);  	if (!vsi || !vsi->netdev)  		return -EIO; -	err = ice_devlink_create_pf_port(pf); -	if (err) -		goto err_devlink_create; -  	err = register_netdev(vsi->netdev);  	if (err) -		goto err_register_netdev; +		return err;  	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);  	netif_carrier_off(vsi->netdev);  	netif_tx_stop_all_queues(vsi->netdev); -	devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev); -  	return 0; -err_register_netdev: -	ice_devlink_destroy_pf_port(pf); -err_devlink_create: -	free_netdev(vsi->netdev); -	vsi->netdev = NULL; -	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); -	return err; +} + +static void ice_unregister_netdev(struct ice_vsi *vsi) +{ +	if (!vsi || !vsi->netdev) +		return; + +	unregister_netdev(vsi->netdev); +	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);  }  /** - * ice_probe - Device initialization routine - * @pdev: PCI device information struct - * @ent: entry in ice_pci_tbl + * ice_cfg_netdev - Allocate, configure and register a netdev + * @vsi: the VSI associated with the new netdev   * - * Returns 0 on success, negative on failure + * Returns 0 on success, negative value on failure   */ -static int -ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) +static int ice_cfg_netdev(struct ice_vsi *vsi)  { -	struct device *dev = &pdev->dev; -	struct ice_pf *pf; -	struct ice_hw *hw; -	int i, err; +	struct ice_netdev_priv *np; +	struct net_device *netdev; +	u8 mac_addr[ETH_ALEN]; -	if (pdev->is_virtfn) { -		dev_err(dev, "can't probe a virtual function\n"); -		return -EINVAL; +	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, +				    vsi->alloc_rxq); +	if (!netdev) +		return -ENOMEM; + +	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); +	vsi->netdev = netdev; +	np = 
netdev_priv(netdev); +	np->vsi = vsi; + +	ice_set_netdev_features(netdev); +	ice_set_ops(vsi); + +	if (vsi->type == ICE_VSI_PF) { +		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); +		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); +		eth_hw_addr_set(netdev, mac_addr);  	} -	/* this driver uses devres, see -	 * Documentation/driver-api/driver-model/devres.rst -	 */ -	err = pcim_enable_device(pdev); +	netdev->priv_flags |= IFF_UNICAST_FLT; + +	/* Setup netdev TC information */ +	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); + +	netdev->max_mtu = ICE_MAX_MTU; + +	return 0; +} + +static void ice_decfg_netdev(struct ice_vsi *vsi) +{ +	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); +	free_netdev(vsi->netdev); +	vsi->netdev = NULL; +} + +static int ice_start_eth(struct ice_vsi *vsi) +{ +	int err; + +	err = ice_init_mac_fltr(vsi->back);  	if (err)  		return err; -	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); -	if (err) { -		dev_err(dev, "BAR0 I/O map error %d\n", err); -		return err; -	} +	rtnl_lock(); +	err = ice_vsi_open(vsi); +	rtnl_unlock(); -	pf = ice_allocate_pf(dev); -	if (!pf) -		return -ENOMEM; +	return err; +} -	/* initialize Auxiliary index to invalid value */ -	pf->aux_idx = -1; +static int ice_init_eth(struct ice_pf *pf) +{ +	struct ice_vsi *vsi = ice_get_main_vsi(pf); +	int err; -	/* set up for high or low DMA */ -	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); -	if (err) { -		dev_err(dev, "DMA configuration failed: 0x%x\n", err); +	if (!vsi) +		return -EINVAL; + +	/* init channel list */ +	INIT_LIST_HEAD(&vsi->ch_list); + +	err = ice_cfg_netdev(vsi); +	if (err)  		return err; -	} +	/* Setup DCB netlink interface */ +	ice_dcbnl_setup(vsi); -	pci_enable_pcie_error_reporting(pdev); -	pci_set_master(pdev); +	err = ice_init_mac_fltr(pf); +	if (err) +		goto err_init_mac_fltr; -	pf->pdev = pdev; -	pci_set_drvdata(pdev, pf); -	set_bit(ICE_DOWN, pf->state); -	/* Disable service task until DOWN bit is cleared */ -	set_bit(ICE_SERVICE_DIS, pf->state); +	err = ice_devlink_create_pf_port(pf); +	if (err) +		goto err_devlink_create_pf_port; -	hw = &pf->hw; -	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; -	pci_save_state(pdev); +	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); -	hw->back = pf; -	hw->vendor_id = pdev->vendor; -	hw->device_id = pdev->device; -	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); -	hw->subsystem_vendor_id = pdev->subsystem_vendor; -	hw->subsystem_device_id = pdev->subsystem_device; -	hw->bus.device = PCI_SLOT(pdev->devfn); -	hw->bus.func = PCI_FUNC(pdev->devfn); -	ice_set_ctrlq_len(hw); +	err = ice_register_netdev(vsi); +	if (err) +		goto err_register_netdev; -	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); +	err = ice_tc_indir_block_register(vsi); +	if (err) +		goto err_tc_indir_block_register; -#ifndef CONFIG_DYNAMIC_DEBUG -	if (debug < -1) -		hw->debug_mask = debug; -#endif +	ice_napi_add(vsi); + +	return 0; + +err_tc_indir_block_register: +	ice_unregister_netdev(vsi); +err_register_netdev: +	ice_devlink_destroy_pf_port(pf); +err_devlink_create_pf_port: +err_init_mac_fltr: +	ice_decfg_netdev(vsi); +	return err; +} + +static void ice_deinit_eth(struct ice_pf *pf) +{ +	struct ice_vsi *vsi = ice_get_main_vsi(pf); + +	if (!vsi) +		return; + +	ice_vsi_close(vsi); +	ice_unregister_netdev(vsi); +	ice_devlink_destroy_pf_port(pf); +	ice_tc_indir_block_unregister(vsi); +	ice_decfg_netdev(vsi); +} + +static int ice_init_dev(struct ice_pf *pf) +{ +	struct device *dev = ice_pf_to_dev(pf); +	struct 
ice_hw *hw = &pf->hw; +	int err;  	err = ice_init_hw(hw);  	if (err) {  		dev_err(dev, "ice_init_hw failed: %d\n", err); -		err = -EIO; -		goto err_exit_unroll; +		return err;  	}  	ice_init_feature_support(pf); @@ -4727,55 +4739,31 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)  	err = ice_init_pf(pf);  	if (err) {  		dev_err(dev, "ice_init_pf failed: %d\n", err); -		goto err_init_pf_unroll; +		goto err_init_pf;  	} -	ice_devlink_init_regions(pf); -  	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;  	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;  	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;  	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; -	i = 0;  	if (pf->hw.tnl.valid_count[TNL_VXLAN]) { -		pf->hw.udp_tunnel_nic.tables[i].n_entries = +		pf->hw.udp_tunnel_nic.tables[0].n_entries =  			pf->hw.tnl.valid_count[TNL_VXLAN]; -		pf->hw.udp_tunnel_nic.tables[i].tunnel_types = +		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =  			UDP_TUNNEL_TYPE_VXLAN; -		i++;  	}  	if (pf->hw.tnl.valid_count[TNL_GENEVE]) { -		pf->hw.udp_tunnel_nic.tables[i].n_entries = +		pf->hw.udp_tunnel_nic.tables[1].n_entries =  			pf->hw.tnl.valid_count[TNL_GENEVE]; -		pf->hw.udp_tunnel_nic.tables[i].tunnel_types = +		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =  			UDP_TUNNEL_TYPE_GENEVE; -		i++; -	} - -	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; -	if (!pf->num_alloc_vsi) { -		err = -EIO; -		goto err_init_pf_unroll; -	} -	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { -		dev_warn(&pf->pdev->dev, -			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n", -			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); -		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; -	} - -	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), -			       GFP_KERNEL); -	if (!pf->vsi) { -		err = -ENOMEM; -		goto err_init_pf_unroll;  	}  	err = ice_init_interrupt_scheme(pf);  	if (err) {  		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);  		err = -EIO; -		goto err_init_vsi_unroll; +		goto err_init_interrupt_scheme;  	}  	/* In case of MSIX we are going to setup the misc vector right here @@ -4786,49 +4774,94 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)  	err = ice_req_irq_msix_misc(pf);  	if (err) {  		dev_err(dev, "setup of misc vector failed: %d\n", err); -		goto err_init_interrupt_unroll; +		goto err_req_irq_msix_misc;  	} -	/* create switch struct for the switch element created by FW on boot */ -	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); -	if (!pf->first_sw) { -		err = -ENOMEM; -		goto err_msix_misc_unroll; -	} +	return 0; -	if (hw->evb_veb) -		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; -	else -		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; +err_req_irq_msix_misc: +	ice_clear_interrupt_scheme(pf); +err_init_interrupt_scheme: +	ice_deinit_pf(pf); +err_init_pf: +	ice_deinit_hw(hw); +	return err; +} -	pf->first_sw->pf = pf; +static void ice_deinit_dev(struct ice_pf *pf) +{ +	ice_free_irq_msix_misc(pf); +	ice_clear_interrupt_scheme(pf); +	ice_deinit_pf(pf); +	ice_deinit_hw(&pf->hw); +} -	/* record the sw_id available for later use */ -	pf->first_sw->sw_id = hw->port_info->sw_id; +static void ice_init_features(struct ice_pf *pf) +{ +	struct device *dev = ice_pf_to_dev(pf); -	err = ice_setup_pf_sw(pf); -	if (err) { -		dev_err(dev, "probe failed due to setup PF switch: %d\n", err); -		goto err_alloc_sw_unroll; -	} +	if 
(ice_is_safe_mode(pf)) +		return; -	clear_bit(ICE_SERVICE_DIS, pf->state); +	/* initialize DDP driven features */ +	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) +		ice_ptp_init(pf); -	/* tell the firmware we are up */ -	err = ice_send_version(pf); -	if (err) { -		dev_err(dev, "probe failed sending driver version %s. error: %d\n", -			UTS_RELEASE, err); -		goto err_send_version_unroll; +	if (ice_is_feature_supported(pf, ICE_F_GNSS)) +		ice_gnss_init(pf); + +	/* Note: Flow director init failure is non-fatal to load */ +	if (ice_init_fdir(pf)) +		dev_err(dev, "could not initialize flow director\n"); + +	/* Note: DCB init failure is non-fatal to load */ +	if (ice_init_pf_dcb(pf, false)) { +		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); +		clear_bit(ICE_FLAG_DCB_ENA, pf->flags); +	} else { +		ice_cfg_lldp_mib_change(&pf->hw, true);  	} -	/* since everything is good, start the service timer */ -	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); +	if (ice_init_lag(pf)) +		dev_warn(dev, "Failed to init link aggregation support\n"); +} + +static void ice_deinit_features(struct ice_pf *pf) +{ +	ice_deinit_lag(pf); +	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) +		ice_cfg_lldp_mib_change(&pf->hw, false); +	ice_deinit_fdir(pf); +	if (ice_is_feature_supported(pf, ICE_F_GNSS)) +		ice_gnss_exit(pf); +	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) +		ice_ptp_release(pf); +} + +static void ice_init_wakeup(struct ice_pf *pf) +{ +	/* Save wakeup reason register for later use */ +	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS); + +	/* check for a power management event */ +	ice_print_wake_reason(pf); + +	/* clear wake status, all bits */ +	wr32(&pf->hw, PFPM_WUS, U32_MAX); + +	/* Disable WoL at init, wait for user to enable */ +	device_set_wakeup_enable(ice_pf_to_dev(pf), false); +} + +static int ice_init_link(struct ice_pf *pf) +{ +	struct device *dev = ice_pf_to_dev(pf); +	int err;  	err = ice_init_link_events(pf->hw.port_info);  	if (err) {  		dev_err(dev, "ice_init_link_events failed: %d\n", err); -		goto err_send_version_unroll; +		return err;  	}  	/* not a fatal error if this fails */ @@ -4864,106 +4897,350 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)  		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);  	} -	ice_verify_cacheline_size(pf); +	return err; +} -	/* Save wakeup reason register for later use */ -	pf->wakeup_reason = rd32(hw, PFPM_WUS); +static int ice_init_pf_sw(struct ice_pf *pf) +{ +	bool dvm = ice_is_dvm_ena(&pf->hw); +	struct ice_vsi *vsi; +	int err; -	/* check for a power management event */ -	ice_print_wake_reason(pf); +	/* create switch struct for the switch element created by FW on boot */ +	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL); +	if (!pf->first_sw) +		return -ENOMEM; -	/* clear wake status, all bits */ -	wr32(hw, PFPM_WUS, U32_MAX); +	if (pf->hw.evb_veb) +		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; +	else +		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; -	/* Disable WoL at init, wait for user to enable */ -	device_set_wakeup_enable(dev, false); +	pf->first_sw->pf = pf; -	if (ice_is_safe_mode(pf)) { -		ice_set_safe_mode_vlan_cfg(pf); -		goto probe_done; +	/* record the sw_id available for later use */ +	pf->first_sw->sw_id = pf->hw.port_info->sw_id; + +	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); +	if (err) +		goto err_aq_set_port_params; + +	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); +	if (!vsi) { +		err = -ENOMEM; +		goto err_pf_vsi_setup;  	} -	/* initialize DDP driven features */ -	if 
(test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) -		ice_ptp_init(pf); +	return 0; -	if (ice_is_feature_supported(pf, ICE_F_GNSS)) -		ice_gnss_init(pf); +err_pf_vsi_setup: +err_aq_set_port_params: +	kfree(pf->first_sw); +	return err; +} -	/* Note: Flow director init failure is non-fatal to load */ -	if (ice_init_fdir(pf)) -		dev_err(dev, "could not initialize flow director\n"); +static void ice_deinit_pf_sw(struct ice_pf *pf) +{ +	struct ice_vsi *vsi = ice_get_main_vsi(pf); -	/* Note: DCB init failure is non-fatal to load */ -	if (ice_init_pf_dcb(pf, false)) { -		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); -		clear_bit(ICE_FLAG_DCB_ENA, pf->flags); -	} else { -		ice_cfg_lldp_mib_change(&pf->hw, true); +	if (!vsi) +		return; + +	ice_vsi_release(vsi); +	kfree(pf->first_sw); +} + +static int ice_alloc_vsis(struct ice_pf *pf) +{ +	struct device *dev = ice_pf_to_dev(pf); + +	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi; +	if (!pf->num_alloc_vsi) +		return -EIO; + +	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { +		dev_warn(dev, +			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n", +			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); +		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;  	} -	if (ice_init_lag(pf)) -		dev_warn(dev, "Failed to init link aggregation support\n"); +	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), +			       GFP_KERNEL); +	if (!pf->vsi) +		return -ENOMEM; + +	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, +				     sizeof(*pf->vsi_stats), GFP_KERNEL); +	if (!pf->vsi_stats) { +		devm_kfree(dev, pf->vsi); +		return -ENOMEM; +	} -	/* print PCI link speed and width */ -	pcie_print_link_status(pf->pdev); +	return 0; +} -probe_done: -	err = ice_register_netdev(pf); -	if (err) -		goto err_netdev_reg; +static void ice_dealloc_vsis(struct ice_pf *pf) +{ +	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats); +	pf->vsi_stats = NULL; + +	pf->num_alloc_vsi = 0; +	devm_kfree(ice_pf_to_dev(pf), pf->vsi); +	pf->vsi = NULL; +} + +static int ice_init_devlink(struct ice_pf *pf) +{ +	int err;  	err = ice_devlink_register_params(pf);  	if (err) -		goto err_netdev_reg; +		return err; + +	ice_devlink_init_regions(pf); +	ice_devlink_register(pf); + +	return 0; +} + +static void ice_deinit_devlink(struct ice_pf *pf) +{ +	ice_devlink_unregister(pf); +	ice_devlink_destroy_regions(pf); +	ice_devlink_unregister_params(pf); +} + +static int ice_init(struct ice_pf *pf) +{ +	int err; + +	err = ice_init_dev(pf); +	if (err) +		return err; + +	err = ice_alloc_vsis(pf); +	if (err) +		goto err_alloc_vsis; + +	err = ice_init_pf_sw(pf); +	if (err) +		goto err_init_pf_sw; + +	ice_init_wakeup(pf); + +	err = ice_init_link(pf); +	if (err) +		goto err_init_link; + +	err = ice_send_version(pf); +	if (err) +		goto err_init_link; + +	ice_verify_cacheline_size(pf); + +	if (ice_is_safe_mode(pf)) +		ice_set_safe_mode_vlan_cfg(pf); +	else +		/* print PCI link speed and width */ +		pcie_print_link_status(pf->pdev);  	/* ready to go, so clear down state bit */  	clear_bit(ICE_DOWN, pf->state); -	if (ice_is_rdma_ena(pf)) { -		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL); -		if (pf->aux_idx < 0) { -			dev_err(dev, "Failed to allocate device ID for AUX driver\n"); -			err = -ENOMEM; -			goto err_devlink_reg_param; -		} +	clear_bit(ICE_SERVICE_DIS, pf->state); -		err = ice_init_rdma(pf); -		if (err) { -			dev_err(dev, "Failed to initialize RDMA: %d\n", err); -			err = -EIO; -			goto err_init_aux_unroll; -		} -	} else { -		dev_warn(dev, "RDMA is not supported 
on this device\n"); -	} +	/* since everything is good, start the service timer */ +	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); -	ice_devlink_register(pf);  	return 0; -err_init_aux_unroll: -	pf->adev = NULL; -	ida_free(&ice_aux_ida, pf->aux_idx); -err_devlink_reg_param: -	ice_devlink_unregister_params(pf); -err_netdev_reg: -err_send_version_unroll: -	ice_vsi_release_all(pf); -err_alloc_sw_unroll: +err_init_link: +	ice_deinit_pf_sw(pf); +err_init_pf_sw: +	ice_dealloc_vsis(pf); +err_alloc_vsis: +	ice_deinit_dev(pf); +	return err; +} + +static void ice_deinit(struct ice_pf *pf) +{  	set_bit(ICE_SERVICE_DIS, pf->state);  	set_bit(ICE_DOWN, pf->state); -	devm_kfree(dev, pf->first_sw); -err_msix_misc_unroll: -	ice_free_irq_msix_misc(pf); -err_init_interrupt_unroll: -	ice_clear_interrupt_scheme(pf); -err_init_vsi_unroll: -	devm_kfree(dev, pf->vsi); -err_init_pf_unroll: -	ice_deinit_pf(pf); -	ice_devlink_destroy_regions(pf); -	ice_deinit_hw(hw); -err_exit_unroll: -	pci_disable_pcie_error_reporting(pdev); + +	ice_deinit_pf_sw(pf); +	ice_dealloc_vsis(pf); +	ice_deinit_dev(pf); +} + +/** + * ice_load - load pf by init hw and starting VSI + * @pf: pointer to the pf instance + */ +int ice_load(struct ice_pf *pf) +{ +	struct ice_vsi_cfg_params params = {}; +	struct ice_vsi *vsi; +	int err; + +	err = ice_reset(&pf->hw, ICE_RESET_PFR); +	if (err) +		return err; + +	err = ice_init_dev(pf); +	if (err) +		return err; + +	vsi = ice_get_main_vsi(pf); + +	params = ice_vsi_to_params(vsi); +	params.flags = ICE_VSI_FLAG_INIT; + +	err = ice_vsi_cfg(vsi, ¶ms); +	if (err) +		goto err_vsi_cfg; + +	err = ice_start_eth(ice_get_main_vsi(pf)); +	if (err) +		goto err_start_eth; + +	err = ice_init_rdma(pf); +	if (err) +		goto err_init_rdma; + +	ice_init_features(pf); +	ice_service_task_restart(pf); + +	clear_bit(ICE_DOWN, pf->state); + +	return 0; + +err_init_rdma: +	ice_vsi_close(ice_get_main_vsi(pf)); +err_start_eth: +	ice_vsi_decfg(ice_get_main_vsi(pf)); +err_vsi_cfg: +	ice_deinit_dev(pf); +	return err; +} + +/** + * ice_unload - unload pf by stopping VSI and deinit hw + * @pf: pointer to the pf instance + */ +void ice_unload(struct ice_pf *pf) +{ +	ice_deinit_features(pf); +	ice_deinit_rdma(pf); +	ice_vsi_close(ice_get_main_vsi(pf)); +	ice_vsi_decfg(ice_get_main_vsi(pf)); +	ice_deinit_dev(pf); +} + +/** + * ice_probe - Device initialization routine + * @pdev: PCI device information struct + * @ent: entry in ice_pci_tbl + * + * Returns 0 on success, negative on failure + */ +static int +ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) +{ +	struct device *dev = &pdev->dev; +	struct ice_pf *pf; +	struct ice_hw *hw; +	int err; + +	if (pdev->is_virtfn) { +		dev_err(dev, "can't probe a virtual function\n"); +		return -EINVAL; +	} + +	/* this driver uses devres, see +	 * Documentation/driver-api/driver-model/devres.rst +	 */ +	err = pcim_enable_device(pdev); +	if (err) +		return err; + +	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); +	if (err) { +		dev_err(dev, "BAR0 I/O map error %d\n", err); +		return err; +	} + +	pf = ice_allocate_pf(dev); +	if (!pf) +		return -ENOMEM; + +	/* initialize Auxiliary index to invalid value */ +	pf->aux_idx = -1; + +	/* set up for high or low DMA */ +	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); +	if (err) { +		dev_err(dev, "DMA configuration failed: 0x%x\n", err); +		return err; +	} + +	pci_set_master(pdev); + +	pf->pdev = pdev; +	pci_set_drvdata(pdev, pf); +	set_bit(ICE_DOWN, pf->state); +	/* 
Disable service task until DOWN bit is cleared */ +	set_bit(ICE_SERVICE_DIS, pf->state); + +	hw = &pf->hw; +	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; +	pci_save_state(pdev); + +	hw->back = pf; +	hw->port_info = NULL; +	hw->vendor_id = pdev->vendor; +	hw->device_id = pdev->device; +	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); +	hw->subsystem_vendor_id = pdev->subsystem_vendor; +	hw->subsystem_device_id = pdev->subsystem_device; +	hw->bus.device = PCI_SLOT(pdev->devfn); +	hw->bus.func = PCI_FUNC(pdev->devfn); +	ice_set_ctrlq_len(hw); + +	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); + +#ifndef CONFIG_DYNAMIC_DEBUG +	if (debug < -1) +		hw->debug_mask = debug; +#endif + +	err = ice_init(pf); +	if (err) +		goto err_init; + +	err = ice_init_eth(pf); +	if (err) +		goto err_init_eth; + +	err = ice_init_rdma(pf); +	if (err) +		goto err_init_rdma; + +	err = ice_init_devlink(pf); +	if (err) +		goto err_init_devlink; + +	ice_init_features(pf); + +	return 0; + +err_init_devlink: +	ice_deinit_rdma(pf); +err_init_rdma: +	ice_deinit_eth(pf); +err_init_eth: +	ice_deinit(pf); +err_init:  	pci_disable_device(pdev);  	return err;  } @@ -5038,49 +5315,33 @@ static void ice_remove(struct pci_dev *pdev)  	struct ice_pf *pf = pci_get_drvdata(pdev);  	int i; -	ice_devlink_unregister(pf);  	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {  		if (!ice_is_reset_in_progress(pf->state))  			break;  		msleep(100);  	} -	ice_tc_indir_block_remove(pf); -  	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {  		set_bit(ICE_VF_RESETS_DISABLED, pf->state);  		ice_free_vfs(pf);  	}  	ice_service_task_stop(pf); -  	ice_aq_cancel_waiting_tasks(pf); -	ice_unplug_aux_dev(pf); -	if (pf->aux_idx >= 0) -		ida_free(&ice_aux_ida, pf->aux_idx); -	ice_devlink_unregister_params(pf);  	set_bit(ICE_DOWN, pf->state); -	ice_deinit_lag(pf); -	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) -		ice_ptp_release(pf); -	if (ice_is_feature_supported(pf, ICE_F_GNSS)) -		ice_gnss_exit(pf);  	if (!ice_is_safe_mode(pf))  		ice_remove_arfs(pf); -	ice_setup_mc_magic_wake(pf); +	ice_deinit_features(pf); +	ice_deinit_devlink(pf); +	ice_deinit_rdma(pf); +	ice_deinit_eth(pf); +	ice_deinit(pf); +  	ice_vsi_release_all(pf); -	mutex_destroy(&(&pf->hw)->fdir_fltr_lock); + +	ice_setup_mc_magic_wake(pf);  	ice_set_wake(pf); -	ice_free_irq_msix_misc(pf); -	ice_for_each_vsi(pf, i) { -		if (!pf->vsi[i]) -			continue; -		ice_vsi_free_q_vectors(pf->vsi[i]); -	} -	ice_deinit_pf(pf); -	ice_devlink_destroy_regions(pf); -	ice_deinit_hw(&pf->hw);  	/* Issue a PFR as part of the prescribed driver unload flow.  
Do not  	 * do it via ice_schedule_reset() since there is no need to rebuild @@ -5088,8 +5349,6 @@ static void ice_remove(struct pci_dev *pdev)  	 */  	ice_reset(&pf->hw, ICE_RESET_PFR);  	pci_wait_for_pending_transaction(pdev); -	ice_clear_interrupt_scheme(pf); -	pci_disable_pcie_error_reporting(pdev);  	pci_disable_device(pdev);  } @@ -5517,7 +5776,7 @@ static int __init ice_module_init(void)  	pr_info("%s\n", ice_driver_string);  	pr_info("%s\n", ice_copyright); -	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); +	ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);  	if (!ice_wq) {  		pr_err("Failed to create workqueue\n");  		return -ENOMEM; @@ -6123,24 +6382,21 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)  }  /** - * ice_vsi_cfg - Setup the VSI + * ice_vsi_cfg_lan - Setup the VSI lan related config   * @vsi: the VSI being configured   *   * Return 0 on success and negative value on error   */ -int ice_vsi_cfg(struct ice_vsi *vsi) +int ice_vsi_cfg_lan(struct ice_vsi *vsi)  {  	int err; -	if (vsi->netdev) { +	if (vsi->netdev && vsi->type == ICE_VSI_PF) {  		ice_set_rx_mode(vsi->netdev); -		if (vsi->type != ICE_VSI_LB) { -			err = ice_vsi_vlan_setup(vsi); - -			if (err) -				return err; -		} +		err = ice_vsi_vlan_setup(vsi); +		if (err) +			return err;  	}  	ice_vsi_cfg_dcb_rings(vsi); @@ -6321,19 +6577,20 @@ static int ice_up_complete(struct ice_vsi *vsi)  	if (vsi->port_info &&  	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && -	    vsi->netdev) { +	    vsi->netdev && vsi->type == ICE_VSI_PF) {  		ice_print_link_msg(vsi, true);  		netif_tx_start_all_queues(vsi->netdev);  		netif_carrier_on(vsi->netdev); -		if (!ice_is_e810(&pf->hw)) -			ice_ptp_link_change(pf, pf->hw.pf_id, true); +		ice_ptp_link_change(pf, pf->hw.pf_id, true);  	}  	/* Perform an initial read of the statistics registers now to  	 * set the baseline so counters are ready when interface is up  	 */  	ice_update_eth_stats(vsi); -	ice_service_task_schedule(pf); + +	if (vsi->type == ICE_VSI_PF) +		ice_service_task_schedule(pf);  	return 0;  } @@ -6346,7 +6603,7 @@ int ice_up(struct ice_vsi *vsi)  {  	int err; -	err = ice_vsi_cfg(vsi); +	err = ice_vsi_cfg_lan(vsi);  	if (!err)  		err = ice_up_complete(vsi); @@ -6370,10 +6627,10 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,  	unsigned int start;  	do { -		start = u64_stats_fetch_begin_irq(syncp); +		start = u64_stats_fetch_begin(syncp);  		*pkts = stats.pkts;  		*bytes = stats.bytes; -	} while (u64_stats_fetch_retry_irq(syncp, start)); +	} while (u64_stats_fetch_retry(syncp, start));  }  /** @@ -6395,14 +6652,16 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,  		u64 pkts = 0, bytes = 0;  		ring = READ_ONCE(rings[i]); -		if (!ring) +		if (!ring || !ring->ring_stats)  			continue; -		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes); +		ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp, +					     ring->ring_stats->stats, &pkts, +					     &bytes);  		vsi_stats->tx_packets += pkts;  		vsi_stats->tx_bytes += bytes; -		vsi->tx_restart += ring->tx_stats.restart_q; -		vsi->tx_busy += ring->tx_stats.tx_busy; -		vsi->tx_linearize += ring->tx_stats.tx_linearize; +		vsi->tx_restart += ring->ring_stats->tx_stats.restart_q; +		vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy; +		vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;  	}  } @@ -6412,6 +6671,7 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,   */  static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)  { +	struct 
rtnl_link_stats64 *net_stats, *stats_prev;  	struct rtnl_link_stats64 *vsi_stats;  	u64 pkts, bytes;  	int i; @@ -6436,12 +6696,16 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)  	/* update Rx rings counters */  	ice_for_each_rxq(vsi, i) {  		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); +		struct ice_ring_stats *ring_stats; -		ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes); +		ring_stats = ring->ring_stats; +		ice_fetch_u64_stats_per_ring(&ring_stats->syncp, +					     ring_stats->stats, &pkts, +					     &bytes);  		vsi_stats->rx_packets += pkts;  		vsi_stats->rx_bytes += bytes; -		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; -		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; +		vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed; +		vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;  	}  	/* update XDP Tx rings counters */ @@ -6451,10 +6715,28 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)  	rcu_read_unlock(); -	vsi->net_stats.tx_packets = vsi_stats->tx_packets; -	vsi->net_stats.tx_bytes = vsi_stats->tx_bytes; -	vsi->net_stats.rx_packets = vsi_stats->rx_packets; -	vsi->net_stats.rx_bytes = vsi_stats->rx_bytes; +	net_stats = &vsi->net_stats; +	stats_prev = &vsi->net_stats_prev; + +	/* clear prev counters after reset */ +	if (vsi_stats->tx_packets < stats_prev->tx_packets || +	    vsi_stats->rx_packets < stats_prev->rx_packets) { +		stats_prev->tx_packets = 0; +		stats_prev->tx_bytes = 0; +		stats_prev->rx_packets = 0; +		stats_prev->rx_bytes = 0; +	} + +	/* update netdev counters */ +	net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets; +	net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes; +	net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets; +	net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes; + +	stats_prev->tx_packets = vsi_stats->tx_packets; +	stats_prev->tx_bytes = vsi_stats->tx_bytes; +	stats_prev->rx_packets = vsi_stats->rx_packets; +	stats_prev->rx_bytes = vsi_stats->rx_bytes;  	kfree(vsi_stats);  } @@ -6516,6 +6798,9 @@ void ice_update_pf_stats(struct ice_pf *pf)  	prev_ps = &pf->stats_prev;  	cur_ps = &pf->stats; +	if (ice_is_reset_in_progress(pf->state)) +		pf->stat_prev_loaded = false; +  	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,  			  &prev_ps->eth.rx_bytes,  			  &cur_ps->eth.rx_bytes); @@ -6730,8 +7015,7 @@ int ice_down(struct ice_vsi *vsi)  	if (vsi->netdev && vsi->type == ICE_VSI_PF) {  		vlan_err = ice_vsi_del_vlan_zero(vsi); -		if (!ice_is_e810(&vsi->back->hw)) -			ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); +		ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);  		netif_carrier_off(vsi->netdev);  		netif_tx_disable(vsi->netdev);  	} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { @@ -6887,7 +7171,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi)  	if (err)  		goto err_setup_rx; -	err = ice_vsi_cfg(vsi); +	err = ice_vsi_cfg_lan(vsi);  	if (err)  		goto err_setup_rx; @@ -6941,7 +7225,7 @@ int ice_vsi_open(struct ice_vsi *vsi)  	if (err)  		goto err_setup_rx; -	err = ice_vsi_cfg(vsi); +	err = ice_vsi_cfg_lan(vsi);  	if (err)  		goto err_setup_rx; @@ -7026,7 +7310,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)  			continue;  		/* rebuild the VSI */ -		err = ice_vsi_rebuild(vsi, true); +		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);  		if (err) {  			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",  			
	err, vsi->idx, ice_vsi_type_str(type)); @@ -7282,18 +7566,6 @@ clear_recovery:  }  /** - * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP - * @vsi: Pointer to VSI structure - */ -static int ice_max_xdp_frame_size(struct ice_vsi *vsi) -{ -	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) -		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; -	else -		return ICE_RXBUF_3072; -} - -/**   * ice_change_mtu - NDO callback to change the MTU   * @netdev: network interface device structure   * @new_mtu: new value for maximum frame size @@ -7305,6 +7577,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)  	struct ice_netdev_priv *np = netdev_priv(netdev);  	struct ice_vsi *vsi = np->vsi;  	struct ice_pf *pf = vsi->back; +	struct bpf_prog *prog;  	u8 count = 0;  	int err = 0; @@ -7313,7 +7586,8 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)  		return 0;  	} -	if (ice_is_xdp_ena_vsi(vsi)) { +	prog = vsi->xdp_prog; +	if (prog && !prog->aux->xdp_has_frags) {  		int frame_size = ice_max_xdp_frame_size(vsi);  		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { @@ -7321,6 +7595,12 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)  				   frame_size - ICE_ETH_PKT_HDR_PAD);  			return -EINVAL;  		} +	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { +		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) { +			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n", +				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD); +			return -EINVAL; +		}  	}  	/* if a reset is in progress, wait for some time for it to complete */ @@ -8283,7 +8563,7 @@ static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)  		rule.rid = fltr->rid;  		rule.rule_id = fltr->rule_id; -		rule.vsi_handle = fltr->dest_id; +		rule.vsi_handle = fltr->dest_vsi_handle;  		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);  		if (status) {  			if (status == -ENOENT) @@ -8371,12 +8651,9 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)  		/* clear the VSI from scheduler tree */  		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx); -		/* Delete VSI from FW */ +		/* Delete VSI from FW, PF and HW VSI arrays */  		ice_vsi_delete(ch->ch_vsi); -		/* Delete VSI from PF and HW VSI arrays */ -		ice_vsi_clear(ch->ch_vsi); -  		/* free the channel */  		kfree(ch);  	} @@ -8435,7 +8712,7 @@ static int ice_rebuild_channels(struct ice_pf *pf)  		type = vsi->type;  		/* rebuild ADQ VSI */ -		err = ice_vsi_rebuild(vsi, true); +		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);  		if (err) {  			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",  				ice_vsi_type_str(type), vsi->idx, err); @@ -8595,6 +8872,12 @@ static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)  	switch (mode) {  	case TC_MQPRIO_MODE_CHANNEL: +		if (pf->hw.port_info->is_custom_tx_enabled) { +			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n"); +			return -EBUSY; +		} +		ice_tear_down_devlink_rate_tree(pf); +  		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);  		if (ret) {  			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n", @@ -8661,14 +8944,14 @@ config_tcf:  	cur_rxq = vsi->num_rxq;  	/* proceed with rebuild main VSI using correct number of queues */ -	ret = ice_vsi_rebuild(vsi, false); +	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);  	if (ret) {  		/* fallback to current number of queues */  		dev_info(dev, "Rebuild failed with new queues, try with current 
number of queues\n");  		vsi->req_txq = cur_txq;  		vsi->req_rxq = cur_rxq;  		clear_bit(ICE_RESET_FAILED, pf->state); -		if (ice_vsi_rebuild(vsi, false)) { +		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {  			dev_err(dev, "Rebuild of main VSI failed again\n");  			return ret;  		} @@ -9108,5 +9391,4 @@ static const struct net_device_ops ice_netdev_ops = {  	.ndo_bpf = ice_xdp,  	.ndo_xdp_xmit = ice_xdp_xmit,  	.ndo_xsk_wakeup = ice_xsk_wakeup, -	.ndo_get_devlink_port = ice_get_devlink_port,  }; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index c262dc886e6a..f6f52a248066 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -662,7 +662,6 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,  		/* Verify that the simple checksum is zero */  		for (i = 0; i < sizeof(*tmp); i++) -			/* cppcheck-suppress objectIndex */  			sum += ((u8 *)tmp)[i];  		if (sum) { diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 0f668468d141..ac6f06f9a2ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -600,6 +600,23 @@ static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)  }  /** + * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps + * @tx: the PTP Tx timestamp tracker to check + * + * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready + * to accept new timestamp requests. + * + * Assumes the tx->lock spinlock is already held. + */ +static bool +ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) +{ +	lockdep_assert_held(&tx->lock); + +	return tx->init && !tx->calibrating; +} + +/**   * ice_ptp_tx_tstamp - Process Tx timestamps for a port   * @tx: the PTP Tx timestamp tracker   * @@ -608,11 +625,13 @@ static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)   *   * If a given index has a valid timestamp, perform the following steps:   * - * 1) copy the timestamp out of the PHY register - * 4) clear the timestamp valid bit in the PHY register - * 5) unlock the index by clearing the associated in_use bit. - * 2) extend the 40b timestamp value to get a 64bit timestamp - * 3) send that timestamp to the stack + * 1) check that the timestamp request is not stale + * 2) check that a timestamp is ready and available in the PHY memory bank + * 3) read and copy the timestamp out of the PHY register + * 4) unlock the index by clearing the associated in_use bit + * 5) check if the timestamp is stale, and discard if so + * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value + * 7) send this 64 bit timestamp to the stack   *   * Returns true if all timestamps were handled, and false if any slots remain   * without a timestamp. @@ -623,24 +642,46 @@ static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)   * interrupt. In some cases hardware might not interrupt us again when the   * timestamp is captured.   * - * Note that we only take the tracking lock when clearing the bit and when - * checking if we need to re-queue this task. The only place where bits can be - * set is the hard xmit routine where an SKB has a request flag set. The only - * places where we clear bits are this work function, or the periodic cleanup - * thread. If the cleanup thread clears a bit we're processing we catch it - * when we lock to clear the bit and then grab the SKB pointer. 
If a Tx thread - * starts a new timestamp, we might not begin processing it right away but we - * will notice it at the end when we re-queue the task. If a Tx thread starts - * a new timestamp just after this function exits without re-queuing, - * the interrupt when the timestamp finishes should trigger. Avoiding holding - * the lock for the entire function is important in order to ensure that Tx - * threads do not get blocked while waiting for the lock. + * Note that we do not hold the tracking lock while reading the Tx timestamp. + * This is because reading the timestamp requires taking a mutex that might + * sleep. + * + * The only place where we set in_use is when a new timestamp is initiated + * with a slot index. This is only called in the hard xmit routine where an + * SKB has a request flag set. The only places where we clear this bit is this + * function, or during teardown when the Tx timestamp tracker is being + * removed. A timestamp index will never be re-used until the in_use bit for + * that index is cleared. + * + * If a Tx thread starts a new timestamp, we might not begin processing it + * right away but we will notice it at the end when we re-queue the task. + * + * If a Tx thread starts a new timestamp just after this function exits, the + * interrupt for that timestamp should re-trigger this function once + * a timestamp is ready. + * + * In cases where the PTP hardware clock was directly adjusted, some + * timestamps may not be able to safely use the timestamp extension math. In + * this case, software will set the stale bit for any outstanding Tx + * timestamps when the clock is adjusted. Then this function will discard + * those captured timestamps instead of sending them to the stack. + * + * If a Tx packet has been waiting for more than 2 seconds, it is not possible + * to correctly extend the timestamp using the cached PHC time. It is + * extremely unlikely that a packet will ever take this long to timestamp. If + * we detect a Tx timestamp request that has waited for this long we assume + * the packet will never be sent by hardware and discard it without reading + * the timestamp register.   */  static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)  {  	struct ice_ptp_port *ptp_port; -	bool ts_handled = true; +	bool more_timestamps;  	struct ice_pf *pf; +	struct ice_hw *hw; +	u64 tstamp_ready; +	bool link_up; +	int err;  	u8 idx;  	if (!tx->init) @@ -648,44 +689,89 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)  	ptp_port = container_of(tx, struct ice_ptp_port, tx);  	pf = ptp_port_to_pf(ptp_port); +	hw = &pf->hw; + +	/* Read the Tx ready status first */ +	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); +	if (err) +		return false; + +	/* Drop packets if the link went down */ +	link_up = ptp_port->link_up;  	for_each_set_bit(idx, tx->in_use, tx->len) {  		struct skb_shared_hwtstamps shhwtstamps = {}; -		u8 phy_idx = idx + tx->quad_offset; -		u64 raw_tstamp, tstamp; +		u8 phy_idx = idx + tx->offset; +		u64 raw_tstamp = 0, tstamp; +		bool drop_ts = !link_up;  		struct sk_buff *skb; -		int err; + +		/* Drop packets which have waited for more than 2 seconds */ +		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { +			drop_ts = true; + +			/* Count the number of Tx timestamps that timed out */ +			pf->ptp.tx_hwtstamp_timeouts++; +		} + +		/* Only read a timestamp from the PHY if its marked as ready +		 * by the tstamp_ready register. This avoids unnecessary +		 * reading of timestamps which are not yet valid. 
This is +		 * important as we must read all timestamps which are valid +		 * and only timestamps which are valid during each interrupt. +		 * If we do not, the hardware logic for generating a new +		 * interrupt can get stuck on some devices. +		 */ +		if (!(tstamp_ready & BIT_ULL(phy_idx))) { +			if (drop_ts) +				goto skip_ts_read; + +			continue; +		}  		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); -		err = ice_read_phy_tstamp(&pf->hw, tx->quad, phy_idx, -					  &raw_tstamp); -		if (err) +		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp); +		if (err && !drop_ts)  			continue;  		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); -		/* Check if the timestamp is invalid or stale */ -		if (!(raw_tstamp & ICE_PTP_TS_VALID) || +		/* For PHYs which don't implement a proper timestamp ready +		 * bitmap, verify that the timestamp value is different +		 * from the last cached timestamp. If it is not, skip this for +		 * now assuming it hasn't yet been captured by hardware. +		 */ +		if (!drop_ts && tx->verify_cached &&  		    raw_tstamp == tx->tstamps[idx].cached_tstamp)  			continue; -		/* The timestamp is valid, so we'll go ahead and clear this -		 * index and then send the timestamp up to the stack. -		 */ +		/* Discard any timestamp value without the valid bit set */ +		if (!(raw_tstamp & ICE_PTP_TS_VALID)) +			drop_ts = true; + +skip_ts_read:  		spin_lock(&tx->lock); -		tx->tstamps[idx].cached_tstamp = raw_tstamp; +		if (tx->verify_cached && raw_tstamp) +			tx->tstamps[idx].cached_tstamp = raw_tstamp;  		clear_bit(idx, tx->in_use);  		skb = tx->tstamps[idx].skb;  		tx->tstamps[idx].skb = NULL; +		if (test_and_clear_bit(idx, tx->stale)) +			drop_ts = true;  		spin_unlock(&tx->lock); -		/* it's (unlikely but) possible we raced with the cleanup -		 * thread for discarding old timestamp requests. +		/* It is unlikely but possible that the SKB will have been +		 * flushed at this point due to link change or teardown.  		 */  		if (!skb)  			continue; +		if (drop_ts) { +			dev_kfree_skb_any(skb); +			continue; +		} +  		/* Extend the timestamp using cached PHC time */  		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);  		if (tstamp) { @@ -701,11 +787,10 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)  	 * poll for remaining timestamps.  	 */  	spin_lock(&tx->lock); -	if (!bitmap_empty(tx->in_use, tx->len)) -		ts_handled = false; +	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);  	spin_unlock(&tx->lock); -	return ts_handled; +	return !more_timestamps;  }  /** @@ -713,26 +798,33 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)   * @tx: Tx tracking structure to initialize   *   * Assumes that the length has already been initialized. Do not call directly, - * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead. + * use the ice_ptp_init_tx_* instead.   
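The rewritten ice_ptp_tx_tstamp() above is easier to follow when the per-slot flow is pulled out of the diff. A condensed, illustrative sketch of that loop follows; extend_40b() is a hypothetical stand-in for ice_ptp_extend_40b_ts(), and the sketch omits the cached-timestamp and valid-bit checks, while everything else mirrors calls visible in the hunk:

/* Condensed restatement of the completion walk above (a sketch, not
 * the driver's code). The short spin_lock section hands ownership of
 * the slot and SKB to this thread before any reporting happens.
 */
static void tx_tstamp_walk_sketch(struct ice_pf *pf, struct ice_ptp_tx *tx,
				  u64 tstamp_ready, bool link_up)
{
	struct ice_hw *hw = &pf->hw;
	u8 idx;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps hwts = {};
		u8 phy_idx = idx + tx->offset;
		struct sk_buff *skb;
		u64 raw = 0;
		bool drop = !link_up ||
			time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ);

		/* nothing latched yet and no reason to drop: revisit later */
		if (!(tstamp_ready & BIT_ULL(phy_idx)) && !drop)
			continue;

		if (!drop && ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw))
			continue;

		spin_lock(&tx->lock);
		clear_bit(idx, tx->in_use);	/* slot becomes reusable */
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		if (test_and_clear_bit(idx, tx->stale))
			drop = true;		/* PHC moved; extension unsafe */
		spin_unlock(&tx->lock);

		if (!skb)
			continue;
		if (drop) {
			dev_kfree_skb_any(skb);
			continue;
		}

		/* extend_40b() stands in for ice_ptp_extend_40b_ts() */
		hwts.hwtstamp = ns_to_ktime(extend_40b(pf, raw));
		skb_tstamp_tx(skb, &hwts);
		dev_kfree_skb_any(skb);
	}
}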
*/  static int  ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)  { -	tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL); -	if (!tx->tstamps) -		return -ENOMEM; +	unsigned long *in_use, *stale; +	struct ice_tx_tstamp *tstamps; + +	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL); +	in_use = bitmap_zalloc(tx->len, GFP_KERNEL); +	stale = bitmap_zalloc(tx->len, GFP_KERNEL); + +	if (!tstamps || !in_use || !stale) { +		kfree(tstamps); +		bitmap_free(in_use); +		bitmap_free(stale); -	tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL); -	if (!tx->in_use) { -		kfree(tx->tstamps); -		tx->tstamps = NULL;  		return -ENOMEM;  	} -	spin_lock_init(&tx->lock); - +	tx->tstamps = tstamps; +	tx->in_use = in_use; +	tx->stale = stale;  	tx->init = 1; +	spin_lock_init(&tx->lock); +  	return 0;  } @@ -740,31 +832,71 @@ ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)   * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker   * @pf: Board private structure   * @tx: the tracker to flush + * + * Called during teardown when a Tx tracker is being removed.   */  static void  ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)  { +	struct ice_hw *hw = &pf->hw; +	u64 tstamp_ready; +	int err;  	u8 idx; -	for (idx = 0; idx < tx->len; idx++) { -		u8 phy_idx = idx + tx->quad_offset; +	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); +	if (err) { +		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n", +			tx->block, err); + +		/* If we fail to read the Tx timestamp ready bitmap just +		 * skip clearing the PHY timestamps. +		 */ +		tstamp_ready = 0; +	} + +	for_each_set_bit(idx, tx->in_use, tx->len) { +		u8 phy_idx = idx + tx->offset; +		struct sk_buff *skb; + +		/* In case this timestamp is ready, we need to clear it. */ +		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx))) +			ice_clear_phy_tstamp(hw, tx->block, phy_idx);  		spin_lock(&tx->lock); -		if (tx->tstamps[idx].skb) { -			dev_kfree_skb_any(tx->tstamps[idx].skb); -			tx->tstamps[idx].skb = NULL; -			pf->ptp.tx_hwtstamp_flushed++; -		} +		skb = tx->tstamps[idx].skb; +		tx->tstamps[idx].skb = NULL;  		clear_bit(idx, tx->in_use); +		clear_bit(idx, tx->stale);  		spin_unlock(&tx->lock); -		/* Clear any potential residual timestamp in the PHY block */ -		if (!pf->hw.reset_ongoing) -			ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx); +		/* Count the number of Tx timestamps flushed */ +		pf->ptp.tx_hwtstamp_flushed++; + +		/* Free the SKB after we've cleared the bit */ +		dev_kfree_skb_any(skb);  	}  }  /** + * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale + * @tx: the tracker to mark + * + * Mark currently outstanding Tx timestamps as stale. This prevents sending + * their timestamp value to the stack. This is required to prevent extending + * the 40bit hardware timestamp incorrectly. + * + * This should be called when the PTP clock is modified such as after a set + * time request. 
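One detail worth calling out in the reworked ice_ptp_alloc_tx_tracker() above: kfree() and bitmap_free() both accept NULL, so allocating everything up front and sharing a single failure check is safe. The same pattern in isolation (a sketch with generic names, not the driver's code):

/* Sketch of the allocate-everything-then-check pattern. Because
 * kfree()/bitmap_free() tolerate NULL, one error path covers every
 * partial-failure combination.
 */
struct tracker {
	struct ice_tx_tstamp *tstamps;
	unsigned long *in_use;
	unsigned long *stale;
};

static int tracker_alloc(struct tracker *t, unsigned int len)
{
	t->tstamps = kcalloc(len, sizeof(*t->tstamps), GFP_KERNEL);
	t->in_use = bitmap_zalloc(len, GFP_KERNEL);
	t->stale = bitmap_zalloc(len, GFP_KERNEL);

	if (!t->tstamps || !t->in_use || !t->stale) {
		kfree(t->tstamps);
		bitmap_free(t->in_use);
		bitmap_free(t->stale);
		return -ENOMEM;
	}
	return 0;
}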
+ */ +static void +ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx) +{ +	spin_lock(&tx->lock); +	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len); +	spin_unlock(&tx->lock); +} + +/**   * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker   * @pf: Board private structure   * @tx: Tx tracking structure to release @@ -774,7 +906,12 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)  static void  ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)  { +	spin_lock(&tx->lock);  	tx->init = 0; +	spin_unlock(&tx->lock); + +	/* wait for potentially outstanding interrupt to complete */ +	synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);  	ice_ptp_flush_tx_tracker(pf, tx); @@ -784,6 +921,9 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)  	bitmap_free(tx->in_use);  	tx->in_use = NULL; +	bitmap_free(tx->stale); +	tx->stale = NULL; +  	tx->len = 0;  } @@ -801,9 +941,10 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)  static int  ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)  { -	tx->quad = port / ICE_PORTS_PER_QUAD; -	tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT; -	tx->len = INDEX_PER_PORT; +	tx->block = port / ICE_PORTS_PER_QUAD; +	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822; +	tx->len = INDEX_PER_PORT_E822; +	tx->verify_cached = 0;  	return ice_ptp_alloc_tx_tracker(tx);  } @@ -819,59 +960,19 @@ ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)  static int  ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)  { -	tx->quad = pf->hw.port_info->lport; -	tx->quad_offset = 0; -	tx->len = INDEX_PER_QUAD; +	tx->block = pf->hw.port_info->lport; +	tx->offset = 0; +	tx->len = INDEX_PER_PORT_E810; +	/* The E810 PHY does not provide a timestamp ready bitmap. Instead, +	 * verify new timestamps against cached copy of the last read +	 * timestamp. +	 */ +	tx->verify_cached = 1;  	return ice_ptp_alloc_tx_tracker(tx);  }  /** - * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped - * @pf: pointer to the PF struct - * @tx: PTP Tx tracker to clean up - * - * Loop through the Tx timestamp requests and see if any of them have been - * waiting for a long time. Discard any SKBs that have been waiting for more - * than 2 seconds. This is long enough to be reasonably sure that the - * timestamp will never be captured. This might happen if the packet gets - * discarded before it reaches the PHY timestamping block. 
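To make the E822 tracker geometry in ice_ptp_init_tx_e822() above concrete, assuming ICE_PORTS_PER_QUAD is 4 and INDEX_PER_PORT_E822 is 16 (per the defines later in this series), a worked example:

/* Worked example of the block/offset math set up above:
 *
 *   port 5: block  = 5 / 4        -> quad 1
 *           offset = (5 % 4) * 16 -> 16
 *           len    = 16           -> PHY indices 16..31 of quad 1
 *
 * ice_ptp_request_ts() hands the hard xmit path idx + tx->offset, so
 * slot 0 of port 5 corresponds to PHY timestamp index 16 in its quad.
 */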
- */ -static void ice_ptp_tx_tstamp_cleanup(struct ice_pf *pf, struct ice_ptp_tx *tx) -{ -	struct ice_hw *hw = &pf->hw; -	u8 idx; - -	if (!tx->init) -		return; - -	for_each_set_bit(idx, tx->in_use, tx->len) { -		struct sk_buff *skb; -		u64 raw_tstamp; - -		/* Check if this SKB has been waiting for too long */ -		if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ)) -			continue; - -		/* Read tstamp to be able to use this register again */ -		ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset, -				    &raw_tstamp); - -		spin_lock(&tx->lock); -		skb = tx->tstamps[idx].skb; -		tx->tstamps[idx].skb = NULL; -		clear_bit(idx, tx->in_use); -		spin_unlock(&tx->lock); - -		/* Count the number of Tx timestamps which have timed out */ -		pf->ptp.tx_hwtstamp_timeouts++; - -		/* Free the SKB after we've cleared the bit */ -		dev_kfree_skb_any(skb); -	} -} - -/**   * ice_ptp_update_cached_phctime - Update the cached PHC time values   * @pf: Board specific private structure   * @@ -941,20 +1042,13 @@ static int ice_ptp_update_cached_phctime(struct ice_pf *pf)   * @pf: Board specific private structure   *   * This function must be called when the cached PHC time is no longer valid, - * such as after a time adjustment. It discards any outstanding Tx timestamps, - * and updates the cached PHC time for both the PF and Rx rings. If updating - * the PHC time cannot be done immediately, a warning message is logged and - * the work item is scheduled. - * - * These steps are required in order to ensure that we do not accidentally - * report a timestamp extended by the wrong PHC cached copy. Note that we - * do not directly update the cached timestamp here because it is possible - * this might produce an error when ICE_CFG_BUSY is set. If this occurred, we - * would have to try again. During that time window, timestamps might be - * requested and returned with an invalid extension. Thus, on failure to - * immediately update the cached PHC time we would need to zero the value - * anyways. For this reason, we just zero the value immediately and queue the - * update work item. + * such as after a time adjustment. It marks any currently outstanding Tx + * timestamps as stale and updates the cached PHC time for both the PF and Rx + * rings. + * + * If updating the PHC time cannot be done immediately, a warning message is + * logged and the work item is scheduled immediately to minimize the window + * with a wrong cached timestamp.   */  static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)  { @@ -978,8 +1072,12 @@ static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)  					   msecs_to_jiffies(10));  	} -	/* Flush any outstanding Tx timestamps */ -	ice_ptp_flush_tx_tracker(pf, &pf->ptp.port.tx); +	/* Mark any outstanding timestamps as stale, since they might have +	 * been captured in hardware before the time update. This could lead +	 * to us extending them with the wrong cached value resulting in +	 * incorrect timestamp values. 
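The staleness hazard described above is numeric at heart: the PHY hands back only the low 32 bits of nanoseconds (inside the 40-bit word), and software rebuilds the upper bits from the cached PHC time. A hedged sketch of the extension idea, close in spirit to the driver's ice_ptp_extend_32b_ts() but not necessarily its exact math:

/* Sketch: apply the signed distance between the captured low word and
 * the cached PHC's low word to the full cached value. Only valid while
 * capture and cache lie within ~2.1 s of each other and the PHC has
 * not been adjusted in between -- hence the stale marking above.
 */
static u64 extend_32b_sketch(u64 cached_phc_time, u32 in_tstamp)
{
	u32 phc_lo = lower_32_bits(cached_phc_time);
	u32 delta = in_tstamp - phc_lo;

	if (delta > U32_MAX / 2)	/* capture preceded the cached time */
		return cached_phc_time - (phc_lo - in_tstamp);

	return cached_phc_time + delta;
}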
+	 */ +	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);  }  /** @@ -1060,19 +1158,6 @@ static u64 ice_base_incval(struct ice_pf *pf)  }  /** - * ice_ptp_reset_ts_memory_quad - Reset timestamp memory for one quad - * @pf: The PF private data structure - * @quad: The quad (0-4) - */ -static void ice_ptp_reset_ts_memory_quad(struct ice_pf *pf, int quad) -{ -	struct ice_hw *hw = &pf->hw; - -	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); -	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); -} - -/**   * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state   * @port: PTP port for which Tx FIFO is checked   */ @@ -1124,7 +1209,7 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)  		dev_dbg(ice_pf_to_dev(pf),  			"Port %d Tx FIFO still not empty; resetting quad %d\n",  			port->port_num, quad); -		ice_ptp_reset_ts_memory_quad(pf, quad); +		ice_ptp_reset_ts_memory_quad_e822(hw, quad);  		port->tx_fifo_busy_cnt = FIFO_OK;  		return 0;  	} @@ -1133,130 +1218,49 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)  }  /** - * ice_ptp_check_tx_offset_valid - Check if the Tx PHY offset is valid - * @port: the PTP port to check - * - * Checks whether the Tx offset for the PHY associated with this port is - * valid. Returns 0 if the offset is valid, and a non-zero error code if it is - * not. - */ -static int ice_ptp_check_tx_offset_valid(struct ice_ptp_port *port) -{ -	struct ice_pf *pf = ptp_port_to_pf(port); -	struct device *dev = ice_pf_to_dev(pf); -	struct ice_hw *hw = &pf->hw; -	u32 val; -	int err; - -	err = ice_ptp_check_tx_fifo(port); -	if (err) -		return err; - -	err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_TX_OV_STATUS, -				    &val); -	if (err) { -		dev_err(dev, "Failed to read TX_OV_STATUS for port %d, err %d\n", -			port->port_num, err); -		return -EAGAIN; -	} - -	if (!(val & P_REG_TX_OV_STATUS_OV_M)) -		return -EAGAIN; - -	return 0; -} - -/** - * ice_ptp_check_rx_offset_valid - Check if the Rx PHY offset is valid - * @port: the PTP port to check - * - * Checks whether the Rx offset for the PHY associated with this port is - * valid. Returns 0 if the offset is valid, and a non-zero error code if it is - * not. - */ -static int ice_ptp_check_rx_offset_valid(struct ice_ptp_port *port) -{ -	struct ice_pf *pf = ptp_port_to_pf(port); -	struct device *dev = ice_pf_to_dev(pf); -	struct ice_hw *hw = &pf->hw; -	int err; -	u32 val; - -	err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_RX_OV_STATUS, -				    &val); -	if (err) { -		dev_err(dev, "Failed to read RX_OV_STATUS for port %d, err %d\n", -			port->port_num, err); -		return err; -	} - -	if (!(val & P_REG_RX_OV_STATUS_OV_M)) -		return -EAGAIN; - -	return 0; -} - -/** - * ice_ptp_check_offset_valid - Check port offset valid bit - * @port: Port for which offset valid bit is checked - * - * Returns 0 if both Tx and Rx offset are valid, and -EAGAIN if one of the - * offset is not ready. 
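The removed offset-valid helpers above give way to a worker, ice_ptp_wait_for_offsets() in the hunk that follows, which simply re-queues itself until hardware reports both offsets. The skeleton of that pattern in isolation (struct my_port and hardware_done() are hypothetical stand-ins):

/* Sketch of a self-rescheduling kthread work item that polls until
 * hardware reports completion.
 */
struct my_port {
	struct kthread_delayed_work work;
	struct kthread_worker *kworker;
};

static void poll_until_done(struct kthread_work *w)
{
	struct my_port *p = container_of(w, struct my_port, work.work);

	if (!hardware_done(p)) {
		/* not ready yet: look again in 100 ms */
		kthread_queue_delayed_work(p->kworker, &p->work,
					   msecs_to_jiffies(100));
		return;
	}
	/* done: fall through without re-queuing */
}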
- */ -static int ice_ptp_check_offset_valid(struct ice_ptp_port *port) -{ -	int tx_err, rx_err; - -	/* always check both Tx and Rx offset validity */ -	tx_err = ice_ptp_check_tx_offset_valid(port); -	rx_err = ice_ptp_check_rx_offset_valid(port); - -	if (tx_err || rx_err) -		return -EAGAIN; - -	return 0; -} - -/** - * ice_ptp_wait_for_offset_valid - Check for valid Tx and Rx offsets + * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets   * @work: Pointer to the kthread_work structure for this task   * - * Check whether both the Tx and Rx offsets are valid for enabling the vernier - * calibration. + * Check whether hardware has completed measuring the Tx and Rx offset values + * used to configure and enable vernier timestamp calibration. + * + * Once the offset in either direction is measured, configure the associated + * registers with the calibrated offset values and enable timestamping. The Tx + * and Rx directions are configured independently as soon as their associated + * offsets are known.   * - * Once we have valid offsets from hardware, update the total Tx and Rx - * offsets, and exit bypass mode. This enables more precise timestamps using - * the extra data measured during the vernier calibration process. + * This function reschedules itself until both Tx and Rx calibration have + * completed.   */ -static void ice_ptp_wait_for_offset_valid(struct kthread_work *work) +static void ice_ptp_wait_for_offsets(struct kthread_work *work)  {  	struct ice_ptp_port *port; -	int err; -	struct device *dev;  	struct ice_pf *pf;  	struct ice_hw *hw; +	int tx_err; +	int rx_err;  	port = container_of(work, struct ice_ptp_port, ov_work.work);  	pf = ptp_port_to_pf(port);  	hw = &pf->hw; -	dev = ice_pf_to_dev(pf); -	if (ice_is_reset_in_progress(pf->state)) -		return; - -	if (ice_ptp_check_offset_valid(port)) { -		/* Offsets not ready yet, try again later */ +	if (ice_is_reset_in_progress(pf->state)) { +		/* wait for device driver to complete reset */  		kthread_queue_delayed_work(pf->ptp.kworker,  					   &port->ov_work,  					   msecs_to_jiffies(100));  		return;  	} -	/* Offsets are valid, so it is safe to exit bypass mode */ -	err = ice_phy_exit_bypass_e822(hw, port->port_num); -	if (err) { -		dev_warn(dev, "Failed to exit bypass mode for PHY port %u, err %d\n", -			 port->port_num, err); +	tx_err = ice_ptp_check_tx_fifo(port); +	if (!tx_err) +		tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num); +	rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num); +	if (tx_err || rx_err) { +		/* Tx and/or Rx offset not yet configured, try again later */ +		kthread_queue_delayed_work(pf->ptp.kworker, +					   &port->ov_work, +					   msecs_to_jiffies(100));  		return;  	}  } @@ -1317,16 +1321,20 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)  	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);  	/* temporarily disable Tx timestamps while calibrating PHY offset */ +	spin_lock(&ptp_port->tx.lock);  	ptp_port->tx.calibrating = true; +	spin_unlock(&ptp_port->tx.lock);  	ptp_port->tx_fifo_busy_cnt = 0; -	/* Start the PHY timer in bypass mode */ -	err = ice_start_phy_timer_e822(hw, port, true); +	/* Start the PHY timer in Vernier mode */ +	err = ice_start_phy_timer_e822(hw, port);  	if (err)  		goto out_unlock;  	/* Enable Tx timestamps right away */ +	spin_lock(&ptp_port->tx.lock);  	ptp_port->tx.calibrating = false; +	spin_unlock(&ptp_port->tx.lock);  	kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); @@ -1341,45 +1349,33 @@ out_unlock:  }  /** - * 
ice_ptp_link_change - Set or clear port registers for timestamping + * ice_ptp_link_change - Reconfigure PTP after link status change   * @pf: Board private structure   * @port: Port for which the PHY start is set   * @linkup: Link is up or down   */ -int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) +void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)  {  	struct ice_ptp_port *ptp_port; -	if (!test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) -		return 0; +	if (!test_bit(ICE_FLAG_PTP, pf->flags)) +		return; -	if (port >= ICE_NUM_EXTERNAL_PORTS) -		return -EINVAL; +	if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS)) +		return;  	ptp_port = &pf->ptp.port; -	if (ptp_port->port_num != port) -		return -EINVAL; +	if (WARN_ON_ONCE(ptp_port->port_num != port)) +		return; -	/* Update cached link err for this port immediately */ +	/* Update cached link status for this port immediately */  	ptp_port->link_up = linkup; -	if (!test_bit(ICE_FLAG_PTP, pf->flags)) -		/* PTP is not setup */ -		return -EAGAIN; - -	return ice_ptp_port_phy_restart(ptp_port); -} - -/** - * ice_ptp_reset_ts_memory - Reset timestamp memory for all quads - * @pf: The PF private data structure - */ -static void ice_ptp_reset_ts_memory(struct ice_pf *pf) -{ -	int quad; +	/* E810 devices do not need to reconfigure the PHY */ +	if (ice_is_e810(&pf->hw)) +		return; -	quad = pf->hw.port_info->lport / ICE_PORTS_PER_QUAD; -	ice_ptp_reset_ts_memory_quad(pf, quad); +	ice_ptp_port_phy_restart(ptp_port);  }  /** @@ -1397,7 +1393,7 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)  	int quad;  	u32 val; -	ice_ptp_reset_ts_memory(pf); +	ice_ptp_reset_ts_memory(hw);  	for (quad = 0; quad < ICE_MAX_QUAD; quad++) {  		err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, @@ -1447,24 +1443,10 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)  {  	struct ice_pf *pf = ptp_info_to_pf(info);  	struct ice_hw *hw = &pf->hw; -	u64 incval, diff; -	int neg_adj = 0; +	u64 incval;  	int err; -	incval = ice_base_incval(pf); - -	if (scaled_ppm < 0) { -		neg_adj = 1; -		scaled_ppm = -scaled_ppm; -	} - -	diff = mul_u64_u64_div_u64(incval, (u64)scaled_ppm, -				   1000000ULL << 16); -	if (neg_adj) -		incval -= diff; -	else -		incval += diff; - +	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);  	err = ice_ptp_write_incval_locked(hw, incval);  	if (err) {  		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n", @@ -1792,6 +1774,38 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,  }  /** + * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC + * @info: the driver's PTP info structure + * @rq: The requested feature to change + * @on: Enable/disable flag + */ +static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info, +				    struct ptp_clock_request *rq, int on) +{ +	struct ice_pf *pf = ptp_info_to_pf(info); +	struct ice_perout_channel clk_cfg = {0}; +	int err; + +	switch (rq->type) { +	case PTP_CLK_REQ_PPS: +		clk_cfg.gpio_pin = PPS_PIN_INDEX; +		clk_cfg.period = NSEC_PER_SEC; +		clk_cfg.ena = !!on; + +		err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true); +		break; +	case PTP_CLK_REQ_EXTTS: +		err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index, +					TIME_SYNC_PIN_INDEX, rq->extts.flags); +		break; +	default: +		return -EOPNOTSUPP; +	} + +	return err; +} + +/**   * ice_ptp_gettimex64 - Get the time of the clock   * @info: the driver's PTP info structure   * @ts: timespec64 structure to hold the current time value 
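On the adjfine rewrite above: adjust_by_scaled_ppm() from <linux/math64.h> packages exactly the arithmetic the driver used to open-code, base ± base * scaled_ppm / (1000000 << 16), where scaled_ppm is parts-per-million in 16.16 fixed point. A quick numeric check with round numbers (a sketch; values chosen so the division is exact):

/* 65536 == exactly 1 ppm. With a nominal increment of 1,000,000,000
 * TUs per tick, +1 ppm gives
 *   1000000000 + 1000000000 * 65536 / (1000000 << 16) = 1000001000
 */
u64 incval = adjust_by_scaled_ppm(1000000000ULL, 65536);	/* 1000001000 */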
@@ -2243,6 +2257,19 @@ ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)  }  /** + * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs + * @pf: pointer to the PF instance + * @info: PTP clock capabilities + */ +static void +ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info) +{ +	info->pps = 1; +	info->n_per_out = 0; +	info->n_ext_ts = 1; +} + +/**   * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support   * @pf: Board private structure   * @info: PTP info to fill @@ -2280,6 +2307,23 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)  }  /** + * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support + * @pf: Board private structure + * @info: PTP info to fill + * + * Assign functions to the PTP capabilities structure for E823 devices. + * Functions which operate across all device families should be set directly + * in ice_ptp_set_caps. Only add functions here which are distinct for E823 + * devices. + */ +static void +ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info) +{ +	info->enable = ice_ptp_gpio_enable_e823; +	ice_ptp_setup_pins_e823(pf, info); +} + +/**   * ice_ptp_set_caps - Set PTP capabilities   * @pf: Board private structure   */ @@ -2291,7 +2335,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf)  	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",  		 dev_driver_string(dev), dev_name(dev));  	info->owner = THIS_MODULE; -	info->max_adj = 999999999; +	info->max_adj = 100000000;  	info->adjtime = ice_ptp_adjtime;  	info->adjfine = ice_ptp_adjfine;  	info->gettimex64 = ice_ptp_gettimex64; @@ -2299,6 +2343,8 @@ static void ice_ptp_set_caps(struct ice_pf *pf)  	if (ice_is_e810(&pf->hw))  		ice_ptp_set_funcs_e810(pf, info); +	else if (ice_is_e823(&pf->hw)) +		ice_ptp_set_funcs_e823(pf, info);  	else  		ice_ptp_set_funcs_e822(pf, info);  } @@ -2346,11 +2392,14 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)  {  	u8 idx; -	/* Check if this tracker is initialized */ -	if (!tx->init || tx->calibrating) +	spin_lock(&tx->lock); + +	/* Check that this tracker is accepting new timestamp requests */ +	if (!ice_ptp_is_tx_tracker_up(tx)) { +		spin_unlock(&tx->lock);  		return -1; +	} -	spin_lock(&tx->lock);  	/* Find and set the first available index */  	idx = find_first_zero_bit(tx->in_use, tx->len);  	if (idx < tx->len) { @@ -2359,6 +2408,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)  		 * requests.  		 */  		set_bit(idx, tx->in_use); +		clear_bit(idx, tx->stale);  		tx->tstamps[idx].start = jiffies;  		tx->tstamps[idx].skb = skb_get(skb);  		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; @@ -2373,7 +2423,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)  	if (idx >= tx->len)  		return -1;  	else -		return idx + tx->quad_offset; +		return idx + tx->offset;  }  /** @@ -2398,8 +2448,6 @@ static void ice_ptp_periodic_work(struct kthread_work *work)  	err = ice_ptp_update_cached_phctime(pf); -	ice_ptp_tx_tstamp_cleanup(pf, &pf->ptp.port.tx); -  	/* Run twice a second or reschedule if phc update failed */  	kthread_queue_delayed_work(ptp->kworker, &ptp->work,  				   msecs_to_jiffies(err ? 
10 : 500)); @@ -2476,7 +2524,7 @@ pfr:  		err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);  	} else {  		kthread_init_delayed_work(&ptp->port.ov_work, -					  ice_ptp_wait_for_offset_valid); +					  ice_ptp_wait_for_offsets);  		err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,  					   ptp->port.port_num);  	} @@ -2639,7 +2687,7 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)  		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);  	kthread_init_delayed_work(&ptp_port->ov_work, -				  ice_ptp_wait_for_offset_valid); +				  ice_ptp_wait_for_offsets);  	return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);  } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 028349295b71..9cda2f43e0e5 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -93,9 +93,14 @@ struct ice_perout_channel {   * we discard old requests that were not fulfilled within a 2 second time   * window.   * Timestamp values in the PHY are read only and do not get cleared except at - * hardware reset or when a new timestamp value is captured. The cached_tstamp - * field is used to detect the case where a new timestamp has not yet been - * captured, ensuring that we avoid sending stale timestamp data to the stack. + * hardware reset or when a new timestamp value is captured. + * + * Some PHY types do not provide a "ready" bitmap indicating which timestamp + * indexes are valid. In these cases, we use a cached_tstamp to keep track of + * the last timestamp we read for a given index. If the current timestamp + * value is the same as the cached value, we assume a new timestamp hasn't + * been captured. This avoids reporting stale timestamps to the stack. This is + * only done if the verify_cached flag is set in ice_ptp_tx structure.   */  struct ice_tx_tstamp {  	struct sk_buff *skb; @@ -105,30 +110,35 @@ struct ice_tx_tstamp {  /**   * struct ice_ptp_tx - Tracking structure for all Tx timestamp requests on a port - * @lock: lock to prevent concurrent write to in_use bitmap + * @lock: lock to prevent concurrent access to fields of this struct   * @tstamps: array of len to store outstanding requests   * @in_use: bitmap of len to indicate which slots are in use - * @quad: which quad the timestamps are captured in - * @quad_offset: offset into timestamp block of the quad to get the real index + * @stale: bitmap of len to indicate slots which have stale timestamps + * @block: which memory block (quad or port) the timestamps are captured in + * @offset: offset into timestamp block to get the real index   * @len: length of the tstamps and in_use fields.   * @init: if true, the tracker is initialized;   * @calibrating: if true, the PHY is calibrating the Tx offset. During this   *               window, timestamps are temporarily disabled. 
+ * @verify_cached: if true, verify new timestamp differs from last read value   */  struct ice_ptp_tx {  	spinlock_t lock; /* lock protecting in_use bitmap */  	struct ice_tx_tstamp *tstamps;  	unsigned long *in_use; -	u8 quad; -	u8 quad_offset; +	unsigned long *stale; +	u8 block; +	u8 offset;  	u8 len; -	u8 init; -	u8 calibrating; +	u8 init : 1; +	u8 calibrating : 1; +	u8 verify_cached : 1;  };  /* Quad and port information for initializing timestamp blocks */  #define INDEX_PER_QUAD			64 -#define INDEX_PER_PORT			(INDEX_PER_QUAD / ICE_PORTS_PER_QUAD) +#define INDEX_PER_PORT_E822		16 +#define INDEX_PER_PORT_E810		64  /**   * struct ice_ptp_port - data used to initialize an external port for PTP @@ -256,7 +266,7 @@ void ice_ptp_reset(struct ice_pf *pf);  void ice_ptp_prepare_for_reset(struct ice_pf *pf);  void ice_ptp_init(struct ice_pf *pf);  void ice_ptp_release(struct ice_pf *pf); -int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup); +void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);  #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */  static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)  { @@ -291,7 +301,8 @@ static inline void ice_ptp_reset(struct ice_pf *pf) { }  static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }  static inline void ice_ptp_init(struct ice_pf *pf) { }  static inline void ice_ptp_release(struct ice_pf *pf) { } -static inline int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) -{ return 0; } +static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) +{ +}  #endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */  #endif /* _ICE_PTP_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 772b1f566d6e..a38614d21ea8 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -656,6 +656,32 @@ ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)  }  /** + * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block + * @hw: pointer to the HW struct + * @quad: the quad to read from + * + * Clear all timestamps from the PHY quad block that is shared between the + * internal PHYs on the E822 devices. + */ +void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad) +{ +	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); +	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); +} + +/** + * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks + * @hw: pointer to the HW struct + */ +static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw) +{ +	unsigned int quad; + +	for (quad = 0; quad < ICE_MAX_QUAD; quad++) +		ice_ptp_reset_ts_memory_quad_e822(hw, quad); +} + +/**   * ice_read_cgu_reg_e822 - Read a CGU register   * @hw: pointer to the HW struct   * @addr: Register address to read @@ -1715,21 +1741,48 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)   * adjust Tx timestamps by. This is calculated by combining some known static   * latency along with the Vernier offset computations done by hardware.   * - * This function must be called only after the offset registers are valid, - * i.e. after the Vernier calibration wait has passed, to ensure that the PHY - * has measured the offset. 
+ * This function will not return successfully until the Tx offset calculations + * have been completed, which requires waiting until at least one packet has + * been transmitted by the device. It is safe to call this function + * periodically until calibration succeeds, as it will only program the offset + * once.   *   * To avoid overflow, when calculating the offset based on the known static   * latency values, we use measurements in 1/100th of a nanosecond, and divide   * the TUs per second up front. This avoids overflow while allowing   * calculation of the adjustment using integer arithmetic. + * + * Returns zero on success, -EBUSY if the hardware vernier offset + * calibration has not completed, or another error code on failure.   */ -static int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) +int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)  {  	enum ice_ptp_link_spd link_spd;  	enum ice_ptp_fec_mode fec_mode;  	u64 total_offset, val;  	int err; +	u32 reg; + +	/* Nothing to do if we've already programmed the offset */ +	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OR, &reg); +	if (err) { +		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n", +			  port, err); +		return err; +	} + +	if (reg) +		return 0; + +	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &reg); +	if (err) { +		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n", +			  port, err); +		return err; +	} + +	if (!(reg & P_REG_TX_OV_STATUS_OV_M)) +		return -EBUSY;  	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);  	if (err) @@ -1783,46 +1836,8 @@ static int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)  	if (err)  		return err; -	return 0; -} - -/** - * ice_phy_cfg_fixed_tx_offset_e822 - Configure Tx offset for bypass mode - * @hw: pointer to the HW struct - * @port: the PHY port to configure - * - * Calculate and program the fixed Tx offset, and indicate that the offset is - * ready. This can be used when operating in bypass mode. - */ -static int -ice_phy_cfg_fixed_tx_offset_e822(struct ice_hw *hw, u8 port) -{ -	enum ice_ptp_link_spd link_spd; -	enum ice_ptp_fec_mode fec_mode; -	u64 total_offset; -	int err; - -	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); -	if (err) -		return err; - -	total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd); - -	/* Program the fixed Tx offset into the P_REG_TOTAL_TX_OFFSET_L -	 * register, then indicate that the Tx offset is ready. After this, -	 * timestamps will be enabled. -	 * -	 * Note that this skips including the more precise offsets generated -	 * by the Vernier calibration. -	 */ -	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L, -					 total_offset); -	if (err) -		return err; - -	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1); -	if (err) -		return err; +	dev_info(ice_hw_to_dev(hw), "Port=%d Tx vernier offset calibration complete\n", +		 port);  	return 0;  } @@ -2026,6 +2041,11 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)   * measurements taken in hardware with some data about known fixed delay as   * well as adjusting for multi-lane alignment delay.   * + * This function will not return successfully until the Rx offset calculations + * have been completed, which requires waiting until at least one packet has + * been received by the device. It is safe to call this function periodically + * until calibration succeeds, as it will only program the offset once. 
+ *   * This function must be called only after the offset registers are valid,   * i.e. after the Vernier calibration wait has passed, to ensure that the PHY   * has measured the offset. @@ -2034,13 +2054,38 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)   * latency values, we use measurements in 1/100th of a nanosecond, and divide   * the TUs per second up front. This avoids overflow while allowing   * calculation of the adjustment using integer arithmetic. + * + * Returns zero on success, -EBUSY if the hardware vernier offset + * calibration has not completed, or another error code on failure.   */ -static int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) +int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)  {  	enum ice_ptp_link_spd link_spd;  	enum ice_ptp_fec_mode fec_mode;  	u64 total_offset, pmd, val;  	int err; +	u32 reg; + +	/* Nothing to do if we've already programmed the offset */ +	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OR, &reg); +	if (err) { +		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n", +			  port, err); +		return err; +	} + +	if (reg) +		return 0; + +	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &reg); +	if (err) { +		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n", +			  port, err); +		return err; +	} + +	if (!(reg & P_REG_RX_OV_STATUS_OV_M)) +		return -EBUSY;  	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);  	if (err) @@ -2101,46 +2146,8 @@ static int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)  	if (err)  		return err; -	return 0; -} - -/** - * ice_phy_cfg_fixed_rx_offset_e822 - Configure fixed Rx offset for bypass mode - * @hw: pointer to the HW struct - * @port: the PHY port to configure - * - * Calculate and program the fixed Rx offset, and indicate that the offset is - * ready. This can be used when operating in bypass mode. - */ -static int -ice_phy_cfg_fixed_rx_offset_e822(struct ice_hw *hw, u8 port) -{ -	enum ice_ptp_link_spd link_spd; -	enum ice_ptp_fec_mode fec_mode; -	u64 total_offset; -	int err; - -	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); -	if (err) -		return err; - -	total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd); - -	/* Program the fixed Rx offset into the P_REG_TOTAL_RX_OFFSET_L -	 * register, then indicate that the Rx offset is ready. After this, -	 * timestamps will be enabled. -	 * -	 * Note that this skips including the more precise offsets generated -	 * by Vernier calibration. -	 */ -	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L, -					 total_offset); -	if (err) -		return err; - -	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1); -	if (err) -		return err; +	dev_info(ice_hw_to_dev(hw), "Port=%d Rx vernier offset calibration complete\n", +		 port);  	return 0;  } @@ -2323,20 +2330,14 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)   * ice_start_phy_timer_e822 - Start the PHY clock timer   * @hw: pointer to the HW struct   * @port: the PHY port to start - * @bypass: if true, start the PHY in bypass mode   *   * Start the clock of a PHY port. This must be done as part of the flow to   * re-calibrate Tx and Rx timestamping offsets whenever the clock time is   * initialized or when link speed changes.   * - * Bypass mode enables timestamps immediately without waiting for Vernier - * calibration to complete. 
Hardware will still continue taking Vernier - * measurements on Tx or Rx of packets, but they will not be applied to - * timestamps. Use ice_phy_exit_bypass_e822 to exit bypass mode once hardware - * has completed offset calculation. + * Hardware will take Vernier measurements on Tx or Rx of packets.   */ -int -ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass) +int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)  {  	u32 lo, hi, val;  	u64 incval; @@ -2414,110 +2415,42 @@ ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)  	if (err)  		return err; -	if (bypass) { -		val |= P_REG_PS_BYPASS_MODE_M; -		/* Enter BYPASS mode, enabling timestamps immediately. */ -		err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); -		if (err) -			return err; - -		/* Program the fixed Tx offset */ -		err = ice_phy_cfg_fixed_tx_offset_e822(hw, port); -		if (err) -			return err; - -		/* Program the fixed Rx offset */ -		err = ice_phy_cfg_fixed_rx_offset_e822(hw, port); -		if (err) -			return err; -	} -  	ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);  	return 0;  }  /** - * ice_phy_exit_bypass_e822 - Exit bypass mode, after vernier calculations + * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register   * @hw: pointer to the HW struct - * @port: the PHY port to configure - * - * After hardware finishes vernier calculations for the Tx and Rx offset, this - * function can be used to exit bypass mode by updating the total Tx and Rx - * offsets, and then disabling bypass. This will enable hardware to include - * the more precise offset calibrations, increasing precision of the generated - * timestamps. + * @quad: the timestamp quad to read from + * @tstamp_ready: contents of the Tx memory status register   * - * This cannot be done until hardware has measured the offsets, which requires - * waiting until at least one packet has been sent and received by the device. + * Read the Q_REG_TX_MEMORY_STATUS register indicating which timestamps in + * the PHY are ready. A set bit means the corresponding timestamp is valid and + * ready to be captured from the PHY timestamp block.   
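Once assembled from its two 32-bit halves, the ready mask described above is consumed per port by indexing from the tracker's offset. A minimal sketch of that consumption (the helper name is hypothetical):

/* Sketch: does slot `idx` of a port whose window begins at `offset`
 * within the quad have a timestamp latched?
 */
static bool slot_ready(u64 tstamp_ready, u8 offset, u8 idx)
{
	return tstamp_ready & BIT_ULL(offset + idx);
}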
*/ -int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port) +static int +ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)  { +	u32 hi, lo;  	int err; -	u32 val; - -	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &val); -	if (err) { -		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n", -			  port, err); -		return err; -	} - -	if (!(val & P_REG_TX_OV_STATUS_OV_M)) { -		ice_debug(hw, ICE_DBG_PTP, "Tx offset is not yet valid for port %u\n", -			  port); -		return -EBUSY; -	} - -	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &val); -	if (err) { -		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n", -			  port, err); -		return err; -	} - -	if (!(val & P_REG_TX_OV_STATUS_OV_M)) { -		ice_debug(hw, ICE_DBG_PTP, "Rx offset is not yet valid for port %u\n", -			  port); -		return -EBUSY; -	} -	err = ice_phy_cfg_tx_offset_e822(hw, port); +	err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);  	if (err) { -		ice_debug(hw, ICE_DBG_PTP, "Failed to program total Tx offset for port %u, err %d\n", -			  port, err); +		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n", +			  quad, err);  		return err;  	} -	err = ice_phy_cfg_rx_offset_e822(hw, port); +	err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);  	if (err) { -		ice_debug(hw, ICE_DBG_PTP, "Failed to program total Rx offset for port %u, err %d\n", -			  port, err); +		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n", +			  quad, err);  		return err;  	} -	/* Exit bypass mode now that the offset has been updated */ -	err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); -	if (err) { -		ice_debug(hw, ICE_DBG_PTP, "Failed to read P_REG_PS for port %u, err %d\n", -			  port, err); -		return err; -	} - -	if (!(val & P_REG_PS_BYPASS_MODE_M)) -		ice_debug(hw, ICE_DBG_PTP, "Port %u not in bypass mode\n", -			  port); - -	val &= ~P_REG_PS_BYPASS_MODE_M; -	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); -	if (err) { -		ice_debug(hw, ICE_DBG_PTP, "Failed to disable bypass for port %u, err %d\n", -			  port, err); -		return err; -	} - -	dev_info(ice_hw_to_dev(hw), "Exiting bypass mode on PHY port %u\n", -		 port); +	*tstamp_ready = (u64)hi << 32 | (u64)lo;  	return 0;  } @@ -2963,16 +2896,18 @@ bool ice_ptp_lock(struct ice_hw *hw)  	u32 hw_lock;  	int i; -#define MAX_TRIES 5 +#define MAX_TRIES 15  	for (i = 0; i < MAX_TRIES; i++) {  		hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));  		hw_lock = hw_lock & PFTSYN_SEM_BUSY_M; -		if (!hw_lock) -			break; +		if (hw_lock) { +			/* Somebody is holding the lock */ +			usleep_range(5000, 6000); +			continue; +		} -		/* Somebody is holding the lock */ -		usleep_range(10000, 20000); +		break;  	}  	return !hw_lock; @@ -3194,6 +3129,22 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)  		return ice_clear_phy_tstamp_e822(hw, block, idx);  } +/** + * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register + * @hw: pointer to the HW struct + * @port: the PHY port to read + * @tstamp_ready: contents of the Tx memory status register + * + * E810 devices do not use a Tx memory status register. Instead simply + * indicate that all timestamps are currently ready. 
+ */ +static int +ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready) +{ +	*tstamp_ready = 0xFFFFFFFFFFFFFFFF; +	return 0; +} +  /* E810T SMA functions   *   * The following functions operate specifically on E810T hardware and are used @@ -3377,6 +3328,18 @@ bool ice_is_pca9575_present(struct ice_hw *hw)  }  /** + * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks + * @hw: pointer to the HW struct + */ +void ice_ptp_reset_ts_memory(struct ice_hw *hw) +{ +	if (ice_is_e810(hw)) +		return; + +	ice_ptp_reset_ts_memory_e822(hw); +} + +/**   * ice_ptp_init_phc - Initialize PTP hardware clock   * @hw: pointer to the HW struct   * @@ -3397,3 +3360,24 @@ int ice_ptp_init_phc(struct ice_hw *hw)  	else  		return ice_ptp_init_phc_e822(hw);  } + +/** + * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication + * @hw: pointer to the HW struct + * @block: the timestamp block to check + * @tstamp_ready: storage for the PHY Tx memory status information + * + * Check the PHY for Tx timestamp memory status. This reports a 64 bit value + * which indicates which timestamps in the block may be captured. A set bit + * means the timestamp can be read. An unset bit means the timestamp is not + * ready and software should avoid reading the register. + */ +int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) +{ +	if (ice_is_e810(hw)) +		return ice_get_phy_tx_tstamp_ready_e810(hw, block, +							tstamp_ready); +	else +		return ice_get_phy_tx_tstamp_ready_e822(hw, block, +							tstamp_ready); +} diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 2bda64c76abc..3b68cb91bd81 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -133,7 +133,9 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);  int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj);  int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);  int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx); +void ice_ptp_reset_ts_memory(struct ice_hw *hw);  int ice_ptp_init_phc(struct ice_hw *hw); +int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);  /* E822 family functions */  int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val); @@ -141,6 +143,7 @@ int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val);  int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);  int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);  int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time); +void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad);  /**   * ice_e822_time_ref - Get the current TIME_REF from capabilities @@ -184,8 +187,9 @@ static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref)  /* E822 Vernier calibration functions */  int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset); -int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass); -int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port); +int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port); +int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port); +int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port);  /* E810 family functions */  int ice_ptp_init_phy_e810(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index 
bd31748aae1b..fd1f8b0ad0ab 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -6,6 +6,7 @@  #include "ice_devlink.h"  #include "ice_sriov.h"  #include "ice_tc_lib.h" +#include "ice_dcb_lib.h"  /**   * ice_repr_get_sw_port_id - get port ID associated with representor @@ -134,14 +135,6 @@ static int ice_repr_stop(struct net_device *netdev)  	return 0;  } -static struct devlink_port * -ice_repr_get_devlink_port(struct net_device *netdev) -{ -	struct ice_repr *repr = ice_netdev_to_repr(netdev); - -	return &repr->vf->devlink_port; -} -  /**   * ice_repr_sp_stats64 - get slow path stats for port representor   * @dev: network interface device structure @@ -163,18 +156,20 @@ ice_repr_sp_stats64(const struct net_device *dev,  	u64 pkts, bytes;  	tx_ring = np->vsi->tx_rings[vf_id]; -	ice_fetch_u64_stats_per_ring(&tx_ring->syncp, tx_ring->stats, +	ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp, +				     tx_ring->ring_stats->stats,  				     &pkts, &bytes);  	stats->rx_packets = pkts;  	stats->rx_bytes = bytes;  	rx_ring = np->vsi->rx_rings[vf_id]; -	ice_fetch_u64_stats_per_ring(&rx_ring->syncp, rx_ring->stats, +	ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp, +				     rx_ring->ring_stats->stats,  				     &pkts, &bytes);  	stats->tx_packets = pkts;  	stats->tx_bytes = bytes; -	stats->tx_dropped = rx_ring->rx_stats.alloc_page_failed + -			    rx_ring->rx_stats.alloc_buf_failed; +	stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed + +			    rx_ring->ring_stats->rx_stats.alloc_buf_failed;  	return 0;  } @@ -250,7 +245,6 @@ static const struct net_device_ops ice_repr_netdev_ops = {  	.ndo_open = ice_repr_open,  	.ndo_stop = ice_repr_stop,  	.ndo_start_xmit = ice_eswitch_port_start_xmit, -	.ndo_get_devlink_port = ice_repr_get_devlink_port,  	.ndo_setup_tc = ice_repr_setup_tc,  	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,  	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats, @@ -339,12 +333,11 @@ static int ice_repr_add(struct ice_vf *vf)  	repr->netdev->max_mtu = ICE_MAX_MTU;  	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf)); +	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);  	err = ice_repr_reg_netdev(repr->netdev);  	if (err)  		goto err_netdev; -	devlink_port_type_eth_set(&vf->devlink_port, repr->netdev); -  	ice_virtchnl_set_repr_ops(vf);  	return 0; @@ -399,6 +392,7 @@ static void ice_repr_rem(struct ice_vf *vf)   */  void ice_repr_rem_from_all_vfs(struct ice_pf *pf)  { +	struct devlink *devlink;  	struct ice_vf *vf;  	unsigned int bkt; @@ -406,6 +400,14 @@ void ice_repr_rem_from_all_vfs(struct ice_pf *pf)  	ice_for_each_vf(pf, bkt, vf)  		ice_repr_rem(vf); + +	/* since all port representors are destroyed, there is +	 * no point in keeping the nodes +	 */ +	devlink = priv_to_devlink(pf); +	devl_lock(devlink); +	devl_rate_nodes_destroy(devlink); +	devl_unlock(devlink);  }  /** @@ -414,6 +416,7 @@ void ice_repr_rem_from_all_vfs(struct ice_pf *pf)   */  int ice_repr_add_for_all_vfs(struct ice_pf *pf)  { +	struct devlink *devlink;  	struct ice_vf *vf;  	unsigned int bkt;  	int err; @@ -426,6 +429,13 @@ int ice_repr_add_for_all_vfs(struct ice_pf *pf)  			goto err;  	} +	/* only export if ADQ and DCB disabled */ +	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf)) +		return 0; + +	devlink = priv_to_devlink(pf); +	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf)); +  	return 0;  err: diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c 
b/drivers/net/ethernet/intel/ice/ice_sched.c index 118595763bba..4eca8d195ef0 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -1,6 +1,7 @@  // SPDX-License-Identifier: GPL-2.0  /* Copyright (c) 2018, Intel Corporation. */ +#include <net/devlink.h>  #include "ice_sched.h"  /** @@ -142,12 +143,14 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,   * @pi: port information structure   * @layer: Scheduler layer of the node   * @info: Scheduler element information from firmware + * @prealloc_node: preallocated ice_sched_node struct for SW DB   *   * This function inserts a scheduler node into the SW DB.   */  int  ice_sched_add_node(struct ice_port_info *pi, u8 layer, -		   struct ice_aqc_txsched_elem_data *info) +		   struct ice_aqc_txsched_elem_data *info, +		   struct ice_sched_node *prealloc_node)  {  	struct ice_aqc_txsched_elem_data elem;  	struct ice_sched_node *parent; @@ -176,7 +179,10 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,  	if (status)  		return status; -	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL); +	if (prealloc_node) +		node = prealloc_node; +	else +		node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);  	if (!node)  		return -ENOMEM;  	if (hw->max_children[layer]) { @@ -355,6 +361,9 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)  	/* leaf nodes have no children */  	if (node->children)  		devm_kfree(ice_hw_to_dev(hw), node->children); + +	kfree(node->name); +	xa_erase(&pi->sched_node_ids, node->id);  	devm_kfree(ice_hw_to_dev(hw), node);  } @@ -872,13 +881,15 @@ void ice_sched_cleanup_all(struct ice_hw *hw)   * @num_nodes: number of nodes   * @num_nodes_added: pointer to num nodes added   * @first_node_teid: if new nodes are added then return the TEID of first node + * @prealloc_nodes: preallocated nodes struct for software DB   *   * This function adds nodes to HW as well as to the SW DB for a given layer   */ -static int +int  ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,  		    struct ice_sched_node *parent, u8 layer, u16 num_nodes, -		    u16 *num_nodes_added, u32 *first_node_teid) +		    u16 *num_nodes_added, u32 *first_node_teid, +		    struct ice_sched_node **prealloc_nodes)  {  	struct ice_sched_node *prev, *new_node;  	struct ice_aqc_add_elem *buf; @@ -924,7 +935,11 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,  	*num_nodes_added = num_nodes;  	/* add nodes to the SW DB */  	for (i = 0; i < num_nodes; i++) { -		status = ice_sched_add_node(pi, layer, &buf->generic[i]); +		if (prealloc_nodes) +			status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]); +		else +			status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL); +  		if (status) {  			ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",  				  status); @@ -940,6 +955,22 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,  		new_node->sibling = NULL;  		new_node->tc_num = tc_node->tc_num; +		new_node->tx_weight = ICE_SCHED_DFLT_BW_WT; +		new_node->tx_share = ICE_SCHED_DFLT_BW; +		new_node->tx_max = ICE_SCHED_DFLT_BW; +		new_node->name = kzalloc(SCHED_NODE_NAME_MAX_LEN, GFP_KERNEL); +		if (!new_node->name) +			return -ENOMEM; + +		status = xa_alloc(&pi->sched_node_ids, &new_node->id, NULL, XA_LIMIT(0, UINT_MAX), +				  GFP_KERNEL); +		if (status) { +			ice_debug(hw, ICE_DBG_SCHED, "xa_alloc failed for sched node status =%d\n", 
+				  status); +			break; +		} + +		snprintf(new_node->name, SCHED_NODE_NAME_MAX_LEN, "node_%u", new_node->id);  		/* add it to previous node sibling pointer */  		/* Note: siblings are not linked across branches */ @@ -1003,7 +1034,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,  	}  	return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, -				   num_nodes_added, first_node_teid); +				   num_nodes_added, first_node_teid, NULL);  }  /** @@ -1032,7 +1063,6 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,  	*num_nodes_added = 0;  	while (*num_nodes_added < num_nodes) {  		u16 max_child_nodes, num_added = 0; -		/* cppcheck-suppress unusedVariable */  		u32 temp;  		status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, @@ -1268,7 +1298,7 @@ int ice_sched_init_port(struct ice_port_info *pi)  			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)  				hw->sw_entry_point_layer = j; -			status = ice_sched_add_node(pi, j, &buf[i].generic[j]); +			status = ice_sched_add_node(pi, j, &buf[i].generic[j], NULL);  			if (status)  				goto err_init_port;  		} @@ -1624,12 +1654,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,  	u32 first_node_teid;  	u16 num_added = 0;  	u8 i, qgl, vsil; -	int status;  	qgl = ice_sched_get_qgrp_layer(hw);  	vsil = ice_sched_get_vsi_layer(hw);  	parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);  	for (i = vsil + 1; i <= qgl; i++) { +		int status; +  		if (!parent)  			return -EIO; @@ -1725,13 +1756,14 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,  	u32 first_node_teid;  	u16 num_added = 0;  	u8 i, vsil; -	int status;  	if (!pi)  		return -EINVAL;  	vsil = ice_sched_get_vsi_layer(pi->hw);  	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { +		int status; +  		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,  						      i, num_nodes[i],  						      &first_node_teid, @@ -2154,7 +2186,7 @@ ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,   * This function removes the child from the old parent and adds it to a new   * parent   */ -static void +void  ice_sched_update_parent(struct ice_sched_node *new_parent,  			struct ice_sched_node *node)  { @@ -2188,7 +2220,7 @@ ice_sched_update_parent(struct ice_sched_node *new_parent,   *   * This function moves the child nodes to a given parent.   */ -static int +int  ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,  		     u16 num_items, u32 *list)  { @@ -3560,7 +3592,7 @@ ice_sched_set_eir_srl_excl(struct ice_port_info *pi,   * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile   * ID from local database. The caller needs to hold scheduler lock.   */ -static int +int  ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,  		      enum ice_rl_type rl_type, u32 bw, u8 layer_num)  { @@ -3597,6 +3629,57 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,  }  /** + * ice_sched_set_node_priority - set node's priority + * @pi: port information structure + * @node: tree node + * @priority: number 0-7 representing priority among siblings + * + * This function sets the priority of a node among its siblings. 
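+ *
+ * A hypothetical caller that has already validated the 0-7 range might
+ * apply a mid-level setting as: ice_sched_set_node_priority(pi, node, 4);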
+ */ +int +ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node, +			    u16 priority) +{ +	struct ice_aqc_txsched_elem_data buf; +	struct ice_aqc_txsched_elem *data; + +	buf = node->info; +	data = &buf.data; + +	data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; +	data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_PRIO_M, priority); + +	return ice_sched_update_elem(pi->hw, node, &buf); +} + +/** + * ice_sched_set_node_weight - set node's weight + * @pi: port information structure + * @node: tree node + * @weight: number 1-200 representing weight for WFQ + * + * This function sets weight of the node for WFQ algorithm. + */ +int +ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight) +{ +	struct ice_aqc_txsched_elem_data buf; +	struct ice_aqc_txsched_elem *data; + +	buf = node->info; +	data = &buf.data; + +	data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR | +			       ICE_AQC_ELEM_VALID_GENERIC; +	data->cir_bw.bw_alloc = cpu_to_le16(weight); +	data->eir_bw.bw_alloc = cpu_to_le16(weight); + +	data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_SP_M, 0x0); + +	return ice_sched_update_elem(pi->hw, node, &buf); +} + +/**   * ice_sched_set_node_bw_lmt - set node's BW limit   * @pi: port information structure   * @node: tree node @@ -3606,7 +3689,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,   * It updates node's BW limit parameters like BW RL profile ID of type CIR,   * EIR, or SRL. The caller needs to hold scheduler lock.   */ -static int +int  ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,  			  enum ice_rl_type rl_type, u32 bw)  { diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index 4f91577fed56..9c100747445a 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -6,6 +6,8 @@  #include "ice_common.h" +#define SCHED_NODE_NAME_MAX_LEN 32 +  #define ICE_QGRP_LAYER_OFFSET	2  #define ICE_VSI_LAYER_OFFSET	4  #define ICE_AGG_LAYER_OFFSET	6 @@ -69,6 +71,29 @@ int  ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,  			 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,  			 u16 *elems_ret, struct ice_sq_cd *cd); + +int +ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, +			  enum ice_rl_type rl_type, u32 bw); + +int +ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, +		      enum ice_rl_type rl_type, u32 bw, u8 layer_num); + +int +ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, +		    struct ice_sched_node *parent, u8 layer, u16 num_nodes, +		    u16 *num_nodes_added, u32 *first_node_teid, +		    struct ice_sched_node **prealloc_node); + +int +ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, +		     u16 num_items, u32 *list); + +int ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node, +				u16 priority); +int ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight); +  int ice_sched_init_port(struct ice_port_info *pi);  int ice_sched_query_res_alloc(struct ice_hw *hw);  void ice_sched_get_psm_clk_freq(struct ice_hw *hw); @@ -81,7 +106,11 @@ struct ice_sched_node *  ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);  int  ice_sched_add_node(struct ice_port_info *pi, u8 layer, -		   struct ice_aqc_txsched_elem_data *info); +		   
struct ice_aqc_txsched_elem_data *info, +		   struct ice_sched_node *prealloc_node); +void +ice_sched_update_parent(struct ice_sched_node *new_parent, +			struct ice_sched_node *node);  void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);  struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);  struct ice_sched_node * diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 3ba1408c56a9..96a64c25e2ef 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -41,21 +41,6 @@ static void ice_free_vf_entries(struct ice_pf *pf)  }  /** - * ice_vf_vsi_release - invalidate the VF's VSI after freeing it - * @vf: invalidate this VF's VSI after freeing it - */ -static void ice_vf_vsi_release(struct ice_vf *vf) -{ -	struct ice_vsi *vsi = ice_get_vf_vsi(vf); - -	if (WARN_ON(!vsi)) -		return; - -	ice_vsi_release(vsi); -	ice_vf_invalidate_vsi(vf); -} - -/**   * ice_free_vf_res - Free a VF's resources   * @vf: pointer to the VF info   */ @@ -248,11 +233,16 @@ void ice_free_vfs(struct ice_pf *pf)   */  static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)  { -	struct ice_port_info *pi = ice_vf_get_port_info(vf); +	struct ice_vsi_cfg_params params = {};  	struct ice_pf *pf = vf->pf;  	struct ice_vsi *vsi; -	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL); +	params.type = ICE_VSI_VF; +	params.pi = ice_vf_get_port_info(vf); +	params.vf = vf; +	params.flags = ICE_VSI_FLAG_INIT; + +	vsi = ice_vsi_setup(pf, ¶ms);  	if (!vsi) {  		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); @@ -583,51 +573,19 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)   */  static int ice_init_vf_vsi_res(struct ice_vf *vf)  { -	struct ice_vsi_vlan_ops *vlan_ops;  	struct ice_pf *pf = vf->pf; -	u8 broadcast[ETH_ALEN];  	struct ice_vsi *vsi; -	struct device *dev;  	int err;  	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); -	dev = ice_pf_to_dev(pf);  	vsi = ice_vf_vsi_setup(vf);  	if (!vsi)  		return -ENOMEM; -	err = ice_vsi_add_vlan_zero(vsi); -	if (err) { -		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n", -			 vf->vf_id); -		goto release_vsi; -	} - -	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); -	err = vlan_ops->ena_rx_filtering(vsi); -	if (err) { -		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n", -			 vf->vf_id); -		goto release_vsi; -	} - -	eth_broadcast_addr(broadcast); -	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); -	if (err) { -		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n", -			vf->vf_id, err); -		goto release_vsi; -	} - -	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk); -	if (err) { -		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n", -			 vf->vf_id); +	err = ice_vf_init_host_cfg(vf, vsi); +	if (err)  		goto release_vsi; -	} - -	vf->num_mac = 1;  	return 0; @@ -697,6 +655,21 @@ static void ice_sriov_free_vf(struct ice_vf *vf)  }  /** + * ice_sriov_clear_reset_state - clears VF Reset status register + * @vf: the vf to configure + */ +static void ice_sriov_clear_reset_state(struct ice_vf *vf) +{ +	struct ice_hw *hw = &vf->pf->hw; + +	/* Clear the reset status register so that VF immediately sees that +	 * the device is resetting, even if hardware hasn't yet gotten around +	 * to clearing VFGEN_RSTAT for us. 
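+	 * The VIRTCHNL_VFR_VFACTIVE counterpart is written back in
+	 * ice_sriov_post_vsi_rebuild() once the VSI rebuild completes.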
+	 */ +	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS); +} + +/**   * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers   * @vf: the vf to configure   */ @@ -799,23 +772,19 @@ static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)  }  /** - * ice_sriov_vsi_rebuild - release and rebuild VF's VSI - * @vf: VF to release and setup the VSI for + * ice_sriov_create_vsi - Create a new VSI for a VF + * @vf: VF to create the VSI for   * - * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF - * configuration change, etc.). + * This is called by ice_vf_recreate_vsi to create the new VSI after the old + * VSI has been released.   */ -static int ice_sriov_vsi_rebuild(struct ice_vf *vf) +static int ice_sriov_create_vsi(struct ice_vf *vf)  { -	struct ice_pf *pf = vf->pf; +	struct ice_vsi *vsi; -	ice_vf_vsi_release(vf); -	if (!ice_vf_vsi_setup(vf)) { -		dev_err(ice_pf_to_dev(pf), -			"Failed to release and setup the VF%u's VSI\n", -			vf->vf_id); +	vsi = ice_vf_vsi_setup(vf); +	if (!vsi)  		return -ENOMEM; -	}  	return 0;  } @@ -826,8 +795,6 @@ static int ice_sriov_vsi_rebuild(struct ice_vf *vf)   */  static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)  { -	ice_vf_rebuild_host_cfg(vf); -	ice_vf_set_initialized(vf);  	ice_ena_vf_mappings(vf);  	wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);  } @@ -835,11 +802,13 @@ static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)  static const struct ice_vf_ops ice_sriov_vf_ops = {  	.reset_type = ICE_VF_RESET,  	.free = ice_sriov_free_vf, +	.clear_reset_state = ice_sriov_clear_reset_state,  	.clear_mbx_register = ice_sriov_clear_mbx_register,  	.trigger_reset_register = ice_sriov_trigger_reset_register,  	.poll_reset_status = ice_sriov_poll_reset_status,  	.clear_reset_trigger = ice_sriov_clear_reset_trigger, -	.vsi_rebuild = ice_sriov_vsi_rebuild, +	.irq_close = NULL, +	.create_vsi = ice_sriov_create_vsi,  	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,  }; @@ -879,21 +848,9 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)  		/* set sriov vf ops for VFs created during SRIOV flow */  		vf->vf_ops = &ice_sriov_vf_ops; -		vf->vf_sw_id = pf->first_sw; -		/* assign default capabilities */ -		vf->spoofchk = true; -		vf->num_vf_qs = pf->vfs.num_qps_per; -		ice_vc_set_default_allowlist(vf); - -		/* ctrl_vsi_idx will be set to a valid value only when VF -		 * creates its first fdir rule. 
-		 */ -		ice_vf_ctrl_invalidate_vsi(vf); -		ice_vf_fdir_init(vf); - -		ice_virtchnl_set_dflt_ops(vf); +		ice_initialize_vf_entry(vf); -		mutex_init(&vf->cfg_lock); +		vf->vf_sw_id = pf->first_sw;  		hash_add_rcu(vfs->table, &vf->entry, vf_id);  	} @@ -1285,7 +1242,7 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)  		goto out_put_vf;  	ivi->vf = vf_id; -	ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr); +	ether_addr_copy(ivi->mac, vf->hw_lan_addr);  	/* VF configuration for VLAN and applicable QoS */  	ivi->vlan = ice_vf_get_port_vlan_id(vf); @@ -1333,8 +1290,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)  		return -EINVAL;  	/* nothing left to do, unicast MAC already set */ -	if (ether_addr_equal(vf->dev_lan_addr.addr, mac) && -	    ether_addr_equal(vf->hw_lan_addr.addr, mac)) { +	if (ether_addr_equal(vf->dev_lan_addr, mac) && +	    ether_addr_equal(vf->hw_lan_addr, mac)) {  		ret = 0;  		goto out_put_vf;  	} @@ -1348,8 +1305,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)  	/* VF is notified of its new MAC via the PF's response to the  	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset  	 */ -	ether_addr_copy(vf->dev_lan_addr.addr, mac); -	ether_addr_copy(vf->hw_lan_addr.addr, mac); +	ether_addr_copy(vf->dev_lan_addr, mac); +	ether_addr_copy(vf->hw_lan_addr, mac);  	if (is_zero_ether_addr(mac)) {  		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */  		vf->pf_set_mac = false; @@ -1750,7 +1707,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)  	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",  		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, -		 vf->dev_lan_addr.addr, +		 vf->dev_lan_addr,  		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)  			  ? "on" : "off");  } @@ -1794,7 +1751,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)  			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",  				 vf->mdd_tx_events.count, hw->pf_id, vf->vf_id, -				 vf->dev_lan_addr.addr); +				 vf->dev_lan_addr);  		}  	}  	mutex_unlock(&pf->vfs.table_lock); @@ -1884,7 +1841,7 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,  			if (pf_vsi)  				dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n", -					 &vf->dev_lan_addr.addr[0], +					 &vf->dev_lan_addr[0],  					 pf_vsi->netdev->dev_addr);  		}  	} diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 9b762f7972ce..61f844d22512 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -5420,7 +5420,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,  	 */  	status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));  	if (status) -		goto err_free_lkup_exts; +		goto err_unroll;  	/* Group match words into recipes using preferred recipe grouping  	 * criteria. 
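The PTP rework earlier in this diff (ice_ptp_hw.c/ice_ptp_hw.h) replaces the bypass logic with an explicit readiness query: ice_get_phy_tx_tstamp_ready() returns a 64-bit bitmap in which a set bit marks a timestamp index that is safe to read. A minimal, hypothetical consumer sketch follows -- it is not code from this series, it assumes the driver's usual headers plus 64-bit longs, and only the two ice_* calls are real driver APIs:

#include <linux/bitops.h>

static void sketch_read_ready_tstamps(struct ice_hw *hw, u8 block)
{
	unsigned long ready_map;
	unsigned int idx;
	u64 ready;

	if (ice_get_phy_tx_tstamp_ready(hw, block, &ready))
		return;

	ready_map = ready;	/* assumes BITS_PER_LONG == 64 */
	for_each_set_bit(idx, &ready_map, 64) {
		u64 tstamp;

		/* a set bit means this index may be read right now */
		if (ice_read_phy_tstamp(hw, block, idx, &tstamp))
			continue;
		/* hand tstamp off to the PTP stack here */
	}
}

On E810 the query reports all bits set (ice_get_phy_tx_tstamp_ready_e810() above simply stores 0xFFFFFFFFFFFFFFFF), so such a loop degenerates to reading every tracked index.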
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index f68c555be4e9..6b48cbc049c6 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -724,7 +724,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)  	 */  	fltr->rid = rule_added.rid;  	fltr->rule_id = rule_added.rule_id; -	fltr->dest_id = rule_added.vsi_handle; +	fltr->dest_vsi_handle = rule_added.vsi_handle;  exit:  	kfree(list); @@ -732,6 +732,116 @@ exit:  }  /** + * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action) + * @vsi: Pointer to VSI + * @tc_fltr: Pointer to tc_flower_filter + * + * Locate the VSI using specified queue. When ADQ is not enabled, always + * return input VSI, otherwise locate corresponding VSI based on per channel + * offset and qcount + */ +static struct ice_vsi * +ice_locate_vsi_using_queue(struct ice_vsi *vsi, +			   struct ice_tc_flower_fltr *tc_fltr) +{ +	int num_tc, tc, queue; + +	/* if ADQ is not active, passed VSI is the candidate VSI */ +	if (!ice_is_adq_active(vsi->back)) +		return vsi; + +	/* Locate the VSI (it could still be main PF VSI or CHNL_VSI depending +	 * upon queue number) +	 */ +	num_tc = vsi->mqprio_qopt.qopt.num_tc; +	queue = tc_fltr->action.fwd.q.queue; + +	for (tc = 0; tc < num_tc; tc++) { +		int qcount = vsi->mqprio_qopt.qopt.count[tc]; +		int offset = vsi->mqprio_qopt.qopt.offset[tc]; + +		if (queue >= offset && queue < offset + qcount) { +			/* for non-ADQ TCs, passed VSI is the candidate VSI */ +			if (tc < ICE_CHNL_START_TC) +				return vsi; +			else +				return vsi->tc_map_vsi[tc]; +		} +	} +	return NULL; +} + +static struct ice_rx_ring * +ice_locate_rx_ring_using_queue(struct ice_vsi *vsi, +			       struct ice_tc_flower_fltr *tc_fltr) +{ +	u16 queue = tc_fltr->action.fwd.q.queue; + +	return queue < vsi->num_rxq ? vsi->rx_rings[queue] : NULL; +} + +/** + * ice_tc_forward_action - Determine destination VSI and queue for the action + * @vsi: Pointer to VSI + * @tc_fltr: Pointer to TC flower filter structure + * + * Validates the tc forward action and determines the destination VSI and queue + * for the forward action. 
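+ *
+ * Return: pointer to the destination VSI on success, or an ERR_PTR()
+ * (-EOPNOTSUPP or -EINVAL) when the requested destination cannot be used.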
+ */ +static struct ice_vsi * +ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) +{ +	struct ice_rx_ring *ring = NULL; +	struct ice_vsi *dest_vsi = NULL; +	struct ice_pf *pf = vsi->back; +	struct device *dev; +	u32 tc_class; + +	dev = ice_pf_to_dev(pf); + +	/* Get the destination VSI and/or destination queue and validate them */ +	switch (tc_fltr->action.fltr_act) { +	case ICE_FWD_TO_VSI: +		tc_class = tc_fltr->action.fwd.tc.tc_class; +		/* Select the destination VSI */ +		if (tc_class < ICE_CHNL_START_TC) { +			NL_SET_ERR_MSG_MOD(tc_fltr->extack, +					   "Unable to add filter because of unsupported destination"); +			return ERR_PTR(-EOPNOTSUPP); +		} +		/* Locate ADQ VSI depending on hw_tc number */ +		dest_vsi = vsi->tc_map_vsi[tc_class]; +		break; +	case ICE_FWD_TO_Q: +		/* Locate the Rx queue */ +		ring = ice_locate_rx_ring_using_queue(vsi, tc_fltr); +		if (!ring) { +			dev_err(dev, +				"Unable to locate Rx queue for action fwd_to_queue: %u\n", +				tc_fltr->action.fwd.q.queue); +			return ERR_PTR(-EINVAL); +		} +		/* Determine destination VSI even though the action is +		 * FWD_TO_QUEUE, because QUEUE is associated with VSI +		 */ +		dest_vsi = tc_fltr->dest_vsi; +		break; +	default: +		dev_err(dev, +			"Unable to add filter because of unsupported action %u (supported actions: fwd to tc, fwd to queue)\n", +			tc_fltr->action.fltr_act); +		return ERR_PTR(-EINVAL); +	} +	/* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */ +	if (!dest_vsi) { +		dev_err(dev, +			"Unable to add filter because specified destination VSI doesn't exist\n"); +		return ERR_PTR(-EINVAL); +	} +	return dest_vsi; +} + +/**   * ice_add_tc_flower_adv_fltr - add appropriate filter rules   * @vsi: Pointer to VSI   * @tc_fltr: Pointer to TC flower filter structure @@ -750,7 +860,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,  	struct ice_pf *pf = vsi->back;  	struct ice_hw *hw = &pf->hw;  	u32 flags = tc_fltr->flags; -	struct ice_vsi *ch_vsi; +	struct ice_vsi *dest_vsi;  	struct device *dev;  	u16 lkups_cnt = 0;  	u16 l4_proto = 0; @@ -772,11 +882,12 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,  		return -EOPNOTSUPP;  	} -	/* get the channel (aka ADQ VSI) */ -	if (tc_fltr->dest_vsi) -		ch_vsi = tc_fltr->dest_vsi; -	else -		ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class]; +	/* validate forwarding action VSI and queue */ +	if (ice_is_forward_action(tc_fltr->action.fltr_act)) { +		dest_vsi = ice_tc_forward_action(vsi, tc_fltr); +		if (IS_ERR(dest_vsi)) +			return PTR_ERR(dest_vsi); +	}  	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);  	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); @@ -790,30 +901,41 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,  	}  	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act; -	if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) { -		if (!ch_vsi) { -			NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist"); -			ret = -EINVAL; -			goto exit; -		} +	/* specify the cookie as filter_rule_id */ +	rule_info.fltr_rule_id = tc_fltr->cookie; -		rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI; -		rule_info.sw_act.vsi_handle = ch_vsi->idx; -		rule_info.priority = 7; +	switch (tc_fltr->action.fltr_act) { +	case ICE_FWD_TO_VSI: +		rule_info.sw_act.vsi_handle = dest_vsi->idx; +		rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;  		rule_info.sw_act.src = hw->pf_id;  		rule_info.rx = true;  		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n", -			tc_fltr->action.tc_class, +		
	tc_fltr->action.fwd.tc.tc_class,  			rule_info.sw_act.vsi_handle, lkups_cnt); -	} else { -		rule_info.sw_act.flag |= ICE_FLTR_TX; -		rule_info.sw_act.src = vsi->idx; -		rule_info.rx = false; +		break; +	case ICE_FWD_TO_Q: +		/* HW queue number in global space */ +		rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue; +		rule_info.sw_act.vsi_handle = dest_vsi->idx; +		rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE; +		rule_info.sw_act.src = hw->pf_id; +		rule_info.rx = true; +		dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n", +			tc_fltr->action.fwd.q.queue, +			tc_fltr->action.fwd.q.hw_queue, lkups_cnt); +		break; +	case ICE_DROP_PACKET: +		rule_info.sw_act.flag |= ICE_FLTR_RX; +		rule_info.sw_act.src = hw->pf_id; +		rule_info.rx = true; +		rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; +		break; +	default: +		ret = -EOPNOTSUPP; +		goto exit;  	} -	/* specify the cookie as filter_rule_id */ -	rule_info.fltr_rule_id = tc_fltr->cookie; -  	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);  	if (ret == -EEXIST) {  		NL_SET_ERR_MSG_MOD(tc_fltr->extack, @@ -831,19 +953,14 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,  	 */  	tc_fltr->rid = rule_added.rid;  	tc_fltr->rule_id = rule_added.rule_id; -	if (tc_fltr->action.tc_class > 0 && ch_vsi) { -		/* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and -		 * for PF ADQ filter, it is not yet set in tc_fltr, -		 * hence store the dest_vsi ptr in tc_fltr -		 */ -		if (ch_vsi->type == ICE_VSI_CHNL) -			tc_fltr->dest_vsi = ch_vsi; +	tc_fltr->dest_vsi_handle = rule_added.vsi_handle; +	if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI || +	    tc_fltr->action.fltr_act == ICE_FWD_TO_Q) { +		tc_fltr->dest_vsi = dest_vsi;  		/* keep track of advanced switch filter for -		 * destination VSI (channel VSI) +		 * destination VSI  		 */ -		ch_vsi->num_chnl_fltr++; -		/* in this case, dest_id is VSI handle (sw handle) */ -		tc_fltr->dest_id = rule_added.vsi_handle; +		dest_vsi->num_chnl_fltr++;  		/* keeps track of channel filters for PF VSI */  		if (vsi->type == ICE_VSI_PF && @@ -851,10 +968,26 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,  			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))  			pf->num_dmac_chnl_fltrs++;  	} -	dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n", -		lkups_cnt, flags, -		tc_fltr->action.tc_class, rule_added.rid, -		rule_added.rule_id, rule_added.vsi_handle); +	switch (tc_fltr->action.fltr_act) { +	case ICE_FWD_TO_VSI: +		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to TC %u, rid %u, rule_id %u, vsi_idx %u\n", +			lkups_cnt, flags, +			tc_fltr->action.fwd.tc.tc_class, rule_added.rid, +			rule_added.rule_id, rule_added.vsi_handle); +		break; +	case ICE_FWD_TO_Q: +		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to queue: %u (HW queue %u), rid %u, rule_id %u\n", +			lkups_cnt, flags, tc_fltr->action.fwd.q.queue, +			tc_fltr->action.fwd.q.hw_queue, rule_added.rid, +			rule_added.rule_id); +		break; +	case ICE_DROP_PACKET: +		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n", +			lkups_cnt, flags, rule_added.rid, rule_added.rule_id); +		break; +	default: +		break; +	}  exit:  	kfree(list);  	return ret; @@ -1455,43 +1588,15 @@ ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)  }  /** - * ice_handle_tclass_action - Support directing to a traffic class + * ice_prep_adq_filter - 
Prepare ADQ filter with the required additional headers   * @vsi: Pointer to VSI - * @cls_flower: Pointer to TC flower offload structure   * @fltr: Pointer to TC flower filter structure   * - * Support directing traffic to a traffic class + * Prepare ADQ filter with the required additional header fields   */  static int -ice_handle_tclass_action(struct ice_vsi *vsi, -			 struct flow_cls_offload *cls_flower, -			 struct ice_tc_flower_fltr *fltr) +ice_prep_adq_filter(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)  { -	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); -	struct ice_vsi *main_vsi; - -	if (tc < 0) { -		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid"); -		return -EINVAL; -	} -	if (!tc) { -		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination"); -		return -EINVAL; -	} - -	if (!(vsi->all_enatc & BIT(tc))) { -		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existent destination"); -		return -EINVAL; -	} - -	/* Redirect to a TC class or Queue Group */ -	main_vsi = ice_get_main_vsi(vsi->back); -	if (!main_vsi || !main_vsi->netdev) { -		NL_SET_ERR_MSG_MOD(fltr->extack, -				   "Unable to add filter because of invalid netdevice"); -		return -EINVAL; -	} -  	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&  	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |  			   ICE_TC_FLWR_FIELD_SRC_MAC))) { @@ -1503,9 +1608,8 @@ ice_handle_tclass_action(struct ice_vsi *vsi,  	/* For ADQ, filter must include dest MAC address, otherwise unwanted  	 * packets with unrelated MAC address get delivered to ADQ VSIs as long  	 * as remaining filter criteria is satisfied such as dest IP address -	 * and dest/src L4 port. Following code is trying to handle: -	 * 1. For non-tunnel, if the user specifies MAC addresses, use them (means -	 * this code won't do anything +	 * and dest/src L4 port. Below code handles the following cases: +	 * 1. For non-tunnel, if the user specifies MAC addresses, use them.  	 * 2. For non-tunnel, if the user didn't specify a MAC address, add an implicit  	 * dest MAC to be lower netdev's active unicast MAC address  	 * 3. For tunnel, as of now TC-filter through flower classifier doesn't @@ -1528,35 +1632,100 @@ ice_handle_tclass_action(struct ice_vsi *vsi,  		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);  	} -	/* validate specified dest MAC address, make sure either it belongs to -	 * lower netdev or any of MACVLAN. MACVLANs MAC address are added as -	 * unicast MAC filter destined to main VSI. -	 */ -	if (!ice_mac_fltr_exist(&main_vsi->back->hw, -				fltr->outer_headers.l2_key.dst_mac, -				main_vsi->idx)) { -		NL_SET_ERR_MSG_MOD(fltr->extack, -				   "Unable to add filter because legacy MAC filter for specified destination doesn't exist"); -		return -EINVAL; -	} -  	/* Make sure VLAN is already added to main VSI, before allowing ADQ to  	 * add a VLAN based filter such as MAC + VLAN + L4 port.  	 
*/  	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {  		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id); -		if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id, -					 main_vsi->idx)) { +		if (!ice_vlan_fltr_exist(&vsi->back->hw, vlan_id, vsi->idx)) {  			NL_SET_ERR_MSG_MOD(fltr->extack,  					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");  			return -EINVAL;  		}  	} +	return 0; +} + +/** + * ice_handle_tclass_action - Support directing to a traffic class + * @vsi: Pointer to VSI + * @cls_flower: Pointer to TC flower offload structure + * @fltr: Pointer to TC flower filter structure + * + * Support directing traffic to a traffic class/queue-set + */ +static int +ice_handle_tclass_action(struct ice_vsi *vsi, +			 struct flow_cls_offload *cls_flower, +			 struct ice_tc_flower_fltr *fltr) +{ +	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); + +	/* user specified hw_tc (must be non-zero for ADQ TC), action is forward +	 * to hw_tc (i.e. ADQ channel number) +	 */ +	if (tc < ICE_CHNL_START_TC) { +		NL_SET_ERR_MSG_MOD(fltr->extack, +				   "Unable to add filter because of unsupported destination"); +		return -EOPNOTSUPP; +	} +	if (!(vsi->all_enatc & BIT(tc))) { +		NL_SET_ERR_MSG_MOD(fltr->extack, +				   "Unable to add filter because of non-existent destination"); +		return -EINVAL; +	}  	fltr->action.fltr_act = ICE_FWD_TO_VSI; -	fltr->action.tc_class = tc; +	fltr->action.fwd.tc.tc_class = tc; -	return 0; +	return ice_prep_adq_filter(vsi, fltr); +} + +static int +ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr, +			struct flow_action_entry *act) +{ +	struct ice_vsi *ch_vsi = NULL; +	u16 queue = act->rx_queue; + +	if (queue >= vsi->num_rxq) { +		NL_SET_ERR_MSG_MOD(fltr->extack, +				   "Unable to add filter because specified queue is invalid"); +		return -EINVAL; +	} +	fltr->action.fltr_act = ICE_FWD_TO_Q; +	fltr->action.fwd.q.queue = queue; +	/* determine corresponding HW queue */ +	fltr->action.fwd.q.hw_queue = vsi->rxq_map[queue]; + +	/* If ADQ is configured, and the queue belongs to ADQ VSI, then prepare +	 * ADQ switch filter +	 */ +	ch_vsi = ice_locate_vsi_using_queue(vsi, fltr); +	if (!ch_vsi) +		return -EINVAL; +	fltr->dest_vsi = ch_vsi; +	if (!ice_is_chnl_fltr(fltr)) +		return 0; + +	return ice_prep_adq_filter(vsi, fltr); +} + +static int +ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr, +		    struct flow_action_entry *act) +{ +	switch (act->id) { +	case FLOW_ACTION_RX_QUEUE_MAPPING: +		/* forward to queue */ +		return ice_tc_forward_to_queue(vsi, fltr, act); +	case FLOW_ACTION_DROP: +		fltr->action.fltr_act = ICE_DROP_PACKET; +		return 0; +	default: +		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action"); +		return -EOPNOTSUPP; +	}  }  /** @@ -1575,7 +1744,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,  	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);  	struct flow_action *flow_action = &rule->action;  	struct flow_action_entry *act; -	int i; +	int i, err;  	if (cls_flower->classid)  		return ice_handle_tclass_action(vsi, cls_flower, fltr);  		return -EINVAL;  	flow_action_for_each(i, act, flow_action) { -		if (ice_is_eswitch_mode_switchdev(vsi->back)) { -			int err = ice_eswitch_tc_parse_action(fltr, act); - -			if (err) -				return err; -			continue; -		} -		/* Allow only one rule per filter */ - -		/* Drop action */ -		if (act->id == FLOW_ACTION_DROP) { 
-			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP"); -			return -EINVAL; -		} -		fltr->action.fltr_act = ICE_FWD_TO_VSI; +		if (ice_is_eswitch_mode_switchdev(vsi->back)) +			err = ice_eswitch_tc_parse_action(fltr, act); +		else +			err = ice_tc_parse_action(vsi, fltr, act); +		if (err) +			return err; +		continue;  	}  	return 0;  } @@ -1618,7 +1779,7 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)  	rule_rem.rid = fltr->rid;  	rule_rem.rule_id = fltr->rule_id; -	rule_rem.vsi_handle = fltr->dest_id; +	rule_rem.vsi_handle = fltr->dest_vsi_handle;  	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);  	if (err) {  		if (err == -ENOENT) { diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h index 92642faad595..8d5e22ac7023 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h @@ -45,7 +45,20 @@ struct ice_indr_block_priv {  };  struct ice_tc_flower_action { -	u32 tc_class; +	/* forward action specific params */ +	union { +		struct { +			u32 tc_class; /* forward to hw_tc */ +			u32 rsvd; +		} tc; +		struct { +			u16 queue; /* forward to queue */ +			/* To add filter in HW, absolute queue number in global +			 * space of queues (between 0...N) is needed +			 */ +			u16 hw_queue; +		} q; +	} fwd;  	enum ice_sw_fwd_act_type fltr_act;  }; @@ -131,11 +144,11 @@ struct ice_tc_flower_fltr {  	 */  	u16 rid;  	u16 rule_id; -	/* this could be queue/vsi_idx (sw handle)/queue_group, depending upon -	 * destination type +	/* VSI handle of the destination VSI (it could be main PF VSI, CHNL_VSI, +	 * VF VSI)  	 */ -	u16 dest_id; -	/* if dest_id is vsi_idx, then need to store destination VSI ptr */ +	u16 dest_vsi_handle; +	/* ptr to destination VSI */  	struct ice_vsi *dest_vsi;  	/* direction of fltr for eswitch use case */  	enum ice_eswitch_fltr_direction direction; @@ -162,12 +175,23 @@ struct ice_tc_flower_fltr {   * @f: Pointer to tc-flower filter   *   * Criteria to determine if a given filter is a valid channel filter - * or not is based on its "destination". If destination is hw_tc (aka tc_class) - * and it is non-zero, then it is valid channel (aka ADQ) filter + * or not is based on its destination. + * For the forward-to-VSI action, if the destination is a valid hw_tc (aka + * tc_class) within the supported range of ADQ TCs, return true. + * For forward to queue, as long as dest_vsi is valid and it is of type + * VSI_CHNL (PF ADQ VSI is of type VSI_CHNL), return true. + * NOTE: For forward to queue, correct dest_vsi is still set in tc_fltr based + * on the destination queue specified.   
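+ *
+ * For instance, a (hypothetical) forward-to-queue filter whose queue lies in
+ * an ADQ channel gets an ICE_VSI_CHNL dest_vsi from
+ * ice_locate_vsi_using_queue(), so this helper returns true for it.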
*/  static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f)  { -	return !!f->action.tc_class; +	if (f->action.fltr_act == ICE_FWD_TO_VSI) +		return f->action.fwd.tc.tc_class >= ICE_CHNL_START_TC && +		       f->action.fwd.tc.tc_class < ICE_CHNL_MAX_TC; +	else if (f->action.fltr_act == ICE_FWD_TO_Q) +		return f->dest_vsi && f->dest_vsi->type == ICE_VSI_CHNL; + +	return false;  }  /** @@ -187,4 +211,14 @@ ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);  void ice_replay_tc_fltrs(struct ice_pf *pf);  bool ice_is_tunnel_supported(struct net_device *dev); +static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act) +{ +	switch (fltr_act) { +	case ICE_FWD_TO_VSI: +	case ICE_FWD_TO_Q: +		return true; +	default: +		return false; +	} +}  #endif /* _ICE_TC_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index dbe80e5053a8..dfd22862e926 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -85,7 +85,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,  	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |  		 ICE_TX_DESC_CMD_RE; -	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT; +	tx_buf->type = ICE_TX_BUF_DUMMY;  	tx_buf->raw_buf = raw_packet;  	tx_desc->cmd_type_offset_bsz = @@ -112,27 +112,29 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,  static void  ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)  { -	if (tx_buf->skb) { -		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) -			devm_kfree(ring->dev, tx_buf->raw_buf); -		else if (ice_ring_is_xdp(ring)) -			page_frag_free(tx_buf->raw_buf); -		else -			dev_kfree_skb_any(tx_buf->skb); -		if (dma_unmap_len(tx_buf, len)) -			dma_unmap_single(ring->dev, -					 dma_unmap_addr(tx_buf, dma), -					 dma_unmap_len(tx_buf, len), -					 DMA_TO_DEVICE); -	} else if (dma_unmap_len(tx_buf, len)) { +	if (dma_unmap_len(tx_buf, len))  		dma_unmap_page(ring->dev,  			       dma_unmap_addr(tx_buf, dma),  			       dma_unmap_len(tx_buf, len),  			       DMA_TO_DEVICE); + +	switch (tx_buf->type) { +	case ICE_TX_BUF_DUMMY: +		devm_kfree(ring->dev, tx_buf->raw_buf); +		break; +	case ICE_TX_BUF_SKB: +		dev_kfree_skb_any(tx_buf->skb); +		break; +	case ICE_TX_BUF_XDP_TX: +		page_frag_free(tx_buf->raw_buf); +		break; +	case ICE_TX_BUF_XDP_XMIT: +		xdp_return_frame(tx_buf->xdpf); +		break;  	}  	tx_buf->next_to_watch = NULL; -	tx_buf->skb = NULL; +	tx_buf->type = ICE_TX_BUF_EMPTY;  	dma_unmap_len_set(tx_buf, len, 0);  	/* tx_buf must be completely set up in the transmit path */  } @@ -174,8 +176,6 @@ tx_skip_free:  	tx_ring->next_to_use = 0;  	tx_ring->next_to_clean = 0; -	tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1; -	tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;  	if (!tx_ring->netdev)  		return; @@ -267,7 +267,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)  				 DMA_TO_DEVICE);  		/* clear tx_buf data */ -		tx_buf->skb = NULL; +		tx_buf->type = ICE_TX_BUF_EMPTY;  		dma_unmap_len_set(tx_buf, len, 0);  		/* unmap remaining buffers */ @@ -325,7 +325,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)  		if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&  		    !test_bit(ICE_VSI_DOWN, vsi->state)) {  			netif_tx_wake_queue(txring_txq(tx_ring)); -			++tx_ring->tx_stats.restart_q; +			++tx_ring->ring_stats->tx_stats.restart_q;  		}  	} @@ -367,7 +367,7 @@ int 
ice_setup_tx_ring(struct ice_tx_ring *tx_ring)  	tx_ring->next_to_use = 0;  	tx_ring->next_to_clean = 0; -	tx_ring->tx_stats.prev_pkt = -1; +	tx_ring->ring_stats->tx_stats.prev_pkt = -1;  	return 0;  err: @@ -382,6 +382,7 @@ err:   */  void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)  { +	struct xdp_buff *xdp = &rx_ring->xdp;  	struct device *dev = rx_ring->dev;  	u32 size;  	u16 i; @@ -390,16 +391,16 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)  	if (!rx_ring->rx_buf)  		return; -	if (rx_ring->skb) { -		dev_kfree_skb(rx_ring->skb); -		rx_ring->skb = NULL; -	} -  	if (rx_ring->xsk_pool) {  		ice_xsk_clean_rx_ring(rx_ring);  		goto rx_skip_free;  	} +	if (xdp->data) { +		xdp_return_buff(xdp); +		xdp->data = NULL; +	} +  	/* Free all the Rx ring sk_buffs */  	for (i = 0; i < rx_ring->count; i++) {  		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; @@ -437,6 +438,7 @@ rx_skip_free:  	rx_ring->next_to_alloc = 0;  	rx_ring->next_to_clean = 0; +	rx_ring->first_desc = 0;  	rx_ring->next_to_use = 0;  } @@ -506,6 +508,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)  	rx_ring->next_to_use = 0;  	rx_ring->next_to_clean = 0; +	rx_ring->first_desc = 0;  	if (ice_is_xdp_ena_vsi(rx_ring->vsi))  		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); @@ -523,8 +526,16 @@ err:  	return -ENOMEM;  } +/** + * ice_rx_frame_truesize - calculate the truesize of an Rx frame + * @rx_ring: ptr to Rx ring + * @size: packet length from the Rx descriptor + * + * Calculate the truesize, taking into account the PAGE_SIZE of the + * underlying arch + */  static unsigned int -ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size) +ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)  {  	unsigned int truesize; @@ -545,34 +556,39 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused s   * @xdp: xdp_buff used as input to the XDP program   * @xdp_prog: XDP program to run   * @xdp_ring: ring to be used for XDP_TX action + * @rx_buf: Rx buffer to store the XDP action   *   * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}   */ -static int +static void  ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, -	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring) +	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, +	    struct ice_rx_buf *rx_buf)  { -	int err; +	unsigned int ret = ICE_XDP_PASS;  	u32 act; +	if (!xdp_prog) +		goto exit; +  	act = bpf_prog_run_xdp(xdp_prog, xdp);  	switch (act) {  	case XDP_PASS: -		return ICE_XDP_PASS; +		break;  	case XDP_TX:  		if (static_branch_unlikely(&ice_xdp_locking_key))  			spin_lock(&xdp_ring->tx_lock); -		err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring); +		ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);  		if (static_branch_unlikely(&ice_xdp_locking_key))  			spin_unlock(&xdp_ring->tx_lock); -		if (err == ICE_XDP_CONSUMED) +		if (ret == ICE_XDP_CONSUMED)  			goto out_failure; -		return err; +		break;  	case XDP_REDIRECT: -		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); -		if (err) +		if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))  			goto out_failure; -		return ICE_XDP_REDIR; +		ret = ICE_XDP_REDIR; +		break;  	default:  		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);  		fallthrough; @@ -581,8 +597,31 @@ out_failure:  		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);  		fallthrough;  	case XDP_DROP: -		return ICE_XDP_CONSUMED; +		ret = ICE_XDP_CONSUMED;  	} +exit: +	rx_buf->act = ret; +	if (unlikely(xdp_buff_has_frags(xdp))) +		ice_set_rx_bufs_act(xdp, rx_ring, ret); 
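+	/* the verdict stored in rx_buf->act is consumed after the Rx loop in
+	 * ice_clean_rx_irq(), where each buffer is recycled, reused or freed
+	 * according to the recorded action
+	 */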
+} + +/** + * ice_xmit_xdp_ring - submit frame to XDP ring for transmission + * @xdpf: XDP frame that will be converted to XDP buff + * @xdp_ring: XDP ring for transmission + */ +static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf, +			     struct ice_tx_ring *xdp_ring) +{ +	struct xdp_buff xdp; + +	xdp.data_hard_start = (void *)xdpf; +	xdp.data = xdpf->data; +	xdp.data_end = xdp.data + xdpf->len; +	xdp.frame_sz = xdpf->frame_sz; +	xdp.flags = xdpf->flags; + +	return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);  }  /** @@ -605,6 +644,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,  	unsigned int queue_index = smp_processor_id();  	struct ice_vsi *vsi = np->vsi;  	struct ice_tx_ring *xdp_ring; +	struct ice_tx_buf *tx_buf;  	int nxmit = 0, i;  	if (test_bit(ICE_VSI_DOWN, vsi->state)) @@ -627,16 +667,18 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,  		xdp_ring = vsi->xdp_rings[queue_index];  	} +	tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];  	for (i = 0; i < n; i++) { -		struct xdp_frame *xdpf = frames[i]; +		const struct xdp_frame *xdpf = frames[i];  		int err; -		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring); +		err = ice_xmit_xdp_ring(xdpf, xdp_ring);  		if (err != ICE_XDP_TX)  			break;  		nxmit++;  	} +	tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);  	if (unlikely(flags & XDP_XMIT_FLUSH))  		ice_xdp_ring_update_tail(xdp_ring); @@ -667,7 +709,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)  	/* alloc new page for storage */  	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));  	if (unlikely(!page)) { -		rx_ring->rx_stats.alloc_page_failed++; +		rx_ring->ring_stats->rx_stats.alloc_page_failed++;  		return false;  	} @@ -680,7 +722,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)  	 */  	if (dma_mapping_error(rx_ring->dev, dma)) {  		__free_pages(page, ice_rx_pg_order(rx_ring)); -		rx_ring->rx_stats.alloc_page_failed++; +		rx_ring->ring_stats->rx_stats.alloc_page_failed++;  		return false;  	} @@ -706,7 +748,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)   * buffers. Then bump tail at most one time. Grouping like this lets us avoid   * multiple tail writes per call.   
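 *
 * Returns true if any buffer could not be replenished, false once all
 * requested buffers have been handed back to hardware.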
*/ -bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count) +bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)  {  	union ice_32b_rx_flex_desc *rx_desc;  	u16 ntu = rx_ring->next_to_use; @@ -783,7 +825,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)  /**   * ice_can_reuse_rx_page - Determine if page can be reused for another Rx   * @rx_buf: buffer containing the page - * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call   *   * If page is reusable, we have a green light for calling ice_reuse_rx_page,   * which will assign the current buffer to the buffer that next_to_alloc is @@ -791,7 +832,7 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)   * page freed   */  static bool -ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt) +ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)  {  	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;  	struct page *page = rx_buf->page; @@ -802,7 +843,7 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)  #if (PAGE_SIZE < 8192)  	/* if we are only owner of page we can reuse it */ -	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) +	if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))  		return false;  #else  #define ICE_LAST_OFFSET \ @@ -824,33 +865,44 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)  }  /** - * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag + * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag   * @rx_ring: Rx descriptor ring to transact packets on + * @xdp: xdp buff to place the data into   * @rx_buf: buffer containing page to add - * @skb: sk_buff to place the data into   * @size: packet length from rx_desc   * - * This function will add the data contained in rx_buf->page to the skb. - * It will just attach the page as a frag to the skb. - * The function will then update the page offset. + * This function will add the data contained in rx_buf->page to the xdp buf. + * It will just attach the page as a frag.   
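+ *
+ * Return: 0 on success (or when size is zero), -ENOMEM once MAX_SKB_FRAGS
+ * is exceeded, in which case the buffers are marked ICE_XDP_CONSUMED.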
*/ -static void -ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, -		struct sk_buff *skb, unsigned int size) +static int +ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, +		 struct ice_rx_buf *rx_buf, const unsigned int size)  { -#if (PAGE_SIZE >= 8192) -	unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset); -#else -	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; -#endif +	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);  	if (!size) -		return; -	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, -			rx_buf->page_offset, size, truesize); +		return 0; + +	if (!xdp_buff_has_frags(xdp)) { +		sinfo->nr_frags = 0; +		sinfo->xdp_frags_size = 0; +		xdp_buff_set_frags_flag(xdp); +	} -	/* page is being used so we must update the page offset */ -	ice_rx_buf_adjust_pg_offset(rx_buf, truesize); +	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { +		if (unlikely(xdp_buff_has_frags(xdp))) +			ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED); +		return -ENOMEM; +	} + +	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, +				   rx_buf->page_offset, size); +	sinfo->xdp_frags_size += size; + +	if (page_is_pfmemalloc(rx_buf->page)) +		xdp_buff_set_frag_pfmemalloc(xdp); + +	return 0;  }  /** @@ -886,19 +938,18 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)   * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use   * @rx_ring: Rx descriptor ring to transact packets on   * @size: size of buffer to add to skb - * @rx_buf_pgcnt: rx_buf page refcount   *   * This function will pull an Rx buffer from the ring and synchronize it   * for use by the CPU.   */  static struct ice_rx_buf *  ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, -	       int *rx_buf_pgcnt) +	       const unsigned int ntc)  {  	struct ice_rx_buf *rx_buf; -	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; -	*rx_buf_pgcnt = +	rx_buf = &rx_ring->rx_buf[ntc]; +	rx_buf->pgcnt =  #if (PAGE_SIZE < 8192)  		page_count(rx_buf->page);  #else @@ -922,26 +973,25 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,  /**   * ice_build_skb - Build skb around an existing buffer   * @rx_ring: Rx descriptor ring to transact packets on - * @rx_buf: Rx buffer to pull data from   * @xdp: xdp_buff pointing to the data   * - * This function builds an skb around an existing Rx buffer, taking care - * to set up the skb correctly and avoid any memcpy overhead. + * This function builds an skb around an existing XDP buffer, taking care + * to set up the skb correctly and avoid any memcpy overhead. Driver has + * already combined frags (if any) to skb_shared_info.   */  static struct sk_buff * -ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, -	      struct xdp_buff *xdp) +ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)  {  	u8 metasize = xdp->data - xdp->data_meta; -#if (PAGE_SIZE < 8192) -	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; -#else -	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + -				SKB_DATA_ALIGN(xdp->data_end - -					       xdp->data_hard_start); -#endif +	struct skb_shared_info *sinfo = NULL; +	unsigned int nr_frags;  	struct sk_buff *skb; +	if (unlikely(xdp_buff_has_frags(xdp))) { +		sinfo = xdp_get_shared_info_from_buff(xdp); +		nr_frags = sinfo->nr_frags; +	} +  	/* Prefetch first cache line of first page. 
If xdp->data_meta  	 * is unused, this points exactly as xdp->data, otherwise we  	 * likely have a consumer accessing first few bytes of meta @@ -949,7 +999,7 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,  	 */  	net_prefetch(xdp->data_meta);  	/* build an skb around the page buffer */ -	skb = napi_build_skb(xdp->data_hard_start, truesize); +	skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);  	if (unlikely(!skb))  		return NULL; @@ -964,8 +1014,11 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,  	if (metasize)  		skb_metadata_set(skb, metasize); -	/* buffer is used by skb, update page_offset */ -	ice_rx_buf_adjust_pg_offset(rx_buf, truesize); +	if (unlikely(xdp_buff_has_frags(xdp))) +		xdp_update_skb_shared_info(skb, nr_frags, +					   sinfo->xdp_frags_size, +					   nr_frags * xdp->frame_sz, +					   xdp_buff_is_frag_pfmemalloc(xdp));  	return skb;  } @@ -981,24 +1034,30 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,   * skb correctly.   */  static struct sk_buff * -ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, -		  struct xdp_buff *xdp) +ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)  { -	unsigned int metasize = xdp->data - xdp->data_meta;  	unsigned int size = xdp->data_end - xdp->data; +	struct skb_shared_info *sinfo = NULL; +	struct ice_rx_buf *rx_buf; +	unsigned int nr_frags = 0;  	unsigned int headlen;  	struct sk_buff *skb;  	/* prefetch first cache line of first page */ -	net_prefetch(xdp->data_meta); +	net_prefetch(xdp->data); + +	if (unlikely(xdp_buff_has_frags(xdp))) { +		sinfo = xdp_get_shared_info_from_buff(xdp); +		nr_frags = sinfo->nr_frags; +	}  	/* allocate a skb to store the frags */ -	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, -			       ICE_RX_HDR_SIZE + metasize, +	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,  			       GFP_ATOMIC | __GFP_NOWARN);  	if (unlikely(!skb))  		return NULL; +	rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];  	skb_record_rx_queue(skb, rx_ring->q_index);  	/* Determine available headroom for copy */  	headlen = size; @@ -1006,32 +1065,42 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,  		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);  	/* align pull length to size of long to optimize memcpy performance */ -	memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, -	       ALIGN(headlen + metasize, sizeof(long))); - -	if (metasize) { -		skb_metadata_set(skb, metasize); -		__skb_pull(skb, metasize); -	} +	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, +							 sizeof(long)));  	/* if we exhaust the linear part then add what is left as a frag */  	size -= headlen;  	if (size) { -#if (PAGE_SIZE >= 8192) -		unsigned int truesize = SKB_DATA_ALIGN(size); -#else -		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; -#endif +		/* besides adding here a partial frag, we are going to add +		 * frags from xdp_buff, make sure there is enough space for +		 * them +		 */ +		if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) { +			dev_kfree_skb(skb); +			return NULL; +		}  		skb_add_rx_frag(skb, 0, rx_buf->page, -				rx_buf->page_offset + headlen, size, truesize); -		/* buffer is used by skb, update page_offset */ -		ice_rx_buf_adjust_pg_offset(rx_buf, truesize); +				rx_buf->page_offset + headlen, size, +				xdp->frame_sz);  	} else { -		/* buffer is unused, reset bias back to rx_buf; data was copied -		 * onto skb's linear part so there's no 
need for adjusting -		 * page offset and we can reuse this buffer as-is +		/* buffer is unused, change the act that should be taken later +		 * on; data was copied onto skb's linear part so there's no +		 * need for adjusting page offset and we can reuse this buffer +		 * as-is  		 */ -		rx_buf->pagecnt_bias++; +		rx_buf->act = ICE_SKB_CONSUMED; +	} + +	if (unlikely(xdp_buff_has_frags(xdp))) { +		struct skb_shared_info *skinfo = skb_shinfo(skb); + +		memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], +		       sizeof(skb_frag_t) * nr_frags); + +		xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags, +					   sinfo->xdp_frags_size, +					   nr_frags * xdp->frame_sz, +					   xdp_buff_is_frag_pfmemalloc(xdp));  	}  	return skb; @@ -1041,26 +1110,17 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,   * ice_put_rx_buf - Clean up used buffer and either recycle or free   * @rx_ring: Rx descriptor ring to transact packets on   * @rx_buf: Rx buffer to pull data from - * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()   * - * This function will update next_to_clean and then clean up the contents - * of the rx_buf. It will either recycle the buffer or unmap it and free - * the associated resources. + * This function will clean up the contents of the rx_buf. It will either + * recycle the buffer or unmap it and free the associated resources.   */  static void -ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, -	       int rx_buf_pgcnt) +ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)  { -	u16 ntc = rx_ring->next_to_clean + 1; - -	/* fetch, update, and store next to clean */ -	ntc = (ntc < rx_ring->count) ? ntc : 0; -	rx_ring->next_to_clean = ntc; -  	if (!rx_buf)  		return; -	if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) { +	if (ice_can_reuse_rx_page(rx_buf)) {  		/* hand second half of page back to the ring */  		ice_reuse_rx_page(rx_ring, rx_buf);  	} else { @@ -1076,27 +1136,6 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,  }  /** - * ice_is_non_eop - process handling of non-EOP buffers - * @rx_ring: Rx ring being processed - * @rx_desc: Rx descriptor for current buffer - * - * If the buffer is an EOP buffer, this function exits returning false, - * otherwise return true indicating that this is in fact a non-EOP buffer. 
- */ -static bool -ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) -{ -	/* if we are the last buffer then there is nothing else to do */ -#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) -	if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF))) -		return false; - -	rx_ring->rx_stats.non_eop_descs++; - -	return true; -} - -/**   * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf   * @rx_ring: Rx descriptor ring to transact packets on   * @budget: Total limit on number of packets to process @@ -1110,39 +1149,42 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)   */  int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)  { -	unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0; -	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); +	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;  	unsigned int offset = rx_ring->rx_offset; +	struct xdp_buff *xdp = &rx_ring->xdp;  	struct ice_tx_ring *xdp_ring = NULL; -	unsigned int xdp_res, xdp_xmit = 0; -	struct sk_buff *skb = rx_ring->skb;  	struct bpf_prog *xdp_prog = NULL; -	struct xdp_buff xdp; +	u32 ntc = rx_ring->next_to_clean; +	u32 cnt = rx_ring->count; +	u32 cached_ntc = ntc; +	u32 xdp_xmit = 0; +	u32 cached_ntu;  	bool failure; +	u32 first;  	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */  #if (PAGE_SIZE < 8192) -	frame_sz = ice_rx_frame_truesize(rx_ring, 0); +	xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);  #endif -	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);  	xdp_prog = READ_ONCE(rx_ring->xdp_prog); -	if (xdp_prog) +	if (xdp_prog) {  		xdp_ring = rx_ring->xdp_ring; +		cached_ntu = xdp_ring->next_to_use; +	}  	/* start the loop to process Rx packets bounded by 'budget' */  	while (likely(total_rx_pkts < (unsigned int)budget)) {  		union ice_32b_rx_flex_desc *rx_desc;  		struct ice_rx_buf *rx_buf; -		unsigned char *hard_start; +		struct sk_buff *skb;  		unsigned int size;  		u16 stat_err_bits; -		int rx_buf_pgcnt;  		u16 vlan_tag = 0;  		u16 rx_ptype;  		/* get the Rx desc from Rx ring based on 'next_to_clean' */ -		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); +		rx_desc = ICE_RX_DESC(rx_ring, ntc);  		/* status_error_len will always be zero for unused descriptors  		 * because it's cleared in cleanup, and overlaps with hdr_addr @@ -1166,8 +1208,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)  			if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&  			    ctrl_vsi->vf)  				ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); -			ice_put_rx_buf(rx_ring, NULL, 0); -			cleaned_count++; +			if (++ntc == cnt) +				ntc = 0;  			continue;  		} @@ -1175,65 +1217,56 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)  			ICE_RX_FLX_DESC_PKT_LEN_M;  		/* retrieve a buffer from the ring */ -		rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt); +		rx_buf = ice_get_rx_buf(rx_ring, size, ntc); -		if (!size) { -			xdp.data = NULL; -			xdp.data_end = NULL; -			xdp.data_hard_start = NULL; -			xdp.data_meta = NULL; -			goto construct_skb; -		} +		if (!xdp->data) { +			void *hard_start; -		hard_start = page_address(rx_buf->page) + rx_buf->page_offset - -			     offset; -		xdp_prepare_buff(&xdp, hard_start, offset, size, true); +			hard_start = page_address(rx_buf->page) + rx_buf->page_offset - +				     offset; +			xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);  #if (PAGE_SIZE > 4096) -		/* At larger PAGE_SIZE, frame_sz depend on len size */ -		xdp.frame_sz = 
ice_rx_frame_truesize(rx_ring, size); +			/* At larger PAGE_SIZE, frame_sz depend on len size */ +			xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);  #endif +			xdp_buff_clear_frags_flag(xdp); +		} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { +			break; +		} +		if (++ntc == cnt) +			ntc = 0; -		if (!xdp_prog) -			goto construct_skb; +		/* skip if it is NOP desc */ +		if (ice_is_non_eop(rx_ring, rx_desc)) +			continue; -		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring); -		if (!xdp_res) +		ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf); +		if (rx_buf->act == ICE_XDP_PASS)  			goto construct_skb; -		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { -			xdp_xmit |= xdp_res; -			ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz); -		} else { -			rx_buf->pagecnt_bias++; -		} -		total_rx_bytes += size; +		total_rx_bytes += xdp_get_buff_len(xdp);  		total_rx_pkts++; -		cleaned_count++; -		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); +		xdp->data = NULL; +		rx_ring->first_desc = ntc;  		continue;  construct_skb: -		if (skb) { -			ice_add_rx_frag(rx_ring, rx_buf, skb, size); -		} else if (likely(xdp.data)) { -			if (ice_ring_uses_build_skb(rx_ring)) -				skb = ice_build_skb(rx_ring, rx_buf, &xdp); -			else -				skb = ice_construct_skb(rx_ring, rx_buf, &xdp); -		} +		if (likely(ice_ring_uses_build_skb(rx_ring))) +			skb = ice_build_skb(rx_ring, xdp); +		else +			skb = ice_construct_skb(rx_ring, xdp);  		/* exit if we failed to retrieve a buffer */  		if (!skb) { -			rx_ring->rx_stats.alloc_buf_failed++; -			if (rx_buf) -				rx_buf->pagecnt_bias++; +			rx_ring->ring_stats->rx_stats.alloc_page_failed++; +			rx_buf->act = ICE_XDP_CONSUMED; +			if (unlikely(xdp_buff_has_frags(xdp))) +				ice_set_rx_bufs_act(xdp, rx_ring, +						    ICE_XDP_CONSUMED); +			xdp->data = NULL; +			rx_ring->first_desc = ntc;  			break;  		} - -		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); -		cleaned_count++; - -		/* skip if it is NOP desc */ -		if (ice_is_non_eop(rx_ring, rx_desc)) -			continue; +		xdp->data = NULL; +		rx_ring->first_desc = ntc;  		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);  		if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, @@ -1245,10 +1278,8 @@ construct_skb:  		vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);  		/* pad the skb if needed, to make a valid ethernet frame */ -		if (eth_skb_pad(skb)) { -			skb = NULL; +		if (eth_skb_pad(skb))  			continue; -		}  		/* probably a little skewed due to removing CRC */  		total_rx_bytes += skb->len; @@ -1262,20 +1293,38 @@ construct_skb:  		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);  		/* send completed skb up the stack */  		ice_receive_skb(rx_ring, skb, vlan_tag); -		skb = NULL;  		/* update budget accounting */  		total_rx_pkts++;  	} +	first = rx_ring->first_desc; +	while (cached_ntc != first) { +		struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc]; + +		if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) { +			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); +			xdp_xmit |= buf->act; +		} else if (buf->act & ICE_XDP_CONSUMED) { +			buf->pagecnt_bias++; +		} else if (buf->act == ICE_XDP_PASS) { +			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); +		} + +		ice_put_rx_buf(rx_ring, buf); +		if (++cached_ntc >= cnt) +			cached_ntc = 0; +	} +	rx_ring->next_to_clean = ntc;  	/* return up to cleaned_count buffers to hardware */ -	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); +	failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); -	if (xdp_prog) -		ice_finalize_xdp_rx(xdp_ring, 
xdp_xmit); -	rx_ring->skb = skb; +	if (xdp_xmit) +		ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu); -	ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); +	if (rx_ring->ring_stats) +		ice_update_rx_ring_stats(rx_ring, total_rx_pkts, +					 total_rx_bytes);  	/* guarantee a trip back through this routine if there was a failure */  	return failure ? budget : (int)total_rx_pkts; @@ -1292,15 +1341,25 @@ static void __ice_update_sample(struct ice_q_vector *q_vector,  		struct ice_tx_ring *tx_ring;  		ice_for_each_tx_ring(tx_ring, *rc) { -			packets += tx_ring->stats.pkts; -			bytes += tx_ring->stats.bytes; +			struct ice_ring_stats *ring_stats; + +			ring_stats = tx_ring->ring_stats; +			if (!ring_stats) +				continue; +			packets += ring_stats->stats.pkts; +			bytes += ring_stats->stats.bytes;  		}  	} else {  		struct ice_rx_ring *rx_ring;  		ice_for_each_rx_ring(rx_ring, *rc) { -			packets += rx_ring->stats.pkts; -			bytes += rx_ring->stats.bytes; +			struct ice_ring_stats *ring_stats; + +			ring_stats = rx_ring->ring_stats; +			if (!ring_stats) +				continue; +			packets += ring_stats->stats.pkts; +			bytes += ring_stats->stats.bytes;  		}  	} @@ -1549,7 +1608,7 @@ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)  	/* A reprieve! - use start_queue because it doesn't call schedule */  	netif_tx_start_queue(txring_txq(tx_ring)); -	++tx_ring->tx_stats.restart_q; +	++tx_ring->ring_stats->tx_stats.restart_q;  	return 0;  } @@ -1670,6 +1729,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,  				       DMA_TO_DEVICE);  		tx_buf = &tx_ring->tx_buf[i]; +		tx_buf->type = ICE_TX_BUF_FRAG;  	}  	/* record SW timestamp if HW timestamp is not available */ @@ -1984,7 +2044,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)  	if (err < 0)  		return err; -	/* cppcheck-suppress unreadVariable */  	protocol = vlan_get_protocol(skb);  	if (eth_p_mpls(protocol)) @@ -2021,8 +2080,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)  		}  		/* reset pointers to inner headers */ - -		/* cppcheck-suppress unreadVariable */  		ip.hdr = skb_inner_network_header(skb);  		l4.hdr = skb_inner_transport_header(skb); @@ -2288,12 +2345,15 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)  	ice_trace(xmit_frame_ring, tx_ring, skb); +	if (unlikely(ipv6_hopopt_jumbo_remove(skb))) +		goto out_drop; +  	count = ice_xmit_desc_count(skb);  	if (ice_chk_linearize(skb, count)) {  		if (__skb_linearize(skb))  			goto out_drop;  		count = ice_txd_use_count(skb->len); -		tx_ring->tx_stats.tx_linearize++; +		tx_ring->ring_stats->tx_stats.tx_linearize++;  	}  	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, @@ -2304,7 +2364,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)  	 */  	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +  			      ICE_DESCS_FOR_CTX_DESC)) { -		tx_ring->tx_stats.tx_busy++; +		tx_ring->ring_stats->tx_stats.tx_busy++;  		return NETDEV_TX_BUSY;  	} @@ -2316,6 +2376,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)  	/* record the location of the first descriptor for this packet */  	first = &tx_ring->tx_buf[tx_ring->next_to_use];  	first->skb = skb; +	first->type = ICE_TX_BUF_SKB;  	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);  	first->gso_segs = 1;  	first->tx_flags = 0; @@ -2488,11 +2549,11 @@ void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)  					 
dma_unmap_addr(tx_buf, dma),  					 dma_unmap_len(tx_buf, len),  					 DMA_TO_DEVICE); -		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) +		if (tx_buf->type == ICE_TX_BUF_DUMMY)  			devm_kfree(tx_ring->dev, tx_buf->raw_buf);  		/* clear next_to_watch to prevent false hangs */ -		tx_buf->raw_buf = NULL; +		tx_buf->type = ICE_TX_BUF_EMPTY;  		tx_buf->tx_flags = 0;  		tx_buf->next_to_watch = NULL;  		dma_unmap_len_set(tx_buf, len, 0); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 932b5661ec4d..fff0efe28373 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -9,10 +9,12 @@  #define ICE_DFLT_IRQ_WORK	256  #define ICE_RXBUF_3072		3072  #define ICE_RXBUF_2048		2048 +#define ICE_RXBUF_1664		1664  #define ICE_RXBUF_1536		1536  #define ICE_MAX_CHAINED_RX_BUFS	5  #define ICE_MAX_BUF_TXD		8  #define ICE_MIN_TX_LEN		17 +#define ICE_MAX_FRAME_LEGACY_RX 8320  /* The size limit for a transmit buffer in a descriptor is (16K - 1).   * In order to align with the read requests we will align the value to @@ -110,15 +112,16 @@ static inline int ice_skb_pad(void)  	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \  	      (R)->next_to_clean - (R)->next_to_use - 1) +#define ICE_RX_DESC_UNUSED(R)	\ +	((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \ +	      (R)->first_desc - (R)->next_to_use - 1) +  #define ICE_RING_QUARTER(R) ((R)->count >> 2)  #define ICE_TX_FLAGS_TSO	BIT(0)  #define ICE_TX_FLAGS_HW_VLAN	BIT(1)  #define ICE_TX_FLAGS_SW_VLAN	BIT(2) -/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be - * freed instead of returned like skb packets. - */ -#define ICE_TX_FLAGS_DUMMY_PKT	BIT(3) +/* Free, was ICE_TX_FLAGS_DUMMY_PKT */  #define ICE_TX_FLAGS_TSYN	BIT(4)  #define ICE_TX_FLAGS_IPV4	BIT(5)  #define ICE_TX_FLAGS_IPV6	BIT(6) @@ -134,6 +137,7 @@ static inline int ice_skb_pad(void)  #define ICE_XDP_TX		BIT(1)  #define ICE_XDP_REDIR		BIT(2)  #define ICE_XDP_EXIT		BIT(3) +#define ICE_SKB_CONSUMED	ICE_XDP_CONSUMED  #define ICE_RX_DMA_ATTR \  	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) @@ -142,15 +146,44 @@ static inline int ice_skb_pad(void)  #define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS) +/** + * enum ice_tx_buf_type - type of &ice_tx_buf to act on Tx completion + * @ICE_TX_BUF_EMPTY: unused OR XSk frame, no action required + * @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree() + * @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA + * @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats + * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats + * @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats + * @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats + */ +enum ice_tx_buf_type { +	ICE_TX_BUF_EMPTY	= 0U, +	ICE_TX_BUF_DUMMY, +	ICE_TX_BUF_FRAG, +	ICE_TX_BUF_SKB, +	ICE_TX_BUF_XDP_TX, +	ICE_TX_BUF_XDP_XMIT, +	ICE_TX_BUF_XSK_TX, +}; +  struct ice_tx_buf { -	struct ice_tx_desc *next_to_watch;  	union { -		struct sk_buff *skb; -		void *raw_buf; /* used for XDP */ +		struct ice_tx_desc *next_to_watch; +		u32 rs_idx; +	}; +	union { +		void *raw_buf;		/* used for XDP_TX and FDir rules */ +		struct sk_buff *skb;	/* used for .ndo_start_xmit() */ +		struct xdp_frame *xdpf;	/* used for .ndo_xdp_xmit() */ +		struct xdp_buff *xdp;	/* used for XDP_TX ZC */  	};  	unsigned int bytecount; -	unsigned short gso_segs; -	u32 tx_flags; +	union { +		unsigned int 
gso_segs; +		unsigned int nr_frags;	/* used for mbuf XDP */ +	}; +	u32 type:16;			/* &ice_tx_buf_type */ +	u32 tx_flags:16;  	DEFINE_DMA_UNMAP_LEN(len);  	DEFINE_DMA_UNMAP_ADDR(dma);  }; @@ -170,7 +203,9 @@ struct ice_rx_buf {  	dma_addr_t dma;  	struct page *page;  	unsigned int page_offset; -	u16 pagecnt_bias; +	unsigned int pgcnt; +	unsigned int act; +	unsigned int pagecnt_bias;  };  struct ice_q_stats { @@ -191,6 +226,16 @@ struct ice_rxq_stats {  	u64 alloc_buf_failed;  }; +struct ice_ring_stats { +	struct rcu_head rcu;	/* to avoid race on free */ +	struct ice_q_stats stats; +	struct u64_stats_sync syncp; +	union { +		struct ice_txq_stats tx_stats; +		struct ice_rxq_stats rx_stats; +	}; +}; +  enum ice_ring_state_t {  	ICE_TX_XPS_INIT_DONE,  	ICE_TX_NBITS, @@ -263,44 +308,44 @@ struct ice_rx_ring {  	struct ice_vsi *vsi;		/* Backreference to associated VSI */  	struct ice_q_vector *q_vector;	/* Backreference to associated vector */  	u8 __iomem *tail; +	u16 q_index;			/* Queue number of ring */ + +	u16 count;			/* Number of descriptors */ +	u16 reg_idx;			/* HW register index of the ring */ +	u16 next_to_alloc; +	/* CL2 - 2nd cacheline starts here */  	union {  		struct ice_rx_buf *rx_buf;  		struct xdp_buff **xdp_buf;  	}; -	/* CL2 - 2nd cacheline starts here */ -	struct xdp_rxq_info xdp_rxq; +	struct xdp_buff xdp;  	/* CL3 - 3rd cacheline starts here */ -	u16 q_index;			/* Queue number of ring */ - -	u16 count;			/* Number of descriptors */ -	u16 reg_idx;			/* HW register index of the ring */ +	struct bpf_prog *xdp_prog; +	u16 rx_offset;  	/* used in interrupt processing */  	u16 next_to_use;  	u16 next_to_clean; -	u16 next_to_alloc; -	u16 rx_offset; -	u16 rx_buf_len; +	u16 first_desc;  	/* stats structs */ -	struct ice_rxq_stats rx_stats; -	struct ice_q_stats	stats; -	struct u64_stats_sync syncp; +	struct ice_ring_stats *ring_stats;  	struct rcu_head rcu;		/* to avoid race on free */ -	/* CL4 - 3rd cacheline starts here */ +	/* CL4 - 4th cacheline starts here */  	struct ice_channel *ch; -	struct bpf_prog *xdp_prog;  	struct ice_tx_ring *xdp_ring;  	struct xsk_buff_pool *xsk_pool; -	struct sk_buff *skb;  	dma_addr_t dma;			/* physical address of ring */  	u64 cached_phctime; +	u16 rx_buf_len;  	u8 dcb_tc;			/* Traffic class of ring */  	u8 ptp_rx;  #define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)  #define ICE_RX_FLAGS_CRC_STRIP_DIS	BIT(2)  	u8 flags; +	/* CL5 - 5th cacheline starts here */ +	struct xdp_rxq_info xdp_rxq;  } ____cacheline_internodealigned_in_smp;  struct ice_tx_ring { @@ -318,17 +363,14 @@ struct ice_tx_ring {  	struct xsk_buff_pool *xsk_pool;  	u16 next_to_use;  	u16 next_to_clean; -	u16 next_rs; -	u16 next_dd;  	u16 q_handle;			/* Queue handle per TC */  	u16 reg_idx;			/* HW register index of the ring */  	u16 count;			/* Number of descriptors */  	u16 q_index;			/* Queue number of ring */ +	u16 xdp_tx_active;  	/* stats structs */ -	struct ice_txq_stats tx_stats; +	struct ice_ring_stats *ring_stats;  	/* CL3 - 3rd cacheline starts here */ -	struct ice_q_stats	stats; -	struct u64_stats_sync syncp;  	struct rcu_head rcu;		/* to avoid race on free */  	DECLARE_BITMAP(xps_state, ICE_TX_NBITS);	/* XPS Config State */  	struct ice_channel *ch; @@ -336,7 +378,6 @@ struct ice_tx_ring {  	spinlock_t tx_lock;  	u32 txq_teid;			/* Added Tx queue TEID */  	/* CL4 - 4th cacheline starts here */ -	u16 xdp_tx_active;  #define ICE_TX_FLAGS_RING_XDP		BIT(0)  #define ICE_TX_FLAGS_RING_VLAN_L2TAG1	BIT(1)  #define ICE_TX_FLAGS_RING_VLAN_L2TAG2	BIT(2) @@ -425,7 +466,7 @@ static inline 
unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)  union ice_32b_rx_flex_desc; -bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count); +bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);  netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);  u16  ice_select_queue(struct net_device *dev, struct sk_buff *skb, diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 7ee38d02d1e5..7bc5aa340c7d 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -221,128 +221,217 @@ ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)  }  /** + * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer + * @dev: device for DMA mapping + * @tx_buf: Tx buffer to clean + * @bq: XDP bulk flush struct + */ +static void +ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf, +		     struct xdp_frame_bulk *bq) +{ +	dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma), +			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); +	dma_unmap_len_set(tx_buf, len, 0); + +	switch (tx_buf->type) { +	case ICE_TX_BUF_XDP_TX: +		page_frag_free(tx_buf->raw_buf); +		break; +	case ICE_TX_BUF_XDP_XMIT: +		xdp_return_frame_bulk(tx_buf->xdpf, bq); +		break; +	} + +	tx_buf->type = ICE_TX_BUF_EMPTY; +} + +/**   * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring   * @xdp_ring: XDP ring to clean   */ -static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring) +static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)  { -	unsigned int total_bytes = 0, total_pkts = 0; -	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring); -	u16 ntc = xdp_ring->next_to_clean; -	struct ice_tx_desc *next_dd_desc; -	u16 next_dd = xdp_ring->next_dd; -	struct ice_tx_buf *tx_buf; -	int i; +	int total_bytes = 0, total_pkts = 0; +	struct device *dev = xdp_ring->dev; +	u32 ntc = xdp_ring->next_to_clean; +	struct ice_tx_desc *tx_desc; +	u32 cnt = xdp_ring->count; +	struct xdp_frame_bulk bq; +	u32 frags, xdp_tx = 0; +	u32 ready_frames = 0; +	u32 idx; +	u32 ret; + +	idx = xdp_ring->tx_buf[ntc].rs_idx; +	tx_desc = ICE_TX_DESC(xdp_ring, idx); +	if (tx_desc->cmd_type_offset_bsz & +	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) { +		if (idx >= ntc) +			ready_frames = idx - ntc + 1; +		else +			ready_frames = idx + cnt - ntc + 1; +	} -	next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd); -	if (!(next_dd_desc->cmd_type_offset_bsz & -	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) -		return; +	if (unlikely(!ready_frames)) +		return 0; +	ret = ready_frames; + +	xdp_frame_bulk_init(&bq); +	rcu_read_lock(); /* xdp_return_frame_bulk() */ -	for (i = 0; i < tx_thresh; i++) { -		tx_buf = &xdp_ring->tx_buf[ntc]; +	while (ready_frames) { +		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc]; +		struct ice_tx_buf *head = tx_buf; +		/* bytecount holds size of head + frags */  		total_bytes += tx_buf->bytecount; -		/* normally tx_buf->gso_segs was taken but at this point -		 * it's always 1 for us -		 */ +		frags = tx_buf->nr_frags;  		total_pkts++; - -		page_frag_free(tx_buf->raw_buf); -		dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma), -				 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); -		dma_unmap_len_set(tx_buf, len, 0); -		tx_buf->raw_buf = NULL; +		/* count head + frags */ +		ready_frames -= frags + 1; +		xdp_tx++;  		ntc++; -		if (ntc >= xdp_ring->count) +		if (ntc == cnt)  			ntc = 0; + +		for (int i = 0; i < frags; i++) { +			tx_buf = 
&xdp_ring->tx_buf[ntc]; + +			ice_clean_xdp_tx_buf(dev, tx_buf, &bq); +			ntc++; +			if (ntc == cnt) +				ntc = 0; +		} + +		ice_clean_xdp_tx_buf(dev, head, &bq);  	} -	next_dd_desc->cmd_type_offset_bsz = 0; -	xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh; -	if (xdp_ring->next_dd > xdp_ring->count) -		xdp_ring->next_dd = tx_thresh - 1; +	xdp_flush_frame_bulk(&bq); +	rcu_read_unlock(); + +	tx_desc->cmd_type_offset_bsz = 0;  	xdp_ring->next_to_clean = ntc; +	xdp_ring->xdp_tx_active -= xdp_tx;  	ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes); + +	return ret;  }  /** - * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission - * @data: packet data pointer - * @size: packet data size + * __ice_xmit_xdp_ring - submit frame to XDP ring for transmission + * @xdp: XDP buffer to be placed onto Tx descriptors   * @xdp_ring: XDP ring for transmission + * @frame: whether this comes from .ndo_xdp_xmit()   */ -int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring) +int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, +			bool frame)  { -	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring); -	u16 i = xdp_ring->next_to_use; +	struct skb_shared_info *sinfo = NULL; +	u32 size = xdp->data_end - xdp->data; +	struct device *dev = xdp_ring->dev; +	u32 ntu = xdp_ring->next_to_use;  	struct ice_tx_desc *tx_desc; +	struct ice_tx_buf *tx_head;  	struct ice_tx_buf *tx_buf; -	dma_addr_t dma; +	u32 cnt = xdp_ring->count; +	void *data = xdp->data; +	u32 nr_frags = 0; +	u32 free_space; +	u32 frag = 0; + +	free_space = ICE_DESC_UNUSED(xdp_ring); +	if (free_space < ICE_RING_QUARTER(xdp_ring)) +		free_space += ice_clean_xdp_irq(xdp_ring); + +	if (unlikely(!free_space)) +		goto busy; + +	if (unlikely(xdp_buff_has_frags(xdp))) { +		sinfo = xdp_get_shared_info_from_buff(xdp); +		nr_frags = sinfo->nr_frags; +		if (free_space < nr_frags + 1) +			goto busy; +	} -	if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh) -		ice_clean_xdp_irq(xdp_ring); +	tx_desc = ICE_TX_DESC(xdp_ring, ntu); +	tx_head = &xdp_ring->tx_buf[ntu]; +	tx_buf = tx_head; -	if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) { -		xdp_ring->tx_stats.tx_busy++; -		return ICE_XDP_CONSUMED; -	} +	for (;;) { +		dma_addr_t dma; -	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); -	if (dma_mapping_error(xdp_ring->dev, dma)) -		return ICE_XDP_CONSUMED; +		dma = dma_map_single(dev, data, size, DMA_TO_DEVICE); +		if (dma_mapping_error(dev, dma)) +			goto dma_unmap; -	tx_buf = &xdp_ring->tx_buf[i]; -	tx_buf->bytecount = size; -	tx_buf->gso_segs = 1; -	tx_buf->raw_buf = data; +		/* record length, and DMA address */ +		dma_unmap_len_set(tx_buf, len, size); +		dma_unmap_addr_set(tx_buf, dma, dma); -	/* record length, and DMA address */ -	dma_unmap_len_set(tx_buf, len, size); -	dma_unmap_addr_set(tx_buf, dma, dma); +		if (frame) { +			tx_buf->type = ICE_TX_BUF_FRAG; +		} else { +			tx_buf->type = ICE_TX_BUF_XDP_TX; +			tx_buf->raw_buf = data; +		} -	tx_desc = ICE_TX_DESC(xdp_ring, i); -	tx_desc->buf_addr = cpu_to_le64(dma); -	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0, -						      size, 0); +		tx_desc->buf_addr = cpu_to_le64(dma); +		tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0); -	xdp_ring->xdp_tx_active++; -	i++; -	if (i == xdp_ring->count) { -		i = 0; -		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs); -		tx_desc->cmd_type_offset_bsz |= -			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S); -		xdp_ring->next_rs = tx_thresh - 1; +		ntu++; +		if (ntu == cnt) +			ntu = 
0; + +		if (frag == nr_frags) +			break; + +		tx_desc = ICE_TX_DESC(xdp_ring, ntu); +		tx_buf = &xdp_ring->tx_buf[ntu]; + +		data = skb_frag_address(&sinfo->frags[frag]); +		size = skb_frag_size(&sinfo->frags[frag]); +		frag++;  	} -	xdp_ring->next_to_use = i; -	if (i > xdp_ring->next_rs) { -		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs); -		tx_desc->cmd_type_offset_bsz |= -			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S); -		xdp_ring->next_rs += tx_thresh; +	/* store info about bytecount and frag count in first desc */ +	tx_head->bytecount = xdp_get_buff_len(xdp); +	tx_head->nr_frags = nr_frags; + +	if (frame) { +		tx_head->type = ICE_TX_BUF_XDP_XMIT; +		tx_head->xdpf = xdp->data_hard_start;  	} +	/* update last descriptor from a frame with EOP */ +	tx_desc->cmd_type_offset_bsz |= +		cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S); + +	xdp_ring->xdp_tx_active++; +	xdp_ring->next_to_use = ntu; +  	return ICE_XDP_TX; -} -/** - * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it - * @xdp: XDP buffer - * @xdp_ring: XDP Tx ring - * - * Returns negative on failure, 0 on success. - */ -int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring) -{ -	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); +dma_unmap: +	for (;;) { +		tx_buf = &xdp_ring->tx_buf[ntu]; +		dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma), +			       dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); +		dma_unmap_len_set(tx_buf, len, 0); +		if (tx_buf == tx_head) +			break; + +		if (!ntu) +			ntu += cnt; +		ntu--; +	} +	return ICE_XDP_CONSUMED; -	if (unlikely(!xdpf)) -		return ICE_XDP_CONSUMED; +busy: +	xdp_ring->ring_stats->tx_stats.tx_busy++; -	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring); +	return ICE_XDP_CONSUMED;  }  /** @@ -354,14 +443,21 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)   * should be called when a batch of packets has been processed in the   * napi loop.   */ -void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res) +void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, +			 u32 first_idx)  { +	struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx]; +  	if (xdp_res & ICE_XDP_REDIR)  		xdp_do_flush_map();  	if (xdp_res & ICE_XDP_TX) {  		if (static_branch_unlikely(&ice_xdp_locking_key))  			spin_lock(&xdp_ring->tx_lock); +		/* store index of descriptor with RS bit set in the first +		 * ice_tx_buf of given NAPI batch +		 */ +		tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);  		ice_xdp_ring_update_tail(xdp_ring);  		if (static_branch_unlikely(&ice_xdp_locking_key))  			spin_unlock(&xdp_ring->tx_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index c7d2954dc9ea..115969ecdf7b 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -6,6 +6,36 @@  #include "ice.h"  /** + * ice_set_rx_bufs_act - propagate Rx buffer action to frags + * @xdp: XDP buffer representing frame (linear and frags part) + * @rx_ring: Rx ring struct + * @act: action to store onto Rx buffers related to XDP buffer parts + * + * Set action that should be taken before putting Rx buffer from first frag + * to one before last. Last one is handled by caller of this function as it + * is the EOP frag that is currently being processed. This function is + * supposed to be called only when XDP buffer contains frags. 
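For context, the action written here is consumed only after the NAPI loop: ice_clean_rx_irq() earlier in this patch walks every buffer it touched and applies the stored verdict in one pass. Condensed from that hunk:

/* Condensed from ice_clean_rx_irq() above: one pass applies rx_buf->act
 * to every buffer the loop produced, from cached_ntc up to first_desc.
 */
while (cached_ntc != rx_ring->first_desc) {
	struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];

	if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
		ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
		xdp_xmit |= buf->act;		/* frame left the driver */
	} else if (buf->act & ICE_XDP_CONSUMED) {
		buf->pagecnt_bias++;		/* dropped; return the reference */
	} else if (buf->act == ICE_XDP_PASS) {
		ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
	}

	ice_put_rx_buf(rx_ring, buf);
	if (++cached_ntc >= cnt)
		cached_ntc = 0;
}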
+ */ +static inline void +ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring, +		    const unsigned int act) +{ +	const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); +	u32 first = rx_ring->first_desc; +	u32 nr_frags = sinfo->nr_frags; +	u32 cnt = rx_ring->count; +	struct ice_rx_buf *buf; + +	for (int i = 0; i < nr_frags; i++) { +		buf = &rx_ring->rx_buf[first]; +		buf->act = act; + +		if (++first == cnt) +			first = 0; +	} +} + +/**   * ice_test_staterr - tests bits in Rx descriptor status and error fields   * @status_err_n: Rx descriptor status_error0 or status_error1 bits   * @stat_err_bits: value to mask @@ -21,6 +51,28 @@ ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)  	return !!(status_err_n & cpu_to_le16(stat_err_bits));  } +/** + * ice_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * If the buffer is an EOP buffer, this function exits returning false, + * otherwise return true indicating that this is in fact a non-EOP buffer. + */ +static inline bool +ice_is_non_eop(const struct ice_rx_ring *rx_ring, +	       const union ice_32b_rx_flex_desc *rx_desc) +{ +	/* if we are the last buffer then there is nothing else to do */ +#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) +	if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF))) +		return false; + +	rx_ring->ring_stats->rx_stats.non_eop_descs++; + +	return true; +} +  static inline __le64  ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)  { @@ -70,9 +122,28 @@ static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)  	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);  } -void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res); +/** + * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU) + * @xdp_ring: XDP ring to produce the HW Tx descriptors on + * + * returns index of descriptor that had RS bit produced on + */ +static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring) +{ +	u32 rs_idx = xdp_ring->next_to_use ? 
xdp_ring->next_to_use - 1 : xdp_ring->count - 1; +	struct ice_tx_desc *tx_desc; + +	tx_desc = ICE_TX_DESC(xdp_ring, rs_idx); +	tx_desc->cmd_type_offset_bsz |= +		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S); + +	return rs_idx; +} + +void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);  int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring); -int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring); +int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, +			bool frame);  void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);  void  ice_process_skb_fields(struct ice_rx_ring *rx_ring, diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index e1abfcee96dc..e3f622cad425 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -524,7 +524,14 @@ struct ice_sched_node {  	struct ice_sched_node *sibling; /* next sibling in the same layer */  	struct ice_sched_node **children;  	struct ice_aqc_txsched_elem_data info; +	char *name; +	struct devlink_rate *rate_node; +	u64 tx_max; +	u64 tx_share;  	u32 agg_id;			/* aggregator group ID */ +	u32 id; +	u32 tx_priority; +	u32 tx_weight;  	u16 vsi_handle;  	u8 in_use;			/* suspended or in use */  	u8 tx_sched_layer;		/* Logical Layer (1-9) */ @@ -706,7 +713,9 @@ struct ice_port_info {  	/* List contain profile ID(s) and other params per layer */  	struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];  	struct ice_qos_cfg qos_cfg; +	struct xarray sched_node_ids;  	u8 is_vf:1; +	u8 is_custom_tx_enabled:1;  };  struct ice_switch_info { diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 1c51778db951..0e57bd1b85fd 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -237,16 +237,49 @@ static void ice_vf_clear_counters(struct ice_vf *vf)   */  static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)  { +	/* Close any IRQ mapping now */ +	if (vf->vf_ops->irq_close) +		vf->vf_ops->irq_close(vf); +  	ice_vf_clear_counters(vf);  	vf->vf_ops->clear_reset_trigger(vf);  }  /** + * ice_vf_recreate_vsi - Release and re-create the VF's VSI + * @vf: VF to recreate the VSI for + * + * This is only called when a single VF is being reset (i.e. VVF, VFLR, host + * VF configuration change, etc) + * + * It releases and then re-creates a new VSI. + */ +static int ice_vf_recreate_vsi(struct ice_vf *vf) +{ +	struct ice_pf *pf = vf->pf; +	int err; + +	ice_vf_vsi_release(vf); + +	err = vf->vf_ops->create_vsi(vf); +	if (err) { +		dev_err(ice_pf_to_dev(pf), +			"Failed to recreate the VF%u's VSI, error %d\n", +			vf->vf_id, err); +		return err; +	} + +	return 0; +} + +/**   * ice_vf_rebuild_vsi - rebuild the VF's VSI   * @vf: VF to rebuild the VSI for   *   * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the   * host, PFR, CORER, etc.). + * + * It reprograms the VSI configuration back into hardware.   
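To keep the two reset flavors straight, the division of labor introduced above can be sketched as follows; single_vf_reset is an illustrative condition, not a variable in the driver:

/* Schematic only: which VSI path each reset flavor takes in this patch */
ice_vf_pre_vsi_rebuild(vf);		/* irq_close() hook, clear counters/trigger */
if (single_vf_reset)			/* ice_reset_vf(): VFR, VFLR, ... */
	err = ice_vf_recreate_vsi(vf);	/* release VSI, then create_vsi() */
else					/* ice_reset_all_vfs(): PFR, CORER, ... */
	err = ice_vf_rebuild_vsi(vf);	/* ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT) */
ice_vf_post_vsi_rebuild(vf);		/* host cfg, mark initialized, ops hook */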
*/  static int ice_vf_rebuild_vsi(struct ice_vf *vf)  { @@ -256,7 +289,7 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)  	if (WARN_ON(!vsi))  		return -EINVAL; -	if (ice_vsi_rebuild(vsi, true)) { +	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {  		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",  			vf->vf_id);  		return -EIO; @@ -271,6 +304,21 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)  }  /** + * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild + * @vf: the VF being reset + * + * Perform reset tasks which must occur after the VSI has been re-created or + * rebuilt during a VF reset. + */ +static void ice_vf_post_vsi_rebuild(struct ice_vf *vf) +{ +	ice_vf_rebuild_host_cfg(vf); +	ice_vf_set_initialized(vf); + +	vf->vf_ops->post_vsi_rebuild(vf); +} + +/**   * ice_is_any_vf_in_unicast_promisc - check if any VF(s)   * are in unicast promiscuous mode   * @pf: PF structure for accessing VF(s) @@ -495,7 +543,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)  		ice_vf_pre_vsi_rebuild(vf);  		ice_vf_rebuild_vsi(vf); -		vf->vf_ops->post_vsi_rebuild(vf); +		ice_vf_post_vsi_rebuild(vf);  		mutex_unlock(&vf->cfg_lock);  	} @@ -639,14 +687,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)  	ice_vf_pre_vsi_rebuild(vf); -	if (vf->vf_ops->vsi_rebuild(vf)) { +	if (ice_vf_recreate_vsi(vf)) {  		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",  			vf->vf_id);  		err = -EFAULT;  		goto out_unlock;  	} -	vf->vf_ops->post_vsi_rebuild(vf); +	ice_vf_post_vsi_rebuild(vf);  	vsi = ice_get_vf_vsi(vf);  	if (WARN_ON(!vsi)) {  		err = -EINVAL; @@ -673,7 +721,7 @@ out_unlock:   * ice_set_vf_state_qs_dis - Set VF queues state to disabled   * @vf: pointer to the VF structure   */ -void ice_set_vf_state_qs_dis(struct ice_vf *vf) +static void ice_set_vf_state_qs_dis(struct ice_vf *vf)  {  	/* Clear Rx/Tx enabled queues flag */  	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF); @@ -681,9 +729,45 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf)  	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);  } +/** + * ice_set_vf_state_dis - Set VF state to disabled + * @vf: pointer to the VF structure + */ +void ice_set_vf_state_dis(struct ice_vf *vf) +{ +	ice_set_vf_state_qs_dis(vf); +	vf->vf_ops->clear_reset_state(vf); +} +  /* Private functions only accessed from other virtualization files */  /** + * ice_initialize_vf_entry - Initialize a VF entry + * @vf: pointer to the VF structure + */ +void ice_initialize_vf_entry(struct ice_vf *vf) +{ +	struct ice_pf *pf = vf->pf; +	struct ice_vfs *vfs; + +	vfs = &pf->vfs; + +	/* assign default capabilities */ +	vf->spoofchk = true; +	vf->num_vf_qs = vfs->num_qps_per; +	ice_vc_set_default_allowlist(vf); +	ice_virtchnl_set_dflt_ops(vf); + +	/* ctrl_vsi_idx will be set to a valid value only when iAVF +	 * creates its first fdir rule. 
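A plausible creation-time call site for ice_initialize_vf_entry(), shown only for illustration; the allocation path itself is outside these hunks and the surrounding names are assumed:

/* Assumed caller shape; not part of this patch */
struct ice_vf *vf;

vf = kzalloc(sizeof(*vf), GFP_KERNEL);
if (!vf)
	return -ENOMEM;

vf->pf = pf;
vf->vf_id = vf_id;
/* one helper now applies default caps, allowlist, fdir state and cfg_lock */
ice_initialize_vf_entry(vf);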
+	 */ +	ice_vf_ctrl_invalidate_vsi(vf); +	ice_vf_fdir_init(vf); + +	mutex_init(&vf->cfg_lock); +} + +/**   * ice_dis_vf_qs - Disable the VF queues   * @vf: pointer to the VF structure   */ @@ -700,6 +784,30 @@ void ice_dis_vf_qs(struct ice_vf *vf)  }  /** + * ice_err_to_virt_err - translate errors for VF return code + * @err: error return code + */ +enum virtchnl_status_code ice_err_to_virt_err(int err) +{ +	switch (err) { +	case 0: +		return VIRTCHNL_STATUS_SUCCESS; +	case -EINVAL: +	case -ENODEV: +		return VIRTCHNL_STATUS_ERR_PARAM; +	case -ENOMEM: +		return VIRTCHNL_STATUS_ERR_NO_MEMORY; +	case -EALREADY: +	case -EBUSY: +	case -EIO: +	case -ENOSPC: +		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; +	default: +		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; +	} +} + +/**   * ice_check_vf_init - helper to check if VF init complete   * @vf: the pointer to the VF to check   */ @@ -900,18 +1008,18 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)  	vf->num_mac++; -	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) { -		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr, +	if (is_valid_ether_addr(vf->hw_lan_addr)) { +		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,  					  ICE_FWD_TO_VSI);  		if (status) {  			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", -				&vf->hw_lan_addr.addr[0], vf->vf_id, +				&vf->hw_lan_addr[0], vf->vf_id,  				status);  			return status;  		}  		vf->num_mac++; -		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr); +		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);  	}  	return 0; @@ -1091,11 +1199,16 @@ void ice_vf_ctrl_vsi_release(struct ice_vf *vf)   */  struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)  { -	struct ice_port_info *pi = ice_vf_get_port_info(vf); +	struct ice_vsi_cfg_params params = {};  	struct ice_pf *pf = vf->pf;  	struct ice_vsi *vsi; -	vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL); +	params.type = ICE_VSI_CTRL; +	params.pi = ice_vf_get_port_info(vf); +	params.vf = vf; +	params.flags = ICE_VSI_FLAG_INIT; + +	vsi = ice_vsi_setup(pf, ¶ms);  	if (!vsi) {  		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");  		ice_vf_ctrl_invalidate_vsi(vf); @@ -1105,6 +1218,60 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)  }  /** + * ice_vf_init_host_cfg - Initialize host admin configuration + * @vf: VF to initialize + * @vsi: the VSI created at initialization + * + * Initialize the VF host configuration. Called during VF creation to setup + * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It + * should only be called during VF creation. 
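The expected call shape, assumed for illustration since VF creation itself is outside these hunks: the helper runs once, right after the VF's first VSI exists, and any failure aborts bring-up.

/* Assumed usage at VF creation time; surrounding code is illustrative */
err = ice_vf_init_host_cfg(vf, vsi);
if (err) {
	dev_err(ice_pf_to_dev(pf), "Failed host configuration for VF %d\n",
		vf->vf_id);
	return err;
}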
+ */ +int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi) +{ +	struct ice_vsi_vlan_ops *vlan_ops; +	struct ice_pf *pf = vf->pf; +	u8 broadcast[ETH_ALEN]; +	struct device *dev; +	int err; + +	dev = ice_pf_to_dev(pf); + +	err = ice_vsi_add_vlan_zero(vsi); +	if (err) { +		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n", +			 vf->vf_id); +		return err; +	} + +	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); +	err = vlan_ops->ena_rx_filtering(vsi); +	if (err) { +		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n", +			 vf->vf_id); +		return err; +	} + +	eth_broadcast_addr(broadcast); +	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); +	if (err) { +		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n", +			vf->vf_id, err); +		return err; +	} + +	vf->num_mac = 1; + +	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk); +	if (err) { +		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n", +			 vf->vf_id); +		return err; +	} + +	return 0; +} + +/**   * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access   * @vf: VF to remove access to VSI for   */ @@ -1115,6 +1282,24 @@ void ice_vf_invalidate_vsi(struct ice_vf *vf)  }  /** + * ice_vf_vsi_release - Release the VF VSI and invalidate indexes + * @vf: pointer to the VF structure + * + * Release the VF associated with this VSI and then invalidate the VSI + * indexes. + */ +void ice_vf_vsi_release(struct ice_vf *vf) +{ +	struct ice_vsi *vsi = ice_get_vf_vsi(vf); + +	if (WARN_ON(!vsi)) +		return; + +	ice_vsi_release(vsi); +	ice_vf_invalidate_vsi(vf); +} + +/**   * ice_vf_set_initialized - VF is ready for VIRTCHNL communication   * @vf: VF to set in initialized state   * diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 52bd9a3816bf..ef30f05b5d02 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -56,11 +56,13 @@ struct ice_mdd_vf_events {  struct ice_vf_ops {  	enum ice_disq_rst_src reset_type;  	void (*free)(struct ice_vf *vf); +	void (*clear_reset_state)(struct ice_vf *vf);  	void (*clear_mbx_register)(struct ice_vf *vf);  	void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);  	bool (*poll_reset_status)(struct ice_vf *vf);  	void (*clear_reset_trigger)(struct ice_vf *vf); -	int (*vsi_rebuild)(struct ice_vf *vf); +	void (*irq_close)(struct ice_vf *vf); +	int (*create_vsi)(struct ice_vf *vf);  	void (*post_vsi_rebuild)(struct ice_vf *vf);  }; @@ -96,8 +98,8 @@ struct ice_vf {  	struct ice_sw *vf_sw_id;	/* switch ID the VF VSIs connect to */  	struct virtchnl_version_info vf_ver;  	u32 driver_caps;		/* reported by VF driver */ -	struct virtchnl_ether_addr dev_lan_addr; -	struct virtchnl_ether_addr hw_lan_addr; +	u8 dev_lan_addr[ETH_ALEN]; +	u8 hw_lan_addr[ETH_ALEN];  	struct ice_time_mac legacy_last_added_umac;  	DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);  	DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF); @@ -213,7 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf);  struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);  bool ice_is_vf_disabled(struct ice_vf *vf);  int ice_check_vf_ready_for_cfg(struct ice_vf *vf); -void ice_set_vf_state_qs_dis(struct ice_vf *vf); +void ice_set_vf_state_dis(struct ice_vf *vf);  bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);  void  ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi, @@ -259,7 +261,7 @@ static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)  	return -EOPNOTSUPP;  
} -static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) +static inline void ice_set_vf_state_dis(struct ice_vf *vf)  {  } diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h index 15887e772c76..6f3293b793b5 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h @@ -23,8 +23,10 @@  #warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"  #endif +void ice_initialize_vf_entry(struct ice_vf *vf);  void ice_dis_vf_qs(struct ice_vf *vf);  int ice_check_vf_init(struct ice_vf *vf); +enum virtchnl_status_code ice_err_to_virt_err(int err);  struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf);  int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable);  bool ice_is_vf_trusted(struct ice_vf *vf); @@ -34,7 +36,9 @@ void ice_vf_rebuild_host_cfg(struct ice_vf *vf);  void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);  void ice_vf_ctrl_vsi_release(struct ice_vf *vf);  struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf); +int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi);  void ice_vf_invalidate_vsi(struct ice_vf *vf); +void ice_vf_vsi_release(struct ice_vf *vf);  void ice_vf_set_initialized(struct ice_vf *vf);  #endif /* _ICE_VF_LIB_PRIVATE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c index fc8c93fa4455..f56fa94ff3d0 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c @@ -39,6 +39,20 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,  	return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);  } +static const u32 ice_legacy_aq_to_vc_speed[] = { +	VIRTCHNL_LINK_SPEED_100MB,	/* BIT(0) */ +	VIRTCHNL_LINK_SPEED_100MB, +	VIRTCHNL_LINK_SPEED_1GB, +	VIRTCHNL_LINK_SPEED_1GB, +	VIRTCHNL_LINK_SPEED_1GB, +	VIRTCHNL_LINK_SPEED_10GB, +	VIRTCHNL_LINK_SPEED_20GB, +	VIRTCHNL_LINK_SPEED_25GB, +	VIRTCHNL_LINK_SPEED_40GB, +	VIRTCHNL_LINK_SPEED_40GB, +	VIRTCHNL_LINK_SPEED_40GB, +}; +  /**   * ice_conv_link_speed_to_virtchnl   * @adv_link_support: determines the format of the returned link speed @@ -53,83 +67,20 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,   */  u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)  { -	u32 speed; +	/* convert a BIT() value into an array index */ +	u32 index = fls(link_speed) - 1;  	if (adv_link_support) -		switch (link_speed) { -		case ICE_AQ_LINK_SPEED_10MB: -			speed = ICE_LINK_SPEED_10MBPS; -			break; -		case ICE_AQ_LINK_SPEED_100MB: -			speed = ICE_LINK_SPEED_100MBPS; -			break; -		case ICE_AQ_LINK_SPEED_1000MB: -			speed = ICE_LINK_SPEED_1000MBPS; -			break; -		case ICE_AQ_LINK_SPEED_2500MB: -			speed = ICE_LINK_SPEED_2500MBPS; -			break; -		case ICE_AQ_LINK_SPEED_5GB: -			speed = ICE_LINK_SPEED_5000MBPS; -			break; -		case ICE_AQ_LINK_SPEED_10GB: -			speed = ICE_LINK_SPEED_10000MBPS; -			break; -		case ICE_AQ_LINK_SPEED_20GB: -			speed = ICE_LINK_SPEED_20000MBPS; -			break; -		case ICE_AQ_LINK_SPEED_25GB: -			speed = ICE_LINK_SPEED_25000MBPS; -			break; -		case ICE_AQ_LINK_SPEED_40GB: -			speed = ICE_LINK_SPEED_40000MBPS; -			break; -		case ICE_AQ_LINK_SPEED_50GB: -			speed = ICE_LINK_SPEED_50000MBPS; -			break; -		case ICE_AQ_LINK_SPEED_100GB: -			speed = ICE_LINK_SPEED_100000MBPS; -			break; -		default: -			speed = ICE_LINK_SPEED_UNKNOWN; -			break; -		} -	else +		
return ice_get_link_speed(index); +	else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))  		/* Virtchnl speeds are not defined for every speed supported in  		 * the hardware. To maintain compatibility with older AVF  		 * drivers, while reporting the speed the new speed values are  		 * resolved to the closest known virtchnl speeds  		 */ -		switch (link_speed) { -		case ICE_AQ_LINK_SPEED_10MB: -		case ICE_AQ_LINK_SPEED_100MB: -			speed = (u32)VIRTCHNL_LINK_SPEED_100MB; -			break; -		case ICE_AQ_LINK_SPEED_1000MB: -		case ICE_AQ_LINK_SPEED_2500MB: -		case ICE_AQ_LINK_SPEED_5GB: -			speed = (u32)VIRTCHNL_LINK_SPEED_1GB; -			break; -		case ICE_AQ_LINK_SPEED_10GB: -			speed = (u32)VIRTCHNL_LINK_SPEED_10GB; -			break; -		case ICE_AQ_LINK_SPEED_20GB: -			speed = (u32)VIRTCHNL_LINK_SPEED_20GB; -			break; -		case ICE_AQ_LINK_SPEED_25GB: -			speed = (u32)VIRTCHNL_LINK_SPEED_25GB; -			break; -		case ICE_AQ_LINK_SPEED_40GB: -		case ICE_AQ_LINK_SPEED_50GB: -		case ICE_AQ_LINK_SPEED_100GB: -			speed = (u32)VIRTCHNL_LINK_SPEED_40GB; -			break; -		default: -			speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN; -			break; -		} +		return ice_legacy_aq_to_vc_speed[index]; -	return speed; +	return VIRTCHNL_LINK_SPEED_UNKNOWN;  }  /* The mailbox overflow detection algorithm helps to check if there diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c index 5ecc0ee9a78e..b1ffb81893d4 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c @@ -44,13 +44,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)  		/* outer VLAN ops regardless of port VLAN config */  		vlan_ops->add_vlan = ice_vsi_add_vlan; -		vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;  		vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;  		vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;  		if (ice_vf_is_port_vlan_ena(vf)) {  			/* setup outer VLAN ops */  			vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; +			/* all Rx traffic should be in the domain of the +			 * assigned port VLAN, so prevent disabling Rx VLAN +			 * filtering +			 */ +			vlan_ops->dis_rx_filtering = noop_vlan;  			vlan_ops->ena_rx_filtering =  				ice_vsi_ena_rx_vlan_filtering; @@ -63,6 +67,9 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)  			vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;  			vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;  		} else { +			vlan_ops->dis_rx_filtering = +				ice_vsi_dis_rx_vlan_filtering; +  			if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))  				vlan_ops->ena_rx_filtering = noop_vlan;  			else @@ -96,7 +103,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)  			vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;  			vlan_ops->ena_rx_filtering =  				ice_vsi_ena_rx_vlan_filtering; +			/* all Rx traffic should be in the domain of the +			 * assigned port VLAN, so prevent disabling Rx VLAN +			 * filtering +			 */ +			vlan_ops->dis_rx_filtering = noop_vlan;  		} else { +			vlan_ops->dis_rx_filtering = +				ice_vsi_dis_rx_vlan_filtering;  			if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))  				vlan_ops->ena_rx_filtering = noop_vlan;  			else diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 2b4c791b6cba..e24e3f5017ca 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -462,6 +462,9 @@ static int 
ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)  			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;  	} +	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) +		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC; +  	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)  		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF; @@ -504,7 +507,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)  	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;  	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;  	ether_addr_copy(vfres->vsi_res[0].default_mac_addr, -			vf->hw_lan_addr.addr); +			vf->hw_lan_addr);  	/* match guest capabilities */  	vf->driver_caps = vfres->vf_cap_flags; @@ -1658,6 +1661,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)  		/* copy Rx queue info from VF into VSI */  		if (qpi->rxq.ring_len > 0) {  			u16 max_frame_size = ice_vc_get_max_frame_size(vf); +			u32 rxdid;  			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;  			vsi->rx_rings[i]->count = qpi->rxq.ring_len; @@ -1685,6 +1689,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)  					 vf->vf_id, i);  				goto error_param;  			} + +			/* If Rx flex desc is supported, select RXDID for Rx +			 * queues. Otherwise, use legacy 32byte descriptor +			 * format. Legacy 16byte descriptor is not supported. +			 * If this RXDID is selected, return error. +			 */ +			if (vf->driver_caps & +			    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) { +				rxdid = qpi->rxq.rxdid; +				if (!(BIT(rxdid) & pf->supported_rxdids)) +					goto error_param; +			} else { +				rxdid = ICE_RXDID_LEGACY_1; +			} + +			ice_write_qrxflxp_cntxt(&vsi->back->hw, +						vsi->rxq_map[q_idx], +						rxdid, 0x03, false);  		}  	} @@ -1780,10 +1802,10 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)  	 * was correctly specified over VIRTCHNL  	 */  	if ((ice_is_vc_addr_legacy(vc_ether_addr) && -	     is_zero_ether_addr(vf->hw_lan_addr.addr)) || +	     is_zero_ether_addr(vf->hw_lan_addr)) ||  	    ice_is_vc_addr_primary(vc_ether_addr)) { -		ether_addr_copy(vf->dev_lan_addr.addr, mac_addr); -		ether_addr_copy(vf->hw_lan_addr.addr, mac_addr); +		ether_addr_copy(vf->dev_lan_addr, mac_addr); +		ether_addr_copy(vf->hw_lan_addr, mac_addr);  	}  	/* hardware and device MACs are already set, but its possible that the @@ -1814,7 +1836,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,  	int ret;  	/* device MAC already added */ -	if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr)) +	if (ether_addr_equal(mac_addr, vf->dev_lan_addr))  		return 0;  	if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) { @@ -1869,8 +1891,8 @@ ice_update_legacy_cached_mac(struct ice_vf *vf,  	    ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))  		return; -	ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr); -	ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr); +	ether_addr_copy(vf->dev_lan_addr, vf->legacy_last_added_umac.addr); +	ether_addr_copy(vf->hw_lan_addr, vf->legacy_last_added_umac.addr);  }  /** @@ -1884,15 +1906,15 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)  	u8 *mac_addr = vc_ether_addr->addr;  	if (!is_valid_ether_addr(mac_addr) || -	    !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) +	    !ether_addr_equal(vf->dev_lan_addr, mac_addr))  		return;  	/* allow the device MAC to be repopulated in the add flow and don't -	 * clear the hardware MAC (i.e. 
hw_lan_addr.addr) here as that is meant +	 * clear the hardware MAC (i.e. hw_lan_addr) here as that is meant  	 * to be persistent on VM reboot and across driver unload/load, which  	 * won't work if we clear the hardware MAC here  	 */ -	eth_zero_addr(vf->dev_lan_addr.addr); +	eth_zero_addr(vf->dev_lan_addr);  	ice_update_legacy_cached_mac(vf, vc_ether_addr);  } @@ -1912,7 +1934,7 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,  	int status;  	if (!ice_can_vf_change_mac(vf) && -	    ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) +	    ether_addr_equal(vf->dev_lan_addr, mac_addr))  		return 0;  	status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI); @@ -2457,6 +2479,164 @@ error_param:  }  /** + * ice_vc_get_rss_hena - return the RSS HENA bits allowed by the hardware + * @vf: pointer to the VF info + */ +static int ice_vc_get_rss_hena(struct ice_vf *vf) +{ +	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; +	struct virtchnl_rss_hena *vrh = NULL; +	int len = 0, ret; + +	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { +		v_ret = VIRTCHNL_STATUS_ERR_PARAM; +		goto err; +	} + +	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { +		dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n"); +		v_ret = VIRTCHNL_STATUS_ERR_PARAM; +		goto err; +	} + +	len = sizeof(struct virtchnl_rss_hena); +	vrh = kzalloc(len, GFP_KERNEL); +	if (!vrh) { +		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; +		len = 0; +		goto err; +	} + +	vrh->hena = ICE_DEFAULT_RSS_HENA; +err: +	/* send the response back to the VF */ +	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, v_ret, +				    (u8 *)vrh, len); +	kfree(vrh); +	return ret; +} + +/** + * ice_vc_set_rss_hena - set RSS HENA bits for the VF + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + */ +static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg) +{ +	struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; +	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; +	struct ice_pf *pf = vf->pf; +	struct ice_vsi *vsi; +	struct device *dev; +	int status; + +	dev = ice_pf_to_dev(pf); + +	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { +		v_ret = VIRTCHNL_STATUS_ERR_PARAM; +		goto err; +	} + +	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { +		dev_err(dev, "RSS not supported by PF\n"); +		v_ret = VIRTCHNL_STATUS_ERR_PARAM; +		goto err; +	} + +	vsi = ice_get_vf_vsi(vf); +	if (!vsi) { +		v_ret = VIRTCHNL_STATUS_ERR_PARAM; +		goto err; +	} + +	/* clear all previously programmed RSS configuration to allow VF drivers +	 * the ability to customize the RSS configuration and/or completely +	 * disable RSS +	 */ +	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); +	if (status && !vrh->hena) { +		/* only report failure to clear the current RSS configuration if +		 * that was clearly the VF's intention (i.e. 
vrh->hena = 0) +		 */ +		v_ret = ice_err_to_virt_err(status); +		goto err; +	} else if (status) { +		/* allow the VF to update the RSS configuration even on failure +		 * to clear the current RSS configuration in an attempt to keep +		 * RSS in a working state +		 */ +		dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n", +			 vf->vf_id); +	} + +	if (vrh->hena) { +		status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, vrh->hena); +		v_ret = ice_err_to_virt_err(status); +	} + +	/* send the response to the VF */ +err: +	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, v_ret, +				     NULL, 0); +} + +/** + * ice_vc_query_rxdid - query RXDID supported by DDP package + * @vf: pointer to VF info + * + * Called from VF to query a bitmap of supported flexible + * descriptor RXDIDs of a DDP package. + */ +static int ice_vc_query_rxdid(struct ice_vf *vf) +{ +	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; +	struct virtchnl_supported_rxdids *rxdid = NULL; +	struct ice_hw *hw = &vf->pf->hw; +	struct ice_pf *pf = vf->pf; +	int len = 0; +	int ret, i; +	u32 regval; + +	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { +		v_ret = VIRTCHNL_STATUS_ERR_PARAM; +		goto err; +	} + +	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)) { +		v_ret = VIRTCHNL_STATUS_ERR_PARAM; +		goto err; +	} + +	len = sizeof(struct virtchnl_supported_rxdids); +	rxdid = kzalloc(len, GFP_KERNEL); +	if (!rxdid) { +		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; +		len = 0; +		goto err; +	} + +	/* Read flexiflag registers to determine whether the +	 * corresponding RXDID is configured and supported or not. +	 * Since Legacy 16byte descriptor format is not supported, +	 * start from Legacy 32byte descriptor. +	 */ +	for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { +		regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0)); +		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) +			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) +			rxdid->supported_rxdids |= BIT(i); +	} + +	pf->supported_rxdids = rxdid->supported_rxdids; + +err: +	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS, +				    v_ret, (u8 *)rxdid, len); +	kfree(rxdid); +	return ret; +} + +/**   * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization   * @vf: VF to enable/disable VLAN stripping for on initialization   * @@ -3490,6 +3670,9 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {  	.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,  	.add_vlan_msg = ice_vc_add_vlan_msg,  	.remove_vlan_msg = ice_vc_remove_vlan_msg, +	.query_rxdid = ice_vc_query_rxdid, +	.get_rss_hena = ice_vc_get_rss_hena, +	.set_rss_hena_msg = ice_vc_set_rss_hena,  	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,  	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,  	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg, @@ -3550,7 +3733,7 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)  		int result;  		if (!is_unicast_ether_addr(mac_addr) || -		    ether_addr_equal(mac_addr, vf->hw_lan_addr.addr)) +		    ether_addr_equal(mac_addr, vf->hw_lan_addr))  			continue;  		if (vf->pf_set_mac) { @@ -3624,6 +3807,9 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {  	.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,  	.add_vlan_msg = ice_vc_add_vlan_msg,  	.remove_vlan_msg = ice_vc_remove_vlan_msg, +	.query_rxdid = ice_vc_query_rxdid, +	.get_rss_hena = ice_vc_get_rss_hena, +	.set_rss_hena_msg = ice_vc_set_rss_hena,  	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,  	.dis_vlan_stripping 
= ice_vc_dis_vlan_stripping,  	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg, @@ -3764,6 +3950,15 @@ error_handler:  	case VIRTCHNL_OP_DEL_VLAN:  		err = ops->remove_vlan_msg(vf, msg);  		break; +	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS: +		err = ops->query_rxdid(vf); +		break; +	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: +		err = ops->get_rss_hena(vf); +		break; +	case VIRTCHNL_OP_SET_RSS_HENA: +		err = ops->set_rss_hena_msg(vf, msg); +		break;  	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:  		err = ops->ena_vlan_stripping(vf);  		break; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h index b5a3fd8adbb4..b454654d7b0c 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h @@ -17,6 +17,7 @@   * broadcast, and 16 for additional unicast/multicast filters   */  #define ICE_MAX_MACADDR_PER_VF		18 +#define ICE_FLEX_DESC_RXDID_MAX_NUM	64  struct ice_virtchnl_ops {  	int (*get_ver_msg)(struct ice_vf *vf, u8 *msg); @@ -35,6 +36,9 @@ struct ice_virtchnl_ops {  	int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);  	int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);  	int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg); +	int (*query_rxdid)(struct ice_vf *vf); +	int (*get_rss_hena)(struct ice_vf *vf); +	int (*set_rss_hena_msg)(struct ice_vf *vf, u8 *msg);  	int (*ena_vlan_stripping)(struct ice_vf *vf);  	int (*dis_vlan_stripping)(struct ice_vf *vf);  	int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c index 5a82216e7d03..7d547fa616fa 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c @@ -70,6 +70,11 @@ static const u32 rss_pf_allowlist_opcodes[] = {  	VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,  }; +/* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */ +static const u32 rx_flex_desc_allowlist_opcodes[] = { +	VIRTCHNL_OP_GET_SUPPORTED_RXDIDS, +}; +  /* VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF */  static const u32 adv_rss_pf_allowlist_opcodes[] = {  	VIRTCHNL_OP_ADD_RSS_CFG, VIRTCHNL_OP_DEL_RSS_CFG, @@ -96,6 +101,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {  	ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_REQ_QUEUES, req_queues_allowlist_opcodes),  	ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN, vlan_allowlist_opcodes),  	ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RSS_PF, rss_pf_allowlist_opcodes), +	ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, rx_flex_desc_allowlist_opcodes),  	ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes),  	ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),  	ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes), diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index c6a58343d81d..e6ef6b303222 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -113,7 +113,7 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)  	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))  		return -EINVAL; -	if (!pf->vsi[vf->lan_vsi_idx]) +	if (!ice_get_vf_vsi(vf))  		return -EINVAL;  	return 0; @@ -494,7 +494,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)  	vf_prof = fdir->fdir_prof[flow]; -	vf_vsi = pf->vsi[vf->lan_vsi_idx]; +	vf_vsi = ice_get_vf_vsi(vf);  	if (!vf_vsi) {  		dev_dbg(dev, 
"NULL vf %d vsi pointer\n", vf->vf_id);  		return; @@ -572,7 +572,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,  	pf = vf->pf;  	dev = ice_pf_to_dev(pf);  	hw = &pf->hw; -	vf_vsi = pf->vsi[vf->lan_vsi_idx]; +	vf_vsi = ice_get_vf_vsi(vf);  	if (!vf_vsi)  		return -EINVAL; @@ -1205,7 +1205,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,  	pf = vf->pf;  	dev = ice_pf_to_dev(pf);  	hw = &pf->hw; -	vsi = pf->vsi[vf->lan_vsi_idx]; +	vsi = ice_get_vf_vsi(vf);  	if (!vsi) {  		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);  		return -EINVAL; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 056c904b83cc..31565bbafa22 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -24,13 +24,24 @@ static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)   */  static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)  { -	memset(&vsi->rx_rings[q_idx]->rx_stats, 0, -	       sizeof(vsi->rx_rings[q_idx]->rx_stats)); -	memset(&vsi->tx_rings[q_idx]->stats, 0, -	       sizeof(vsi->tx_rings[q_idx]->stats)); +	struct ice_vsi_stats *vsi_stat; +	struct ice_pf *pf; + +	pf = vsi->back; +	if (!pf->vsi_stats) +		return; + +	vsi_stat = pf->vsi_stats[vsi->idx]; +	if (!vsi_stat) +		return; + +	memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0, +	       sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats)); +	memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0, +	       sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));  	if (ice_is_xdp_ena_vsi(vsi)) -		memset(&vsi->xdp_rings[q_idx]->stats, 0, -		       sizeof(vsi->xdp_rings[q_idx]->stats)); +		memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0, +		       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));  }  /** @@ -587,6 +598,112 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)  }  /** + * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ + * @xdp_ring: XDP Tx ring + */ +static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring) +{ +	u16 ntc = xdp_ring->next_to_clean; +	struct ice_tx_desc *tx_desc; +	u16 cnt = xdp_ring->count; +	struct ice_tx_buf *tx_buf; +	u16 completed_frames = 0; +	u16 xsk_frames = 0; +	u16 last_rs; +	int i; + +	last_rs = xdp_ring->next_to_use ? 
xdp_ring->next_to_use - 1 : cnt - 1; +	tx_desc = ICE_TX_DESC(xdp_ring, last_rs); +	if (tx_desc->cmd_type_offset_bsz & +	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) { +		if (last_rs >= ntc) +			completed_frames = last_rs - ntc + 1; +		else +			completed_frames = last_rs + cnt - ntc + 1; +	} + +	if (!completed_frames) +		return; + +	if (likely(!xdp_ring->xdp_tx_active)) { +		xsk_frames = completed_frames; +		goto skip; +	} + +	ntc = xdp_ring->next_to_clean; +	for (i = 0; i < completed_frames; i++) { +		tx_buf = &xdp_ring->tx_buf[ntc]; + +		if (tx_buf->type == ICE_TX_BUF_XSK_TX) { +			tx_buf->type = ICE_TX_BUF_EMPTY; +			xsk_buff_free(tx_buf->xdp); +			xdp_ring->xdp_tx_active--; +		} else { +			xsk_frames++; +		} + +		ntc++; +		if (ntc >= xdp_ring->count) +			ntc = 0; +	} +skip: +	tx_desc->cmd_type_offset_bsz = 0; +	xdp_ring->next_to_clean += completed_frames; +	if (xdp_ring->next_to_clean >= cnt) +		xdp_ring->next_to_clean -= cnt; +	if (xsk_frames) +		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); +} + +/** + * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX + * @xdp: XDP buffer to xmit + * @xdp_ring: XDP ring to produce descriptor onto + * + * note that this function works directly on xdp_buff, no need to convert + * it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning + * side will be able to xsk_buff_free() it. + * + * Returns ICE_XDP_TX for successfully produced desc, ICE_XDP_CONSUMED if there + * was not enough space on XDP ring + */ +static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp, +			      struct ice_tx_ring *xdp_ring) +{ +	u32 size = xdp->data_end - xdp->data; +	u32 ntu = xdp_ring->next_to_use; +	struct ice_tx_desc *tx_desc; +	struct ice_tx_buf *tx_buf; +	dma_addr_t dma; + +	if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring)) { +		ice_clean_xdp_irq_zc(xdp_ring); +		if (!ICE_DESC_UNUSED(xdp_ring)) { +			xdp_ring->ring_stats->tx_stats.tx_busy++; +			return ICE_XDP_CONSUMED; +		} +	} + +	dma = xsk_buff_xdp_get_dma(xdp); +	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size); + +	tx_buf = &xdp_ring->tx_buf[ntu]; +	tx_buf->xdp = xdp; +	tx_buf->type = ICE_TX_BUF_XSK_TX; +	tx_desc = ICE_TX_DESC(xdp_ring, ntu); +	tx_desc->buf_addr = cpu_to_le64(dma); +	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, +						      0, size, 0); +	xdp_ring->xdp_tx_active++; + +	if (++ntu == xdp_ring->count) +		ntu = 0; +	xdp_ring->next_to_use = ntu; + +	return ICE_XDP_TX; +} + +/**   * ice_run_xdp_zc - Executes an XDP program in zero-copy path   * @rx_ring: Rx ring   * @xdp: xdp_buff used as input to the XDP program @@ -619,7 +736,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,  	case XDP_PASS:  		break;  	case XDP_TX: -		result = ice_xmit_xdp_buff(xdp, xdp_ring); +		result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);  		if (result == ICE_XDP_CONSUMED)  			goto out_failure;  		break; @@ -722,7 +839,7 @@ construct_skb:  		/* XDP_PASS path */  		skb = ice_construct_skb_zc(rx_ring, xdp);  		if (!skb) { -			rx_ring->rx_stats.alloc_buf_failed++; +			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;  			break;  		} @@ -749,7 +866,7 @@ construct_skb:  	if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))  		failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc); -	ice_finalize_xdp_rx(xdp_ring, xdp_xmit); +	ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);  	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);  	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { @@ -765,75 +882,6 @@ construct_skb:  }  /** - * 
ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer - * @xdp_ring: XDP Tx ring - * @tx_buf: Tx buffer to clean - */ -static void -ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf) -{ -	xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf); -	xdp_ring->xdp_tx_active--; -	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma), -			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); -	dma_unmap_len_set(tx_buf, len, 0); -} - -/** - * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ - * @xdp_ring: XDP Tx ring - */ -static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring) -{ -	u16 ntc = xdp_ring->next_to_clean; -	struct ice_tx_desc *tx_desc; -	u16 cnt = xdp_ring->count; -	struct ice_tx_buf *tx_buf; -	u16 xsk_frames = 0; -	u16 last_rs; -	int i; - -	last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1; -	tx_desc = ICE_TX_DESC(xdp_ring, last_rs); -	if ((tx_desc->cmd_type_offset_bsz & -	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) { -		if (last_rs >= ntc) -			xsk_frames = last_rs - ntc + 1; -		else -			xsk_frames = last_rs + cnt - ntc + 1; -	} - -	if (!xsk_frames) -		return; - -	if (likely(!xdp_ring->xdp_tx_active)) -		goto skip; - -	ntc = xdp_ring->next_to_clean; -	for (i = 0; i < xsk_frames; i++) { -		tx_buf = &xdp_ring->tx_buf[ntc]; - -		if (tx_buf->raw_buf) { -			ice_clean_xdp_tx_buf(xdp_ring, tx_buf); -			tx_buf->raw_buf = NULL; -		} else { -			xsk_frames++; -		} - -		ntc++; -		if (ntc >= xdp_ring->count) -			ntc = 0; -	} -skip: -	tx_desc->cmd_type_offset_bsz = 0; -	xdp_ring->next_to_clean += xsk_frames; -	if (xdp_ring->next_to_clean >= cnt) -		xdp_ring->next_to_clean -= cnt; -	if (xsk_frames) -		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); -} - -/**   * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor   * @xdp_ring: XDP ring to produce the HW Tx descriptor on   * @desc: AF_XDP descriptor to pull the DMA address and length from @@ -907,20 +955,6 @@ static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *d  }  /** - * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU) - * @xdp_ring: XDP ring to produce the HW Tx descriptors on - */ -static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring) -{ -	u16 ntu = xdp_ring->next_to_use ? 
xdp_ring->next_to_use - 1 : xdp_ring->count - 1; -	struct ice_tx_desc *tx_desc; - -	tx_desc = ICE_TX_DESC(xdp_ring, ntu); -	tx_desc->cmd_type_offset_bsz |= -		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S); -} - -/**   * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring   * @xdp_ring: XDP ring to produce the HW Tx descriptors on   * @@ -1054,12 +1088,12 @@ void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)  	while (ntc != ntu) {  		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc]; -		if (tx_buf->raw_buf) -			ice_clean_xdp_tx_buf(xdp_ring, tx_buf); -		else +		if (tx_buf->type == ICE_TX_BUF_XSK_TX) { +			tx_buf->type = ICE_TX_BUF_EMPTY; +			xsk_buff_free(tx_buf->xdp); +		} else {  			xsk_frames++; - -		tx_buf->raw_buf = NULL; +		}  		ntc++;  		if (ntc >= xdp_ring->count) diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index e5f3e7680dc6..7d60da1b7bf4 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1413,6 +1413,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)  			*data = 1;  			return -1;  		} +		wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8); +		wr32(E1000_EIMS, BIT(0));  	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {  		shared_int = false;  		if (request_irq(irq, @@ -2311,15 +2313,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,  		ring = adapter->tx_ring[j];  		do { -			start = u64_stats_fetch_begin_irq(&ring->tx_syncp); +			start = u64_stats_fetch_begin(&ring->tx_syncp);  			data[i]   = ring->tx_stats.packets;  			data[i+1] = ring->tx_stats.bytes;  			data[i+2] = ring->tx_stats.restart_queue; -		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); +		} while (u64_stats_fetch_retry(&ring->tx_syncp, start));  		do { -			start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); +			start = u64_stats_fetch_begin(&ring->tx_syncp2);  			restart2  = ring->tx_stats.restart_queue2; -		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); +		} while (u64_stats_fetch_retry(&ring->tx_syncp2, start));  		data[i+2] += restart2;  		i += IGB_TX_QUEUE_STATS_LEN; @@ -2327,13 +2329,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,  	for (j = 0; j < adapter->num_rx_queues; j++) {  		ring = adapter->rx_ring[j];  		do { -			start = u64_stats_fetch_begin_irq(&ring->rx_syncp); +			start = u64_stats_fetch_begin(&ring->rx_syncp);  			data[i]   = ring->rx_stats.packets;  			data[i+1] = ring->rx_stats.bytes;  			data[i+2] = ring->rx_stats.drops;  			data[i+3] = ring->rx_stats.csum_err;  			data[i+4] = ring->rx_stats.alloc_failed; -		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); +		} while (u64_stats_fetch_retry(&ring->rx_syncp, start));  		i += IGB_RX_QUEUE_STATS_LEN;  	}  	spin_unlock(&adapter->stats64_lock); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f8e32833226c..03bc1e8af575 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1195,15 +1195,19 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,  		return -ENOMEM;  	ring_count = txr_count + rxr_count; -	size = struct_size(q_vector, ring, ring_count); +	size = kmalloc_size_roundup(struct_size(q_vector, ring, ring_count));  	/* allocate q_vector and rings */  	q_vector = adapter->q_vector[v_idx];  	if (!q_vector) {  		q_vector = kzalloc(size, GFP_KERNEL);  	} else if (size > ksize(q_vector)) { -		
kfree_rcu(q_vector, rcu); -		q_vector = kzalloc(size, GFP_KERNEL); +		struct igb_q_vector *new_q_vector; + +		new_q_vector = kzalloc(size, GFP_KERNEL); +		if (new_q_vector) +			kfree_rcu(q_vector, rcu); +		q_vector = new_q_vector;  	} else {  		memset(q_vector, 0, size);  	} @@ -2252,6 +2256,30 @@ static void igb_enable_mas(struct igb_adapter *adapter)  	}  } +#ifdef CONFIG_IGB_HWMON +/** + *  igb_set_i2c_bb - Init I2C interface + *  @hw: pointer to hardware structure + **/ +static void igb_set_i2c_bb(struct e1000_hw *hw) +{ +	u32 ctrl_ext; +	s32 i2cctl; + +	ctrl_ext = rd32(E1000_CTRL_EXT); +	ctrl_ext |= E1000_CTRL_I2C_ENA; +	wr32(E1000_CTRL_EXT, ctrl_ext); +	wrfl(); + +	i2cctl = rd32(E1000_I2CPARAMS); +	i2cctl |= E1000_I2CBB_EN +		| E1000_I2C_CLK_OE_N +		| E1000_I2C_DATA_OE_N; +	wr32(E1000_I2CPARAMS, i2cctl); +	wrfl(); +} +#endif +  void igb_reset(struct igb_adapter *adapter)  {  	struct pci_dev *pdev = adapter->pdev; @@ -2396,7 +2424,8 @@ void igb_reset(struct igb_adapter *adapter)  			 * interface.  			 */  			if (adapter->ets) -				mac->ops.init_thermal_sensor_thresh(hw); +				igb_set_i2c_bb(hw); +			mac->ops.init_thermal_sensor_thresh(hw);  		}  	}  #endif @@ -2806,6 +2835,22 @@ static int igb_offload_txtime(struct igb_adapter *adapter,  	return 0;  } +static int igb_tc_query_caps(struct igb_adapter *adapter, +			     struct tc_query_caps_base *base) +{ +	switch (base->type) { +	case TC_SETUP_QDISC_TAPRIO: { +		struct tc_taprio_caps *caps = base->caps; + +		caps->broken_mqprio = true; + +		return 0; +	} +	default: +		return -EOPNOTSUPP; +	} +} +  static LIST_HEAD(igb_block_cb_list);  static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, @@ -2814,6 +2859,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,  	struct igb_adapter *adapter = netdev_priv(dev);  	switch (type) { +	case TC_QUERY_CAPS: +		return igb_tc_query_caps(adapter, type_data);  	case TC_SETUP_QDISC_CBS:  		return igb_offload_cbs(adapter, type_data);  	case TC_SETUP_BLOCK: @@ -2867,8 +2914,14 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)  		bpf_prog_put(old_prog);  	/* bpf is just replaced, RXQ and MTU are already setup */ -	if (!need_reset) +	if (!need_reset) {  		return 0; +	} else { +		if (prog) +			xdp_features_set_redirect_target(dev, true); +		else +			xdp_features_clear_redirect_target(dev); +	}  	if (running)  		igb_open(dev); @@ -3113,21 +3166,12 @@ static void igb_init_mas(struct igb_adapter *adapter)   **/  static s32 igb_init_i2c(struct igb_adapter *adapter)  { -	struct e1000_hw *hw = &adapter->hw;  	s32 status = 0; -	s32 i2cctl;  	/* I2C interface supported on i350 devices */  	if (adapter->hw.mac.type != e1000_i350)  		return 0; -	i2cctl = rd32(E1000_I2CPARAMS); -	i2cctl |= E1000_I2CBB_EN -		| E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N -		| E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N; -	wr32(E1000_I2CPARAMS, i2cctl); -	wrfl(); -  	/* Initialize the i2c bus which is controlled by the registers.  	 * This bus will use the i2c_algo_bit structure that implements  	 * the protocol through toggling of the 4 bits in the register. 
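A note on the igb_alloc_q_vector() hunk above: the reallocation is reordered so the old q_vector is released (via kfree_rcu()) only after the replacement allocation has succeeded, so a failed kzalloc() can no longer leave a dangling pointer behind. A minimal sketch of that pattern in plain C, with calloc()/free() standing in for the kernel allocators (grow_zeroed is an illustrative name, not a kernel API):

#include <stdlib.h>

/* Grow a zeroed object without losing the original on allocation failure. */
static void *grow_zeroed(void *old, size_t new_size)
{
	void *fresh = calloc(1, new_size);

	if (!fresh)
		return NULL;	/* caller keeps using 'old' untouched */

	free(old);		/* release the old object only on success */
	return fresh;
}

If the allocation fails, the caller still owns the previous object, mirroring how adapter->q_vector[v_idx] now stays valid when a larger vector cannot be allocated.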
@@ -3190,8 +3234,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	if (err)  		goto err_pci_reg; -	pci_enable_pcie_error_reporting(pdev); -  	pci_set_master(pdev);  	pci_save_state(pdev); @@ -3313,6 +3355,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	netdev->priv_flags |= IFF_SUPP_NOFCS;  	netdev->priv_flags |= IFF_UNICAST_FLT; +	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;  	/* MTU range: 68 - 9216 */  	netdev->min_mtu = ETH_MIN_MTU; @@ -3517,6 +3560,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  			adapter->ets = true;  		else  			adapter->ets = false; +		/* Only enable I2C bit banging if an external thermal +		 * sensor is supported. +		 */ +		if (adapter->ets) +			igb_set_i2c_bb(hw); +		hw->mac.ops.init_thermal_sensor_thresh(hw);  		if (igb_sysfs_init(adapter))  			dev_err(&pdev->dev,  				"failed to allocate sysfs resources\n"); @@ -3622,7 +3671,6 @@ err_sw_init:  err_ioremap:  	free_netdev(netdev);  err_alloc_etherdev: -	pci_disable_pcie_error_reporting(pdev);  	pci_release_mem_regions(pdev);  err_pci_reg:  err_dma: @@ -3833,8 +3881,6 @@ static void igb_remove(struct pci_dev *pdev)  	kfree(adapter->shadow_vfta);  	free_netdev(netdev); -	pci_disable_pcie_error_reporting(pdev); -  	pci_disable_device(pdev);  } @@ -6632,10 +6678,10 @@ void igb_update_stats(struct igb_adapter *adapter)  		}  		do { -			start = u64_stats_fetch_begin_irq(&ring->rx_syncp); +			start = u64_stats_fetch_begin(&ring->rx_syncp);  			_bytes = ring->rx_stats.bytes;  			_packets = ring->rx_stats.packets; -		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); +		} while (u64_stats_fetch_retry(&ring->rx_syncp, start));  		bytes += _bytes;  		packets += _packets;  	} @@ -6648,10 +6694,10 @@ void igb_update_stats(struct igb_adapter *adapter)  	for (i = 0; i < adapter->num_tx_queues; i++) {  		struct igb_ring *ring = adapter->tx_ring[i];  		do { -			start = u64_stats_fetch_begin_irq(&ring->tx_syncp); +			start = u64_stats_fetch_begin(&ring->tx_syncp);  			_bytes = ring->tx_stats.bytes;  			_packets = ring->tx_stats.packets; -		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); +		} while (u64_stats_fetch_retry(&ring->tx_syncp, start));  		bytes += _bytes;  		packets += _packets;  	} @@ -6790,7 +6836,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)  	struct timespec64 ts;  	u32 tsauxc; -	if (pin < 0 || pin >= IGB_N_PEROUT) +	if (pin < 0 || pin >= IGB_N_SDP)  		return;  	spin_lock(&adapter->tmreg_lock); @@ -6798,7 +6844,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)  	if (hw->mac.type == e1000_82580 ||  	    hw->mac.type == e1000_i354 ||  	    hw->mac.type == e1000_i350) { -		s64 ns = timespec64_to_ns(&adapter->perout[pin].period); +		s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period);  		u32 systiml, systimh, level_mask, level, rem;  		u64 systim, now; @@ -6846,8 +6892,8 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)  		ts.tv_nsec = (u32)systim;  		ts.tv_sec  = ((u32)(systim >> 32)) & 0xFF;  	} else { -		ts = timespec64_add(adapter->perout[pin].start, -				    adapter->perout[pin].period); +		ts = timespec64_add(adapter->perout[tsintr_tt].start, +				    adapter->perout[tsintr_tt].period);  	}  	/* u32 conversion of tv_sec is safe until y2106 */ @@ -6856,7 +6902,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)  	tsauxc = rd32(E1000_TSAUXC);  	tsauxc |= TSAUXC_EN_TT0;  	
wr32(E1000_TSAUXC, tsauxc); -	adapter->perout[pin].start = ts; +	adapter->perout[tsintr_tt].start = ts;  	spin_unlock(&adapter->tmreg_lock);  } @@ -6870,7 +6916,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)  	struct ptp_clock_event event;  	struct timespec64 ts; -	if (pin < 0 || pin >= IGB_N_EXTTS) +	if (pin < 0 || pin >= IGB_N_SDP)  		return;  	if (hw->mac.type == e1000_82580 || @@ -7521,7 +7567,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)  {  	struct e1000_hw *hw = &adapter->hw;  	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; -	u32 reg, msgbuf[3]; +	u32 reg, msgbuf[3] = {};  	u8 *addr = (u8 *)(&msgbuf[1]);  	/* process all the same items cleared in a function level reset */ diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 15e57460e19e..6f471b91f562 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -195,23 +195,9 @@ static int igb_ptp_adjfine_82576(struct ptp_clock_info *ptp, long scaled_ppm)  	struct igb_adapter *igb = container_of(ptp, struct igb_adapter,  					       ptp_caps);  	struct e1000_hw *hw = &igb->hw; -	int neg_adj = 0; -	u64 rate; -	u32 incvalue; - -	if (scaled_ppm < 0) { -		neg_adj = 1; -		scaled_ppm = -scaled_ppm; -	} - -	incvalue = INCVALUE_82576; -	rate = mul_u64_u64_div_u64(incvalue, (u64)scaled_ppm, -				   1000000ULL << 16); +	u64 incvalue; -	if (neg_adj) -		incvalue -= rate; -	else -		incvalue += rate; +	incvalue = adjust_by_scaled_ppm(INCVALUE_82576, scaled_ppm);  	wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK)); diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 1e7e7071f64d..df3e26c0cf01 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -94,6 +94,8 @@ struct igc_ring {  	u8 queue_index;                 /* logical index of the ring*/  	u8 reg_idx;                     /* physical index of the ring */  	bool launchtime_enable;         /* true if LaunchTime is enabled */ +	ktime_t last_tx_cycle;          /* end of the cycle with a launchtime transmission */ +	ktime_t last_ff_cycle;          /* Last cycle with an active first flag */  	u32 start_time;  	u32 end_time; @@ -182,6 +184,7 @@ struct igc_adapter {  	ktime_t base_time;  	ktime_t cycle_time; +	bool qbv_enable;  	/* OS defined structs */  	struct pci_dev *pdev; diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index a15927e77272..a1d815af507d 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -396,6 +396,35 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw)  	rd32(IGC_MPC);  } +bool igc_is_device_id_i225(struct igc_hw *hw) +{ +	switch (hw->device_id) { +	case IGC_DEV_ID_I225_LM: +	case IGC_DEV_ID_I225_V: +	case IGC_DEV_ID_I225_I: +	case IGC_DEV_ID_I225_K: +	case IGC_DEV_ID_I225_K2: +	case IGC_DEV_ID_I225_LMVP: +	case IGC_DEV_ID_I225_IT: +		return true; +	default: +		return false; +	} +} + +bool igc_is_device_id_i226(struct igc_hw *hw) +{ +	switch (hw->device_id) { +	case IGC_DEV_ID_I226_LM: +	case IGC_DEV_ID_I226_V: +	case IGC_DEV_ID_I226_K: +	case IGC_DEV_ID_I226_IT: +		return true; +	default: +		return false; +	} +} +  static struct igc_mac_operations igc_mac_ops_base = {  	.init_hw		= igc_init_hw_base,  	.check_for_link		= igc_check_for_copper_link,
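The igc_is_device_id_i225()/igc_is_device_id_i226() helpers added above let later hunks gate behavior by controller generation: i225 keeps the "no future base time, no live reconfiguration" restrictions, while i226 lifts them through FutScdDis. A sketch of the intended call pattern (the policy function below is hypothetical and only paraphrases the taprio checks made elsewhere in this series):

/* Illustrative only; struct igc_hw and the helpers come from igc_base.h. */
static bool igc_qbv_schedule_allowed(struct igc_hw *hw,
				     bool base_time_in_future,
				     bool already_configured)
{
	if (igc_is_device_id_i225(hw))
		/* i225: reject future base times and reconfiguration */
		return !base_time_in_future && !already_configured;

	/* i226 (and later) can handle both cases */
	return igc_is_device_id_i226(hw);
}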
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index ce530f5fd7bd..7a992befca24 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -7,6 +7,8 @@  /* forward declaration */  void igc_rx_fifo_flush_base(struct igc_hw *hw);  void igc_power_down_phy_copper_base(struct igc_hw *hw); +bool igc_is_device_id_i225(struct igc_hw *hw); +bool igc_is_device_id_i226(struct igc_hw *hw);  /* Transmit Descriptor - Advanced */  union igc_adv_tx_desc { diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 4f9d7f013a95..9dec3563ce3a 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -321,6 +321,8 @@  #define IGC_ADVTXD_L4LEN_SHIFT	8  /* Adv ctxt L4LEN shift */  #define IGC_ADVTXD_MSS_SHIFT	16 /* Adv ctxt MSS shift */ +#define IGC_ADVTXD_TSN_CNTX_FIRST	0x00000080 +  /* Transmit Control */  #define IGC_TCTL_EN		0x00000002 /* enable Tx */  #define IGC_TCTL_PSP		0x00000008 /* pad short packets */ @@ -400,6 +402,15 @@  #define IGC_DTXMXPKTSZ_TSN	0x19 /* 1600 bytes of max TX DMA packet size */  #define IGC_DTXMXPKTSZ_DEFAULT	0x98 /* 9728-byte Jumbo frames */ +/* Transmit Scheduling Latency */ +/* Latency between transmission scheduling (LaunchTime) and the time + * the packet is transmitted to the network, in nanoseconds. + */ +#define IGC_TXOFFSET_SPEED_10	0x000034BC +#define IGC_TXOFFSET_SPEED_100	0x00000578 +#define IGC_TXOFFSET_SPEED_1000	0x0000012C +#define IGC_TXOFFSET_SPEED_2500	0x00000578 +  /* Time Sync Interrupt Causes */  #define IGC_TSICR_SYS_WRAP	BIT(0) /* SYSTIM Wrap around. */  #define IGC_TSICR_TXTS		BIT(1) /* Transmit Timestamp. */ @@ -464,7 +475,9 @@  #define IGC_TSAUXC_EN_TT0	BIT(0)  /* Enable target time 0. */  #define IGC_TSAUXC_EN_TT1	BIT(1)  /* Enable target time 1. */  #define IGC_TSAUXC_EN_CLK0	BIT(2)  /* Enable Configurable Frequency Clock 0. */ +#define IGC_TSAUXC_ST0		BIT(4)  /* Start Clock 0 Toggle on Target Time 0. */  #define IGC_TSAUXC_EN_CLK1	BIT(5)  /* Enable Configurable Frequency Clock 1. */ +#define IGC_TSAUXC_ST1		BIT(7)  /* Start Clock 1 Toggle on Target Time 1. */  #define IGC_TSAUXC_EN_TS0	BIT(8)  /* Enable hardware timestamp 0. */  #define IGC_TSAUXC_AUTT0	BIT(9)  /* Auxiliary Timestamp Taken. */  #define IGC_TSAUXC_EN_TS1	BIT(10) /* Enable hardware timestamp 0. 
*/ @@ -511,6 +524,7 @@  /* Transmit Scheduling */  #define IGC_TQAVCTRL_TRANSMIT_MODE_TSN	0x00000001  #define IGC_TQAVCTRL_ENHANCED_QAV	0x00000008 +#define IGC_TQAVCTRL_FUTSCDDIS		0x00000080  #define IGC_TXQCTL_QUEUE_MODE_LAUNCHT	0x00000001  #define IGC_TXQCTL_STRICT_CYCLE		0x00000002 diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 8cc077b712ad..5a26a7805ef8 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -839,15 +839,15 @@ static void igc_ethtool_get_stats(struct net_device *netdev,  		ring = adapter->tx_ring[j];  		do { -			start = u64_stats_fetch_begin_irq(&ring->tx_syncp); +			start = u64_stats_fetch_begin(&ring->tx_syncp);  			data[i]   = ring->tx_stats.packets;  			data[i + 1] = ring->tx_stats.bytes;  			data[i + 2] = ring->tx_stats.restart_queue; -		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); +		} while (u64_stats_fetch_retry(&ring->tx_syncp, start));  		do { -			start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); +			start = u64_stats_fetch_begin(&ring->tx_syncp2);  			restart2  = ring->tx_stats.restart_queue2; -		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); +		} while (u64_stats_fetch_retry(&ring->tx_syncp2, start));  		data[i + 2] += restart2;  		i += IGC_TX_QUEUE_STATS_LEN; @@ -855,13 +855,13 @@ static void igc_ethtool_get_stats(struct net_device *netdev,  	for (j = 0; j < adapter->num_rx_queues; j++) {  		ring = adapter->rx_ring[j];  		do { -			start = u64_stats_fetch_begin_irq(&ring->rx_syncp); +			start = u64_stats_fetch_begin(&ring->rx_syncp);  			data[i]   = ring->rx_stats.packets;  			data[i + 1] = ring->rx_stats.bytes;  			data[i + 2] = ring->rx_stats.drops;  			data[i + 3] = ring->rx_stats.csum_err;  			data[i + 4] = ring->rx_stats.alloc_failed; -		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); +		} while (u64_stats_fetch_retry(&ring->rx_syncp, start));  		i += IGC_RX_QUEUE_STATS_LEN;  	}  	spin_unlock(&adapter->stats64_lock); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 34889be63e78..2928a6c73692 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1000,25 +1000,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev)  	return netdev_mc_count(netdev);  } -static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime) +static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, +				bool *first_flag, bool *insert_empty)  { +	struct igc_adapter *adapter = netdev_priv(ring->netdev);  	ktime_t cycle_time = adapter->cycle_time;  	ktime_t base_time = adapter->base_time; +	ktime_t now = ktime_get_clocktai(); +	ktime_t baset_est, end_of_cycle;  	u32 launchtime; +	s64 n; -	/* FIXME: when using ETF together with taprio, we may have a -	 * case where 'delta' is larger than the cycle_time, this may -	 * cause problems if we don't read the current value of -	 * IGC_BASET, as the value writen into the launchtime -	 * descriptor field may be misinterpreted. 
+	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); + +	baset_est = ktime_add_ns(base_time, cycle_time * (n)); +	end_of_cycle = ktime_add_ns(baset_est, cycle_time); + +	if (ktime_compare(txtime, end_of_cycle) >= 0) { +		if (baset_est != ring->last_ff_cycle) { +			*first_flag = true; +			ring->last_ff_cycle = baset_est; + +			if (ktime_compare(txtime, ring->last_tx_cycle) > 0) +				*insert_empty = true; +		} +	} + +	/* Introduce a window at the end of the cycle during which packets +	 * may not honor their launchtime. A 5us window is chosen to account +	 * for the time software needs to update the tail pointer and for +	 * packets to be DMA'ed into the packet buffer.  	 */ -	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime); +	if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) +		netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", +			    txtime); + +	ring->last_tx_cycle = end_of_cycle; + +	launchtime = ktime_sub_ns(txtime, baset_est); +	if (launchtime > 0) +		div_s64_rem(launchtime, cycle_time, &launchtime); +	else +		launchtime = 0;  	return cpu_to_le32(launchtime);  } +static int igc_init_empty_frame(struct igc_ring *ring, +				struct igc_tx_buffer *buffer, +				struct sk_buff *skb) +{ +	unsigned int size; +	dma_addr_t dma; + +	size = skb_headlen(skb); + +	dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); +	if (dma_mapping_error(ring->dev, dma)) { +		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); +		return -ENOMEM; +	} + +	buffer->skb = skb; +	buffer->protocol = 0; +	buffer->bytecount = skb->len; +	buffer->gso_segs = 1; +	buffer->time_stamp = jiffies; +	dma_unmap_len_set(buffer, len, skb->len); +	dma_unmap_addr_set(buffer, dma, dma); + +	return 0; +} + +static int igc_init_tx_empty_descriptor(struct igc_ring *ring, +					struct sk_buff *skb, +					struct igc_tx_buffer *first) +{ +	union igc_adv_tx_desc *desc; +	u32 cmd_type, olinfo_status; +	int err; + +	if (!igc_desc_unused(ring)) +		return -EBUSY; + +	err = igc_init_empty_frame(ring, first, skb); +	if (err) +		return err; + +	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | +		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | +		   first->bytecount; +	olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + +	desc = IGC_TX_DESC(ring, ring->next_to_use); +	desc->read.cmd_type_len = cpu_to_le32(cmd_type); +	desc->read.olinfo_status = cpu_to_le32(olinfo_status); +	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); + +	netdev_tx_sent_queue(txring_txq(ring), skb->len); + +	first->next_to_watch = desc; + +	ring->next_to_use++; +	if (ring->next_to_use == ring->count) +		ring->next_to_use = 0; + +	return 0; +} + +#define IGC_EMPTY_FRAME_SIZE 60 +  static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, -			    struct igc_tx_buffer *first, +			    __le32 launch_time, bool first_flag,  			    u32 vlan_macip_lens, u32 type_tucmd,  			    u32 mss_l4len_idx)  { @@ -1037,26 +1130,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,  	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))  		mss_l4len_idx |= tx_ring->reg_idx << 4; +	if (first_flag) +		mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; +  	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);  	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);  	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx); - -	/* We assume there is always a valid Tx time available. Invalid times -	 * should have been handled by the upper layers. 
-	 */ -	if (tx_ring->launchtime_enable) { -		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); -		ktime_t txtime = first->skb->tstamp; - -		skb_txtime_consumed(first->skb); -		context_desc->launch_time = igc_tx_launchtime(adapter, -							      txtime); -	} else { -		context_desc->launch_time = 0; -	} +	context_desc->launch_time	= launch_time;  } -static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, +			__le32 launch_time, bool first_flag)  {  	struct sk_buff *skb = first->skb;  	u32 vlan_macip_lens = 0; @@ -1096,7 +1180,8 @@ no_csum:  	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;  	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; -	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); +	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, +			vlan_macip_lens, type_tucmd, 0);  }  static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) @@ -1320,6 +1405,7 @@ dma_error:  static int igc_tso(struct igc_ring *tx_ring,  		   struct igc_tx_buffer *first, +		   __le32 launch_time, bool first_flag,  		   u8 *hdr_len)  {  	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; @@ -1406,8 +1492,8 @@ static int igc_tso(struct igc_ring *tx_ring,  	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;  	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; -	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, -			type_tucmd, mss_l4len_idx); +	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, +			vlan_macip_lens, type_tucmd, mss_l4len_idx);  	return 1;  } @@ -1415,11 +1501,14 @@ static int igc_tso(struct igc_ring *tx_ring,  static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,  				       struct igc_ring *tx_ring)  { +	bool first_flag = false, insert_empty = false;  	u16 count = TXD_USE_COUNT(skb_headlen(skb));  	__be16 protocol = vlan_get_protocol(skb);  	struct igc_tx_buffer *first; +	__le32 launch_time = 0;  	u32 tx_flags = 0;  	unsigned short f; +	ktime_t txtime;  	u8 hdr_len = 0;  	int tso = 0; @@ -1433,11 +1522,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,  		count += TXD_USE_COUNT(skb_frag_size(  						&skb_shinfo(skb)->frags[f])); -	if (igc_maybe_stop_tx(tx_ring, count + 3)) { +	if (igc_maybe_stop_tx(tx_ring, count + 5)) {  		/* this is a hard error */  		return NETDEV_TX_BUSY;  	} +	if (!tx_ring->launchtime_enable) +		goto done; + +	txtime = skb->tstamp; +	skb->tstamp = ktime_set(0, 0); +	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); + +	if (insert_empty) { +		struct igc_tx_buffer *empty_info; +		struct sk_buff *empty; +		void *data; + +		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; +		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); +		if (!empty) +			goto done; + +		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); +		memset(data, 0, IGC_EMPTY_FRAME_SIZE); + +		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + +		if (igc_init_tx_empty_descriptor(tx_ring, +						 empty, +						 empty_info) < 0) +			dev_kfree_skb_any(empty); +	} + +done:  	/* record the location of the first descriptor for this packet */  	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];  	first->type = IGC_TX_BUFFER_TYPE_SKB; @@ -1474,11 +1592,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,  	first->tx_flags = tx_flags;  	first->protocol = protocol; -	tso = igc_tso(tx_ring, first, &hdr_len); +	tso = igc_tso(tx_ring, first, launch_time, first_flag, 
&hdr_len);  	if (tso < 0)  		goto out_drop;  	else if (!tso) -		igc_tx_csum(tx_ring, first); +		igc_tx_csum(tx_ring, first, launch_time, first_flag);  	igc_tx_map(tx_ring, first, hdr_len); @@ -2824,7 +2942,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)  		if (tx_buffer->next_to_watch &&  		    time_after(jiffies, tx_buffer->time_stamp +  		    (adapter->tx_timeout_factor * HZ)) && -		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) { +		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && +		    (rd32(IGC_TDH(tx_ring->reg_idx)) != +		     readl(tx_ring->tail))) {  			/* detected Tx unit hang */  			netdev_err(tx_ring->netdev,  				   "Detected Tx Unit Hang\n" @@ -4682,10 +4802,10 @@ void igc_update_stats(struct igc_adapter *adapter)  		}  		do { -			start = u64_stats_fetch_begin_irq(&ring->rx_syncp); +			start = u64_stats_fetch_begin(&ring->rx_syncp);  			_bytes = ring->rx_stats.bytes;  			_packets = ring->rx_stats.packets; -		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); +		} while (u64_stats_fetch_retry(&ring->rx_syncp, start));  		bytes += _bytes;  		packets += _packets;  	} @@ -4699,10 +4819,10 @@ void igc_update_stats(struct igc_adapter *adapter)  		struct igc_ring *ring = adapter->tx_ring[i];  		do { -			start = u64_stats_fetch_begin_irq(&ring->tx_syncp); +			start = u64_stats_fetch_begin(&ring->tx_syncp);  			_bytes = ring->tx_stats.bytes;  			_packets = ring->tx_stats.packets; -		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); +		} while (u64_stats_fetch_retry(&ring->tx_syncp, start));  		bytes += _bytes;  		packets += _packets;  	} @@ -4951,6 +5071,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)  }  /** + * igc_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: queue number that timed out + **/ +static void igc_tx_timeout(struct net_device *netdev, +			   unsigned int __always_unused txqueue) +{ +	struct igc_adapter *adapter = netdev_priv(netdev); +	struct igc_hw *hw = &adapter->hw; + +	/* Do the reset outside of interrupt context */ +	adapter->tx_timeout_count++; +	schedule_work(&adapter->reset_task); +	wr32(IGC_EICS, +	     (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +/**   * igc_get_stats64 - Get System Network Statistics   * @netdev: network interface device structure   * @stats: rtnl_link_stats64 pointer @@ -5377,10 +5515,17 @@ static void igc_watchdog_task(struct work_struct *work)  			case SPEED_100:  			case SPEED_1000:  			case SPEED_2500: -				adapter->tx_timeout_factor = 7; +				adapter->tx_timeout_factor = 1;  				break;  			} +			/* Once the launch time has been set on the wire, there +			 * is a delay before the link speed can be determined +			 * based on link-up activity. Write into the register +			 * as soon as we know the correct link speed. +			 */ +			igc_tsn_adjust_txtime_offset(adapter); +  			if (adapter->link_speed != SPEED_1000)  				goto no_wait; @@ -5833,6 +5978,7 @@ static bool validate_schedule(struct igc_adapter *adapter,  			      const struct tc_taprio_qopt_offload *qopt)  {  	int queue_uses[IGC_MAX_TX_QUEUES] = { };  +	struct igc_hw *hw = &adapter->hw;  	struct timespec64 now;  	size_t n; @@ -5845,8 +5991,10 @@ static bool validate_schedule(struct igc_adapter *adapter,  	 * in the future, it will hold all the packets until that  	 * time, causing a lot of TX Hangs, so to avoid that, we  	 * reject schedules that would start in the future. +	 * Note: This limitation no longer applies to i226.  	
*/ -	if (!is_base_time_past(qopt->base_time, &now)) +	if (!is_base_time_past(qopt->base_time, &now) && +	    igc_is_device_id_i225(hw))  		return false;  	for (n = 0; n < qopt->num_entries; n++) { @@ -5916,13 +6064,20 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,  				 struct tc_taprio_qopt_offload *qopt)  {  	bool queue_configured[IGC_MAX_TX_QUEUES] = { }; +	struct igc_hw *hw = &adapter->hw;  	u32 start_time = 0, end_time = 0;  	size_t n; +	int i; + +	adapter->qbv_enable = qopt->enable;  	if (!qopt->enable)  		return igc_tsn_clear_schedule(adapter); -	if (adapter->base_time) +	if (qopt->base_time < 0) +		return -ERANGE; + +	if (igc_is_device_id_i225(hw) && adapter->base_time)  		return -EALREADY;  	if (!validate_schedule(adapter, qopt))  		return -EINVAL; @@ -5933,10 +6088,24 @@  	for (n = 0; n < qopt->num_entries; n++) {  		struct tc_taprio_sched_entry *e = &qopt->entries[n]; -		int i;  		end_time += e->interval; +		/* If any of the conditions below are true, we need to manually +		 * control the end time of the cycle. +		 * 1. Qbv users can specify a cycle time that is not equal +		 * to the total GCL intervals. Hence, recalculation is +		 * necessary here to exclude the time interval that +		 * exceeds the cycle time. +		 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, +		 * once the end of the list is reached, it will switch +		 * to the END_OF_CYCLE state and leave the gates in the +		 * same state until the next cycle is started. +		 */ +		if (end_time > adapter->cycle_time || +		    n + 1 == qopt->num_entries) +			end_time = adapter->cycle_time; +  		for (i = 0; i < adapter->num_tx_queues; i++) {  			struct igc_ring *ring = adapter->tx_ring[i]; @@ -5957,6 +6126,18 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,  		start_time += e->interval;  	} +	/* Check whether a queue gets configured. +	 * If not, set both its start and end time to the cycle end time. 
+	 */ +	for (i = 0; i < adapter->num_tx_queues; i++) { +		if (!queue_configured[i]) { +			struct igc_ring *ring = adapter->tx_ring[i]; + +			ring->start_time = end_time; +			ring->end_time = end_time; +		} +	} +  	return 0;  } @@ -6044,12 +6225,35 @@ static int igc_tsn_enable_cbs(struct igc_adapter *adapter,  	return igc_tsn_offload_apply(adapter);  } +static int igc_tc_query_caps(struct igc_adapter *adapter, +			     struct tc_query_caps_base *base) +{ +	struct igc_hw *hw = &adapter->hw; + +	switch (base->type) { +	case TC_SETUP_QDISC_TAPRIO: { +		struct tc_taprio_caps *caps = base->caps; + +		caps->broken_mqprio = true; + +		if (hw->mac.type == igc_i225) +			caps->gate_mask_per_txq = true; + +		return 0; +	} +	default: +		return -EOPNOTSUPP; +	} +} +  static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,  			void *type_data)  {  	struct igc_adapter *adapter = netdev_priv(dev);  	switch (type) { +	case TC_QUERY_CAPS: +		return igc_tc_query_caps(adapter, type_data);  	case TC_SETUP_QDISC_TAPRIO:  		return igc_tsn_enable_qbv_scheduling(adapter, type_data); @@ -6163,6 +6367,7 @@ static const struct net_device_ops igc_netdev_ops = {  	.ndo_set_rx_mode	= igc_set_rx_mode,  	.ndo_set_mac_address	= igc_set_mac,  	.ndo_change_mtu		= igc_change_mtu, +	.ndo_tx_timeout		= igc_tx_timeout,  	.ndo_get_stats64	= igc_get_stats64,  	.ndo_fix_features	= igc_fix_features,  	.ndo_set_features	= igc_set_features, @@ -6273,8 +6478,6 @@ static int igc_probe(struct pci_dev *pdev,  	if (err)  		goto err_pci_reg; -	pci_enable_pcie_error_reporting(pdev); -  	err = pci_enable_ptm(pdev, NULL);  	if (err < 0)  		dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); @@ -6372,6 +6575,9 @@ static int igc_probe(struct pci_dev *pdev,  	netdev->mpls_features |= NETIF_F_HW_CSUM;  	netdev->hw_enc_features |= netdev->vlan_features; +	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | +			       NETDEV_XDP_ACT_XSK_ZEROCOPY; +  	/* MTU range: 68 - 9216 */  	netdev->min_mtu = ETH_MIN_MTU;  	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; @@ -6479,7 +6685,6 @@ err_sw_init:  err_ioremap:  	free_netdev(netdev);  err_alloc_etherdev: -	pci_disable_pcie_error_reporting(pdev);  	pci_release_mem_regions(pdev);  err_pci_reg:  err_dma: @@ -6527,8 +6732,6 @@ static void igc_remove(struct pci_dev *pdev)  	free_netdev(netdev); -	pci_disable_pcie_error_reporting(pdev); -  	pci_disable_device(pdev);  } diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 8dbb9f903ca7..4e10ced736db 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -322,7 +322,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,  		ts = ns_to_timespec64(ns);  		if (rq->perout.index == 1) {  			if (use_freq) { -				tsauxc_mask = IGC_TSAUXC_EN_CLK1; +				tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1;  				tsim_mask = 0;  			} else {  				tsauxc_mask = IGC_TSAUXC_EN_TT1; @@ -333,7 +333,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,  			freqout = IGC_FREQOUT1;  		} else {  			if (use_freq) { -				tsauxc_mask = IGC_TSAUXC_EN_CLK0; +				tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0;  				tsim_mask = 0;  			} else {  				tsauxc_mask = IGC_TSAUXC_EN_TT0; @@ -347,10 +347,12 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,  		tsauxc = rd32(IGC_TSAUXC);  		tsim = rd32(IGC_TSIM);  		if (rq->perout.index == 1) { -			tsauxc &= ~(IGC_TSAUXC_EN_TT1 | 
IGC_TSAUXC_EN_CLK1); +			tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 | +				    IGC_TSAUXC_ST1);  			tsim &= ~IGC_TSICR_TT1;  		} else { -			tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0); +			tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 | +				    IGC_TSAUXC_ST0);  			tsim &= ~IGC_TSICR_TT0;  		}  		if (on) { @@ -415,10 +417,12 @@ static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,   *   * We need to convert the system time value stored in the RX/TXSTMP registers   * into a hwtstamp which can be used by the upper level timestamping functions. + * + * Returns 0 on success.   **/ -static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, -				       struct skb_shared_hwtstamps *hwtstamps, -				       u64 systim) +static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, +				      struct skb_shared_hwtstamps *hwtstamps, +				      u64 systim)  {  	switch (adapter->hw.mac.type) {  	case igc_i225: @@ -428,8 +432,9 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,  						systim & 0xFFFFFFFF);  		break;  	default: -		break; +		return -EINVAL;  	} +	return 0;  }  /** @@ -650,7 +655,8 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)  	regval = rd32(IGC_TXSTMPL);  	regval |= (u64)rd32(IGC_TXSTMPH) << 32; -	igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); +	if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval)) +		return;  	switch (adapter->link_speed) {  	case SPEED_10: diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index c0d8214148d1..01c86d36856d 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -224,6 +224,7 @@  /* Transmit Scheduling Registers */  #define IGC_TQAVCTRL		0x3570  #define IGC_TXQCTL(_n)		(0x3344 + 0x4 * (_n)) +#define IGC_GTXOFFSET		0x3310  #define IGC_BASET_L		0x3314  #define IGC_BASET_H		0x3318  #define IGC_QBVCYCLET		0x331C diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index 0fce22de2ab8..a386c8d61dbf 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -2,6 +2,7 @@  /* Copyright (c)  2019 Intel Corporation */  #include "igc.h" +#include "igc_hw.h"  #include "igc_tsn.h"  static bool is_any_launchtime(struct igc_adapter *adapter) @@ -36,7 +37,7 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)  {  	unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; -	if (adapter->base_time) +	if (adapter->qbv_enable)  		new_flags |= IGC_FLAG_TSN_QBV_ENABLED;  	if (is_any_launchtime(adapter)) @@ -48,6 +49,35 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)  	return new_flags;  } +void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter) +{ +	struct igc_hw *hw = &adapter->hw; +	u16 txoffset; + +	if (!is_any_launchtime(adapter)) +		return; + +	switch (adapter->link_speed) { +	case SPEED_10: +		txoffset = IGC_TXOFFSET_SPEED_10; +		break; +	case SPEED_100: +		txoffset = IGC_TXOFFSET_SPEED_100; +		break; +	case SPEED_1000: +		txoffset = IGC_TXOFFSET_SPEED_1000; +		break; +	case SPEED_2500: +		txoffset = IGC_TXOFFSET_SPEED_2500; +		break; +	default: +		txoffset = 0; +		break; +	} + +	wr32(IGC_GTXOFFSET, txoffset); +} +  /* Returns the TSN specific registers to their default values after   * the adapter is reset.   */
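For reference, the IGC_TXOFFSET_SPEED_* constants that igc_tsn_adjust_txtime_offset() writes into IGC_GTXOFFSET decode to the wire latencies below; the helper is only a hypothetical decimal restatement of the switch above, not driver code:

/* LaunchTime-to-wire latency per link speed:
 *   10 Mbps  -> 0x34BC = 13500 ns
 *  100 Mbps  -> 0x0578 =  1400 ns
 *    1 Gbps  -> 0x012C =   300 ns
 *  2.5 Gbps  -> 0x0578 =  1400 ns (shares the 100 Mbps value)
 */
static unsigned int igc_txoffset_ns(unsigned int speed_mbps)
{
	switch (speed_mbps) {
	case 10:	return 13500;
	case 100:	return 1400;
	case 1000:	return 300;
	case 2500:	return 1400;
	default:	return 0;	/* unknown speed: no offset */
	}
}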
@@ -57,12 +87,14 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)  	u32 tqavctrl;  	int i; +	wr32(IGC_GTXOFFSET, 0);  	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);  	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);  	tqavctrl = rd32(IGC_TQAVCTRL);  	tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | -		      IGC_TQAVCTRL_ENHANCED_QAV); +		      IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS); +  	wr32(IGC_TQAVCTRL, tqavctrl);  	for (i = 0; i < adapter->num_tx_queues; i++) { @@ -87,20 +119,10 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)  	ktime_t base_time, systim;  	int i; -	cycle = adapter->cycle_time; -	base_time = adapter->base_time; -  	wr32(IGC_TSAUXC, 0);  	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);  	wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); -	tqavctrl = rd32(IGC_TQAVCTRL); -	tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; -	wr32(IGC_TQAVCTRL, tqavctrl); - -	wr32(IGC_QBVCYCLET_S, cycle); -	wr32(IGC_QBVCYCLET, cycle); -  	for (i = 0; i < adapter->num_tx_queues; i++) {  		struct igc_ring *ring = adapter->tx_ring[i];  		u32 txqctl = 0; @@ -110,15 +132,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)  		wr32(IGC_STQT(i), ring->start_time);  		wr32(IGC_ENDQT(i), ring->end_time); -		if (adapter->base_time) { -			/* If we have a base_time we are in "taprio" -			 * mode and we need to be strict about the -			 * cycles: only transmit a packet if it can be -			 * completed during that cycle. -			 */ -			txqctl |= IGC_TXQCTL_STRICT_CYCLE | -				IGC_TXQCTL_STRICT_END; -		} +		txqctl |= IGC_TXQCTL_STRICT_CYCLE | +			IGC_TXQCTL_STRICT_END;  		if (ring->launchtime_enable)  			txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; @@ -210,21 +225,46 @@ skip_cbs:  		wr32(IGC_TXQCTL(i), txqctl);  	} +	tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS; +	tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; + +	cycle = adapter->cycle_time; +	base_time = adapter->base_time; +  	nsec = rd32(IGC_SYSTIML);  	sec = rd32(IGC_SYSTIMH);  	systim = ktime_set(sec, nsec); -  	if (ktime_compare(systim, base_time) > 0) { -		s64 n; +		s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle); -		n = div64_s64(ktime_sub_ns(systim, base_time), cycle);  		base_time = ktime_add_ns(base_time, (n + 1) * cycle); +	} else { +		/* According to datasheet section 7.5.2.9.3.3, FutScdDis bit +		 * has to be configured before the cycle time and base time. +		 * Tx won't hang if a GCL is already running, +		 * so in this case we don't need to set FutScdDis. +		 */ +		if (igc_is_device_id_i226(hw) && +		    !(rd32(IGC_BASET_H) || rd32(IGC_BASET_L))) +			tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS;  	} -	baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); +	wr32(IGC_TQAVCTRL, tqavctrl); +	wr32(IGC_QBVCYCLET_S, cycle); +	wr32(IGC_QBVCYCLET, cycle); + +	baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);  	wr32(IGC_BASET_H, baset_h); + +	/* In i226, Future base time is only supported when FutScdDis bit +	 * is enabled and only active for re-configuration. +	 * In this case, initialize the base time with zero to create a +	 * "re-configuration" scenario, and only then set the desired base time. 
+	 */ +	if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS) +		wr32(IGC_BASET_L, 0);  	wr32(IGC_BASET_L, baset_l);  	return 0; @@ -251,17 +291,14 @@ int igc_tsn_reset(struct igc_adapter *adapter)  int igc_tsn_offload_apply(struct igc_adapter *adapter)  { -	int err; +	struct igc_hw *hw = &adapter->hw; -	if (netif_running(adapter->netdev)) { +	if (netif_running(adapter->netdev) && igc_is_device_id_i225(hw)) {  		schedule_work(&adapter->reset_task);  		return 0;  	} -	err = igc_tsn_enable_offload(adapter); -	if (err < 0) -		return err; +	igc_tsn_reset(adapter); -	adapter->flags = igc_tsn_new_flags(adapter);  	return 0;  } diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h index 1512307f5a52..b53e6af560b7 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.h +++ b/drivers/net/ethernet/intel/igc/igc_tsn.h @@ -6,5 +6,6 @@  int igc_tsn_offload_apply(struct igc_adapter *adapter);  int igc_tsn_reset(struct igc_adapter *adapter); +void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);  #endif /* _IGC_BASE_H */ diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c index aeeb34e64610..e27af72aada8 100644 --- a/drivers/net/ethernet/intel/igc/igc_xdp.c +++ b/drivers/net/ethernet/intel/igc/igc_xdp.c @@ -29,6 +29,11 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,  	if (old_prog)  		bpf_prog_put(old_prog); +	if (prog) +		xdp_features_set_redirect_target(dev, true); +	else +		xdp_features_clear_redirect_target(dev); +  	if (if_running)  		igc_open(dev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 5369a97ff5ec..8736ca4b2628 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -39,7 +39,10 @@  /* TX/RX descriptor defines */  #define IXGBE_DEFAULT_TXD		    512  #define IXGBE_DEFAULT_TX_WORK		    256 -#define IXGBE_MAX_TXD			   4096 +#define IXGBE_MAX_TXD_82598		   4096 +#define IXGBE_MAX_TXD_82599		   8192 +#define IXGBE_MAX_TXD_X540		   8192 +#define IXGBE_MAX_TXD_X550		  32768  #define IXGBE_MIN_TXD			     64  #if (PAGE_SIZE < 8192) @@ -47,7 +50,10 @@  #else  #define IXGBE_DEFAULT_RXD		    128  #endif -#define IXGBE_MAX_RXD			   4096 +#define IXGBE_MAX_RXD_82598		   4096 +#define IXGBE_MAX_RXD_82599		   8192 +#define IXGBE_MAX_RXD_X540		   8192 +#define IXGBE_MAX_RXD_X550		  32768  #define IXGBE_MIN_RXD			     64  /* flow control */ @@ -67,6 +73,8 @@  #define IXGBE_RXBUFFER_4K    4096  #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */ +#define IXGBE_PKT_HDR_PAD   (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) +  /* Attempt to maximize the headroom available for incoming frames.  We   * use a 2K buffer for receives and need 1536/1534 to store the data for   * the frame.  This leaves us with 512 bytes of room.  From that we need
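The new IXGBE_PKT_HDR_PAD define above expands to ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN = 14 + 4 + 8 = 26 bytes, the overhead the XDP MTU check in ixgbe_change_mtu() further down has to reserve. A standalone check of that arithmetic (header lengths restated here as assumptions, purely for illustration):

#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header */
#define ETH_FCS_LEN	4	/* frame check sequence */
#define VLAN_HLEN	4	/* one 802.1Q tag */
#define IXGBE_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define IXGBE_RXBUFFER_3K	3072

int main(void)
{
	/* largest MTU the XDP path accepts with a 3K Rx buffer */
	printf("max xdp mtu = %d\n", IXGBE_RXBUFFER_3K - IXGBE_PKT_HDR_PAD);
	return 0;	/* prints: max xdp mtu = 3046 */
}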
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 38c4609bd429..878dd8dff528 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3292,13 +3292,14 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)  s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,  				 bool *link_up, bool link_up_wait_to_complete)  { +	bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw);  	u32 links_reg, links_orig;  	u32 i;  	/* If Crosstalk fix enabled do the sanity check of making sure  	 * the SFP+ cage is full.  	 */ -	if (ixgbe_need_crosstalk_fix(hw)) { +	if (crosstalk_fix_active) {  		u32 sfp_cage_full;  		switch (hw->mac.type) { @@ -3346,10 +3347,24 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,  			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);  		}  	} else { -		if (links_reg & IXGBE_LINKS_UP) +		if (links_reg & IXGBE_LINKS_UP) { +			if (crosstalk_fix_active) { +				/* Check the link state again after a delay +				 * to filter out spurious link up +				 * notifications. +				 */ +				mdelay(5); +				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); +				if (!(links_reg & IXGBE_LINKS_UP)) { +					*link_up = false; +					*speed = IXGBE_LINK_SPEED_UNKNOWN; +					return 0; +				} +			}  			*link_up = true; -		else +		} else {  			*link_up = false; +		}  	}  	switch (links_reg & IXGBE_LINKS_SPEED_82599) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index e88e3dfac8c2..6cfc9dc16537 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1117,6 +1117,42 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,  	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;  } +static u32 ixgbe_get_max_rxd(struct ixgbe_adapter *adapter) +{ +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82598EB: +		return IXGBE_MAX_RXD_82598; +	case ixgbe_mac_82599EB: +		return IXGBE_MAX_RXD_82599; +	case ixgbe_mac_X540: +		return IXGBE_MAX_RXD_X540; +	case ixgbe_mac_X550: +	case ixgbe_mac_X550EM_x: +	case ixgbe_mac_x550em_a: +		return IXGBE_MAX_RXD_X550; +	default: +		return IXGBE_MAX_RXD_82598; +	} +} + +static u32 ixgbe_get_max_txd(struct ixgbe_adapter *adapter) +{ +	switch (adapter->hw.mac.type) { +	case ixgbe_mac_82598EB: +		return IXGBE_MAX_TXD_82598; +	case ixgbe_mac_82599EB: +		return IXGBE_MAX_TXD_82599; +	case ixgbe_mac_X540: +		return IXGBE_MAX_TXD_X540; +	case ixgbe_mac_X550: +	case ixgbe_mac_X550EM_x: +	case ixgbe_mac_x550em_a: +		return IXGBE_MAX_TXD_X550; +	default: +		return IXGBE_MAX_TXD_82598; +	} +} +  static void ixgbe_get_ringparam(struct net_device *netdev,  				struct ethtool_ringparam *ring,  				struct kernel_ethtool_ringparam *kernel_ring, @@ -1126,8 +1162,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,  	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];  	struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; -	ring->rx_max_pending = IXGBE_MAX_RXD; -	ring->tx_max_pending = IXGBE_MAX_TXD; +	ring->rx_max_pending = ixgbe_get_max_rxd(adapter); +	ring->tx_max_pending = ixgbe_get_max_txd(adapter);  	ring->rx_pending = rx_ring->count;  	ring->tx_pending = tx_ring->count;  } @@ -1146,11 +1182,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,  		return -EINVAL;  	new_tx_count = clamp_t(u32, ring->tx_pending, -			       IXGBE_MIN_TXD, IXGBE_MAX_TXD); 
+			       IXGBE_MIN_TXD, ixgbe_get_max_txd(adapter));  	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);  	new_rx_count = clamp_t(u32, ring->rx_pending, -			       IXGBE_MIN_RXD, IXGBE_MAX_RXD); +			       IXGBE_MIN_RXD, ixgbe_get_max_rxd(adapter));  	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);  	if ((new_tx_count == adapter->tx_ring_count) && @@ -1335,10 +1371,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,  		}  		do { -			start = u64_stats_fetch_begin_irq(&ring->syncp); +			start = u64_stats_fetch_begin(&ring->syncp);  			data[i]   = ring->stats.packets;  			data[i+1] = ring->stats.bytes; -		} while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +		} while (u64_stats_fetch_retry(&ring->syncp, start));  		i += 2;  	}  	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { @@ -1351,10 +1387,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,  		}  		do { -			start = u64_stats_fetch_begin_irq(&ring->syncp); +			start = u64_stats_fetch_begin(&ring->syncp);  			data[i]   = ring->stats.packets;  			data[i+1] = ring->stats.bytes; -		} while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +		} while (u64_stats_fetch_retry(&ring->syncp, start));  		i += 2;  	} @@ -1960,18 +1996,13 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,  				     unsigned int frame_size)  {  	unsigned char *data; -	bool match = true;  	frame_size >>= 1;  	data = page_address(rx_buffer->page) + rx_buffer->page_offset; -	if (data[3] != 0xFF || -	    data[frame_size + 10] != 0xBE || -	    data[frame_size + 12] != 0xAF) -		match = false; - -	return match; +	return data[3] == 0xFF && data[frame_size + 10] == 0xBE && +		data[frame_size + 12] == 0xAF;  }  static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 774de63dd93a..13a6fca31004 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -557,8 +557,10 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)  /**   * ixgbe_ipsec_add_sa - program device with a security association   * @xs: pointer to transformer state struct + * @extack: extack point to fill failure reason   **/ -static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) +static int ixgbe_ipsec_add_sa(struct xfrm_state *xs, +			      struct netlink_ext_ack *extack)  {  	struct net_device *dev = xs->xso.real_dev;  	struct ixgbe_adapter *adapter = netdev_priv(dev); @@ -570,18 +572,22 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)  	int i;  	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { -		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n", -			   xs->id.proto); +		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for ipsec offload");  		return -EINVAL;  	}  	if (xs->props.mode != XFRM_MODE_TRANSPORT) { -		netdev_err(dev, "Unsupported mode for ipsec offload\n"); +		NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload");  		return -EINVAL;  	}  	if (ixgbe_ipsec_check_mgmt_ip(xs)) { -		netdev_err(dev, "IPsec IP addr clash with mgmt filters\n"); +		NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters"); +		return -EINVAL; +	} + +	if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { +		NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");  		return -EINVAL;  	} @@ -589,14 +595,14 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)  		struct rx_sa rsa;  		if 
(xs->calg) { -			netdev_err(dev, "Compression offload not supported\n"); +			NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");  			return -EINVAL;  		}  		/* find the first unused index */  		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);  		if (ret < 0) { -			netdev_err(dev, "No space for SA in Rx table!\n"); +			NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");  			return ret;  		}  		sa_idx = (u16)ret; @@ -611,7 +617,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)  		/* get the key and salt */  		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);  		if (ret) { -			netdev_err(dev, "Failed to get key data for Rx SA table\n"); +			NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");  			return ret;  		} @@ -671,7 +677,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)  		} else {  			/* no match and no empty slot */ -			netdev_err(dev, "No space for SA in Rx IP SA table\n"); +			NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx IP SA table");  			memset(&rsa, 0, sizeof(rsa));  			return -ENOSPC;  		} @@ -706,7 +712,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)  		/* find the first unused index */  		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);  		if (ret < 0) { -			netdev_err(dev, "No space for SA in Tx table\n"); +			NL_SET_ERR_MSG_MOD(extack, "No space for SA in Tx table");  			return ret;  		}  		sa_idx = (u16)ret; @@ -720,7 +726,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)  		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);  		if (ret) { -			netdev_err(dev, "Failed to get key data for Tx SA table\n"); +			NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");  			memset(&tsa, 0, sizeof(tsa));  			return ret;  		} @@ -945,7 +951,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)  	memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));  	/* set up the HW offload */ -	err = ixgbe_ipsec_add_sa(xs); +	err = ixgbe_ipsec_add_sa(xs, NULL);  	if (err)  		goto err_aead; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 298cfbfcb7b6..773c35fecace 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6647,7 +6647,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,  			     rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)  		goto err; -	rx_ring->xdp_prog = adapter->xdp_prog; +	WRITE_ONCE(rx_ring->xdp_prog, adapter->xdp_prog);  	return 0;  err: @@ -6778,6 +6778,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)  }  /** + * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP + * @adapter: device handle, pointer to adapter + */ +static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter) +{ +	if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) +		return IXGBE_RXBUFFER_2K; +	else +		return IXGBE_RXBUFFER_3K; +} + +/**   * ixgbe_change_mtu - Change the Maximum Transfer Unit   * @netdev: network interface device structure   * @new_mtu: new value for maximum frame size @@ -6788,18 +6800,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)  {  	struct ixgbe_adapter *adapter = netdev_priv(netdev); -	if (adapter->xdp_prog) { -		int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + -				     VLAN_HLEN; -		int i; - -		for (i = 0; i < adapter->num_rx_queues; i++) { -			struct ixgbe_ring *ring = adapter->rx_ring[i]; +	if 
(ixgbe_enabled_xdp_adapter(adapter)) { +		int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD; -			if (new_frame_size > ixgbe_rx_bufsz(ring)) { -				e_warn(probe, "Requested MTU size is not supported with XDP\n"); -				return -EINVAL; -			} +		if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) { +			e_warn(probe, "Requested MTU size is not supported with XDP\n"); +			return -EINVAL;  		}  	} @@ -8937,7 +8943,8 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)  		int regnum = addr;  		if (devad != MDIO_DEVAD_NONE) -			regnum |= (devad << 16) | MII_ADDR_C45; +			return mdiobus_c45_read(adapter->mii_bus, prtad, +						devad, regnum);  		return mdiobus_read(adapter->mii_bus, prtad, regnum);  	} @@ -8960,7 +8967,8 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,  		int regnum = addr;  		if (devad != MDIO_DEVAD_NONE) -			regnum |= (devad << 16) | MII_ADDR_C45; +			return mdiobus_c45_write(adapter->mii_bus, prtad, devad, +						 regnum, value);  		return mdiobus_write(adapter->mii_bus, prtad, regnum, value);  	} @@ -9041,10 +9049,10 @@ static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,  	if (ring) {  		do { -			start = u64_stats_fetch_begin_irq(&ring->syncp); +			start = u64_stats_fetch_begin(&ring->syncp);  			packets = ring->stats.packets;  			bytes   = ring->stats.bytes; -		} while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +		} while (u64_stats_fetch_retry(&ring->syncp, start));  		stats->tx_packets += packets;  		stats->tx_bytes   += bytes;  	} @@ -9064,10 +9072,10 @@ static void ixgbe_get_stats64(struct net_device *netdev,  		if (ring) {  			do { -				start = u64_stats_fetch_begin_irq(&ring->syncp); +				start = u64_stats_fetch_begin(&ring->syncp);  				packets = ring->stats.packets;  				bytes   = ring->stats.bytes; -			} while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +			} while (u64_stats_fetch_retry(&ring->syncp, start));  			stats->rx_packets += packets;  			stats->rx_bytes   += bytes;  		} @@ -10297,14 +10305,15 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)  			synchronize_rcu();  		err = ixgbe_setup_tc(dev, adapter->hw_tcs); -		if (err) { -			rcu_assign_pointer(adapter->xdp_prog, old_prog); +		if (err)  			return -EINVAL; -		} +		if (!prog) +			xdp_features_clear_redirect_target(dev);  	} else { -		for (i = 0; i < adapter->num_rx_queues; i++) -			(void)xchg(&adapter->rx_ring[i]->xdp_prog, -			    adapter->xdp_prog); +		for (i = 0; i < adapter->num_rx_queues; i++) { +			WRITE_ONCE(adapter->rx_ring[i]->xdp_prog, +				   adapter->xdp_prog); +		}  	}  	if (old_prog) @@ -10320,6 +10329,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)  			if (adapter->xdp_ring[i]->xsk_pool)  				(void)ixgbe_xsk_wakeup(adapter->netdev, i,  						       XDP_WAKEUP_RX); +		xdp_features_set_redirect_target(dev, true);  	}  	return 0; @@ -10808,8 +10818,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  		goto err_pci_reg;  	} -	pci_enable_pcie_error_reporting(pdev); -  	pci_set_master(pdev);  	pci_save_state(pdev); @@ -11017,6 +11025,9 @@ skip_sriov:  	netdev->priv_flags |= IFF_UNICAST_FLT;  	netdev->priv_flags |= IFF_SUPP_NOFCS; +	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | +			       NETDEV_XDP_ACT_XSK_ZEROCOPY; +  	/* MTU range: 68 - 9710 */  	netdev->min_mtu = ETH_MIN_MTU;  	netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); @@ -11237,7 +11248,6 @@ err_ioremap:  	
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);  	free_netdev(netdev);  err_alloc_etherdev: -	pci_disable_pcie_error_reporting(pdev);  	pci_release_mem_regions(pdev);  err_pci_reg:  err_dma: @@ -11326,8 +11336,6 @@ static void ixgbe_remove(struct pci_dev *pdev)  	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);  	free_netdev(netdev); -	pci_disable_pcie_error_reporting(pdev); -  	if (disable_dev)  		pci_disable_device(pdev);  } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 24aa97f993ca..689470c1e8ad 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -680,14 +680,14 @@ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)  }  /** - *  ixgbe_mii_bus_read_generic - Read a clause 22/45 register with gssr flags + *  ixgbe_mii_bus_read_generic_c22 - Read a clause 22 register with gssr flags   *  @hw: pointer to hardware structure   *  @addr: address   *  @regnum: register number   *  @gssr: semaphore flags to acquire   **/ -static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr, -				      int regnum, u32 gssr) +static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr, +					  int regnum, u32 gssr)  {  	u32 hwaddr, cmd;  	s32 data; @@ -696,31 +696,52 @@ static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,  		return -EBUSY;  	hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; -	if (regnum & MII_ADDR_C45) { -		hwaddr |= regnum & GENMASK(21, 0); -		cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; -	} else { -		hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; -		cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | -			IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND; -	} +	hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; +	cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | +		IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;  	data = ixgbe_msca_cmd(hw, cmd);  	if (data < 0)  		goto mii_bus_read_done; -	/* For a clause 45 access the address cycle just completed, we still -	 * need to do the read command, otherwise just get the data -	 */ -	if (!(regnum & MII_ADDR_C45)) -		goto do_mii_bus_read; +	data = IXGBE_READ_REG(hw, IXGBE_MSRWD); +	data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0); + +mii_bus_read_done: +	hw->mac.ops.release_swfw_sync(hw, gssr); +	return data; +} + +/** + *  ixgbe_mii_bus_read_generic_c45 - Read a clause 45 register with gssr flags + *  @hw: pointer to hardware structure + *  @addr: address + *  @devad: device address to read + *  @regnum: register number + *  @gssr: semaphore flags to acquire + **/ +static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr, +					  int devad, int regnum, u32 gssr) +{ +	u32 hwaddr, cmd; +	s32 data; + +	if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) +		return -EBUSY; + +	hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; +	hwaddr |= devad << 16 | regnum; +	cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; + +	data = ixgbe_msca_cmd(hw, cmd); +	if (data < 0) +		goto mii_bus_read_done;  	cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND;  	data = ixgbe_msca_cmd(hw, cmd);  	if (data < 0)  		goto mii_bus_read_done; -do_mii_bus_read:  	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);  	data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0); @@ -730,15 +751,15 @@ mii_bus_read_done:  }  /** - *  ixgbe_mii_bus_write_generic - Write a clause 22/45 register with gssr flags + *  
ixgbe_mii_bus_write_generic_c22 - Write a clause 22 register with gssr flags   *  @hw: pointer to hardware structure   *  @addr: address   *  @regnum: register number   *  @val: value to write   *  @gssr: semaphore flags to acquire   **/ -static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr, -				       int regnum, u16 val, u32 gssr) +static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr, +					   int regnum, u16 val, u32 gssr)  {  	u32 hwaddr, cmd;  	s32 err; @@ -749,20 +770,43 @@ static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,  	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);  	hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; -	if (regnum & MII_ADDR_C45) { -		hwaddr |= regnum & GENMASK(21, 0); -		cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; -	} else { -		hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; -		cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | -			IXGBE_MSCA_MDI_COMMAND; -	} +	hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; +	cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | +		IXGBE_MSCA_MDI_COMMAND; + +	err = ixgbe_msca_cmd(hw, cmd); + +	hw->mac.ops.release_swfw_sync(hw, gssr); +	return err; +} + +/** + *  ixgbe_mii_bus_write_generic_c45 - Write a clause 45 register with gssr flags + *  @hw: pointer to hardware structure + *  @addr: address + *  @devad: device address to read + *  @regnum: register number + *  @val: value to write + *  @gssr: semaphore flags to acquire + **/ +static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr, +					   int devad, int regnum, u16 val, +					   u32 gssr) +{ +	u32 hwaddr, cmd; +	s32 err; + +	if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) +		return -EBUSY; + +	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val); + +	hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; +	hwaddr |= devad << 16 | regnum; +	cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; -	/* For clause 45 this is an address cycle, for clause 22 this is the -	 * entire transaction -	 */  	err = ixgbe_msca_cmd(hw, cmd); -	if (err < 0 || !(regnum & MII_ADDR_C45)) +	if (err < 0)  		goto mii_bus_write_done;  	cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND; @@ -774,70 +818,144 @@ mii_bus_write_done:  }  /** - *  ixgbe_mii_bus_read - Read a clause 22/45 register + *  ixgbe_mii_bus_read_c22 - Read a clause 22 register   *  @bus: pointer to mii_bus structure which points to our driver private   *  @addr: address   *  @regnum: register number   **/ -static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum) +static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)  {  	struct ixgbe_adapter *adapter = bus->priv;  	struct ixgbe_hw *hw = &adapter->hw;  	u32 gssr = hw->phy.phy_semaphore_mask; -	return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr); +	return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr);  }  /** - *  ixgbe_mii_bus_write - Write a clause 22/45 register + *  ixgbe_mii_bus_read_c45 - Read a clause 45 register   *  @bus: pointer to mii_bus structure which points to our driver private + *  @devad: device address to read   *  @addr: address   *  @regnum: register number + **/ +static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr, +				  int regnum) +{ +	struct ixgbe_adapter *adapter = bus->priv; +	struct ixgbe_hw *hw = &adapter->hw; +	u32 gssr = hw->phy.phy_semaphore_mask; + +	return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr); +} + +/** + *  
ixgbe_mii_bus_write_c22 - Write a clause 22 register + *  @bus: pointer to mii_bus structure which points to our driver private + *  @addr: address + *  @regnum: register number + *  @val: value to write + **/ +static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum, +				   u16 val) +{ +	struct ixgbe_adapter *adapter = bus->priv; +	struct ixgbe_hw *hw = &adapter->hw; +	u32 gssr = hw->phy.phy_semaphore_mask; + +	return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr); +} + +/** + *  ixgbe_mii_bus_write_c45 - Write a clause 45 register + *  @bus: pointer to mii_bus structure which points to our driver private + *  @addr: address + *  @devad: device address to read + *  @regnum: register number   *  @val: value to write   **/ -static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum, -			       u16 val) +static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad, +				   int regnum, u16 val)  {  	struct ixgbe_adapter *adapter = bus->priv;  	struct ixgbe_hw *hw = &adapter->hw;  	u32 gssr = hw->phy.phy_semaphore_mask; -	return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr); +	return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val, +					       gssr);  }  /** - *  ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a + *  ixgbe_x550em_a_mii_bus_read_c22 - Read a clause 22 register on x550em_a   *  @bus: pointer to mii_bus structure which points to our driver private   *  @addr: address   *  @regnum: register number   **/ -static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr, -				       int regnum) +static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr, +					   int regnum)  {  	struct ixgbe_adapter *adapter = bus->priv;  	struct ixgbe_hw *hw = &adapter->hw;  	u32 gssr = hw->phy.phy_semaphore_mask;  	gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; -	return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr); +	return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr);  }  /** - *  ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a + *  ixgbe_x550em_a_mii_bus_read_c45 - Read a clause 45 register on x550em_a + *  @bus: pointer to mii_bus structure which points to our driver private + *  @addr: address + *  @devad: device address to read + *  @regnum: register number + **/ +static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr, +					   int devad, int regnum) +{ +	struct ixgbe_adapter *adapter = bus->priv; +	struct ixgbe_hw *hw = &adapter->hw; +	u32 gssr = hw->phy.phy_semaphore_mask; + +	gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; +	return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr); +} + +/** + *  ixgbe_x550em_a_mii_bus_write_c22 - Write a clause 22 register on x550em_a   *  @bus: pointer to mii_bus structure which points to our driver private   *  @addr: address   *  @regnum: register number   *  @val: value to write   **/ -static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr, -					int regnum, u16 val) +static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr, +					    int regnum, u16 val)  {  	struct ixgbe_adapter *adapter = bus->priv;  	struct ixgbe_hw *hw = &adapter->hw;  	u32 gssr = hw->phy.phy_semaphore_mask;  	gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; -	return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr); +	return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr); +} + +/** + *  ixgbe_x550em_a_mii_bus_write_c45 - 
Write a clause 45 register on x550em_a + *  @bus: pointer to mii_bus structure which points to our driver private + *  @addr: address + *  @devad: device address to read + *  @regnum: register number + *  @val: value to write + **/ +static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr, +					    int devad, int regnum, u16 val) +{ +	struct ixgbe_adapter *adapter = bus->priv; +	struct ixgbe_hw *hw = &adapter->hw; +	u32 gssr = hw->phy.phy_semaphore_mask; + +	gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; +	return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val, +					       gssr);  }  /** @@ -855,9 +973,11 @@ static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)  	rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);  	if (rp_pdev && rp_pdev->subordinate) {  		bus = rp_pdev->subordinate->number; +		pci_dev_put(rp_pdev);  		return pci_get_domain_bus_and_slot(0, bus, 0);  	} +	pci_dev_put(rp_pdev);  	return NULL;  } @@ -874,6 +994,7 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)  	struct ixgbe_adapter *adapter = hw->back;  	struct pci_dev *pdev = adapter->pdev;  	struct pci_dev *func0_pdev; +	bool has_mii = false;  	/* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices  	 * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0 @@ -884,15 +1005,16 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)  	func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));  	if (func0_pdev) {  		if (func0_pdev == pdev) -			return true; -		else -			return false; +			has_mii = true; +		goto out;  	}  	func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));  	if (func0_pdev == pdev) -		return true; +		has_mii = true; -	return false; +out: +	pci_dev_put(func0_pdev); +	return has_mii;  }  /** @@ -905,8 +1027,11 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)   **/  s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)  { -	s32 (*write)(struct mii_bus *bus, int addr, int regnum, u16 val); -	s32 (*read)(struct mii_bus *bus, int addr, int regnum); +	s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val); +	s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum); +	s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum, +			 u16 val); +	s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);  	struct ixgbe_adapter *adapter = hw->back;  	struct pci_dev *pdev = adapter->pdev;  	struct device *dev = &adapter->netdev->dev; @@ -925,12 +1050,16 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)  	case IXGBE_DEV_ID_X550EM_A_1G_T_L:  		if (!ixgbe_x550em_a_has_mii(hw))  			return 0; -		read = &ixgbe_x550em_a_mii_bus_read; -		write = &ixgbe_x550em_a_mii_bus_write; +		read_c22 = ixgbe_x550em_a_mii_bus_read_c22; +		write_c22 = ixgbe_x550em_a_mii_bus_write_c22; +		read_c45 = ixgbe_x550em_a_mii_bus_read_c45; +		write_c45 = ixgbe_x550em_a_mii_bus_write_c45;  		break;  	default: -		read = &ixgbe_mii_bus_read; -		write = &ixgbe_mii_bus_write; +		read_c22 = ixgbe_mii_bus_read_c22; +		write_c22 = ixgbe_mii_bus_write_c22; +		read_c45 = ixgbe_mii_bus_read_c45; +		write_c45 = ixgbe_mii_bus_write_c45;  		break;  	} @@ -938,8 +1067,10 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)  	if (!bus)  		return -ENOMEM; -	bus->read = read; -	bus->write = write; +	bus->read = read_c22; +	bus->write = write_c22; +	bus->read_c45 = read_c45; +	bus->write_c45 = write_c45;  	/* Use the position of the device in the PCI hierarchy as the id */  	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", 
ixgbe_driver_name, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index f8605f57bd06..0310af851086 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -451,21 +451,11 @@ static int ixgbe_ptp_adjfine_82599(struct ptp_clock_info *ptp, long scaled_ppm)  	struct ixgbe_adapter *adapter =  		container_of(ptp, struct ixgbe_adapter, ptp_caps);  	struct ixgbe_hw *hw = &adapter->hw; -	u64 incval, diff; -	int neg_adj = 0; - -	if (scaled_ppm < 0) { -		neg_adj = 1; -		scaled_ppm = -scaled_ppm; -	} +	u64 incval;  	smp_mb();  	incval = READ_ONCE(adapter->base_incval); - -	diff = mul_u64_u64_div_u64(incval, scaled_ppm, -				   1000000ULL << 16); - -	incval = neg_adj ? (incval - diff) : (incval + diff); +	incval = adjust_by_scaled_ppm(incval, scaled_ppm);  	switch (hw->mac.type) {  	case ixgbe_mac_X540: @@ -502,17 +492,11 @@ static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm)  	struct ixgbe_adapter *adapter =  			container_of(ptp, struct ixgbe_adapter, ptp_caps);  	struct ixgbe_hw *hw = &adapter->hw; -	int neg_adj = 0; +	bool neg_adj;  	u64 rate;  	u32 inca; -	if (scaled_ppm < 0) { -		neg_adj = 1; -		scaled_ppm = -scaled_ppm; -	} - -	rate = mul_u64_u64_div_u64(IXGBE_X550_BASE_PERIOD, scaled_ppm, -				   1000000ULL << 16); +	neg_adj = diff_by_scaled_ppm(IXGBE_X550_BASE_PERIOD, scaled_ppm, &rate);  	/* warn if rate is too large */  	if (rate >= INCVALUE_MASK) @@ -1318,7 +1302,7 @@ static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)  	default:  		/* Other devices aren't supported */  		return; -	}; +	}  	IXGBE_WRITE_FLUSH(hw);  } diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index ccfa6b91aac6..296915414a7c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -458,10 +458,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,  		}  		do { -			start = u64_stats_fetch_begin_irq(&ring->syncp); +			start = u64_stats_fetch_begin(&ring->syncp);  			data[i]   = ring->stats.packets;  			data[i + 1] = ring->stats.bytes; -		} while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +		} while (u64_stats_fetch_retry(&ring->syncp, start));  		i += 2;  	} @@ -475,10 +475,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,  		}  		do { -			start = u64_stats_fetch_begin_irq(&ring->syncp); +			start = u64_stats_fetch_begin(&ring->syncp);  			data[i] = ring->stats.packets;  			data[i + 1] = ring->stats.bytes; -		} while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +		} while (u64_stats_fetch_retry(&ring->syncp, start));  		i += 2;  	} @@ -492,10 +492,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,  		}  		do { -			start = u64_stats_fetch_begin_irq(&ring->syncp); +			start = u64_stats_fetch_begin(&ring->syncp);  			data[i]   = ring->stats.packets;  			data[i + 1] = ring->stats.bytes; -		} while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +		} while (u64_stats_fetch_retry(&ring->syncp, start));  		i += 2;  	}  } diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index 9984ebc62d78..66cf17f19408 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -257,8 +257,10 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,  /**   * ixgbevf_ipsec_add_sa - program device with a 
security association   * @xs: pointer to transformer state struct + * @extack: extack point to fill failure reason   **/ -static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) +static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs, +				struct netlink_ext_ack *extack)  {  	struct net_device *dev = xs->xso.real_dev;  	struct ixgbevf_adapter *adapter; @@ -270,13 +272,17 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)  	ipsec = adapter->ipsec;  	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { -		netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n", -			   xs->id.proto); +		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");  		return -EINVAL;  	}  	if (xs->props.mode != XFRM_MODE_TRANSPORT) { -		netdev_err(dev, "Unsupported mode for ipsec offload\n"); +		NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload"); +		return -EINVAL; +	} + +	if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { +		NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");  		return -EINVAL;  	} @@ -284,14 +290,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)  		struct rx_sa rsa;  		if (xs->calg) { -			netdev_err(dev, "Compression offload not supported\n"); +			NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");  			return -EINVAL;  		}  		/* find the first unused index */  		ret = ixgbevf_ipsec_find_empty_idx(ipsec, true);  		if (ret < 0) { -			netdev_err(dev, "No space for SA in Rx table!\n"); +			NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");  			return ret;  		}  		sa_idx = (u16)ret; @@ -306,7 +312,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)  		/* get the key and salt */  		ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);  		if (ret) { -			netdev_err(dev, "Failed to get key data for Rx SA table\n"); +			NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");  			return ret;  		} @@ -345,7 +351,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)  		/* find the first unused index */  		ret = ixgbevf_ipsec_find_empty_idx(ipsec, false);  		if (ret < 0) { -			netdev_err(dev, "No space for SA in Tx table\n"); +			NL_SET_ERR_MSG_MOD(extack, "No space for SA in Tx table");  			return ret;  		}  		sa_idx = (u16)ret; @@ -359,7 +365,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)  		ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);  		if (ret) { -			netdev_err(dev, "Failed to get key data for Tx SA table\n"); +			NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");  			memset(&tsa, 0, sizeof(tsa));  			return ret;  		} diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index e338fa572793..a44e4bd56142 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2044,12 +2044,16 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,  	spin_unlock_bh(&adapter->mbx_lock); -	/* translate error return types so error makes sense */ -	if (err == IXGBE_ERR_MBX) -		return -EIO; +	if (err) { +		netdev_err(netdev, "VF could not set VLAN %d\n", vid); + +		/* translate error return types so error makes sense */ +		if (err == IXGBE_ERR_MBX) +			return -EIO; -	if (err == IXGBE_ERR_INVALID_ARGUMENT) -		return -EACCES; +		if (err == IXGBE_ERR_INVALID_ARGUMENT) +			return -EACCES; +	}  	set_bit(vid, adapter->active_vlans); @@ -2070,6 +2074,9 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,  	
spin_unlock_bh(&adapter->mbx_lock); +	if (err) +		netdev_err(netdev, "Could not remove VLAN %d\n", vid); +  	clear_bit(vid, adapter->active_vlans);  	return err; @@ -4350,10 +4357,10 @@ static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,  	if (ring) {  		do { -			start = u64_stats_fetch_begin_irq(&ring->syncp); +			start = u64_stats_fetch_begin(&ring->syncp);  			bytes = ring->stats.bytes;  			packets = ring->stats.packets; -		} while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +		} while (u64_stats_fetch_retry(&ring->syncp, start));  		stats->tx_bytes += bytes;  		stats->tx_packets += packets;  	} @@ -4376,10 +4383,10 @@ static void ixgbevf_get_stats(struct net_device *netdev,  	for (i = 0; i < adapter->num_rx_queues; i++) {  		ring = adapter->rx_ring[i];  		do { -			start = u64_stats_fetch_begin_irq(&ring->syncp); +			start = u64_stats_fetch_begin(&ring->syncp);  			bytes = ring->stats.bytes;  			packets = ring->stats.packets; -		} while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +		} while (u64_stats_fetch_retry(&ring->syncp, start));  		stats->rx_bytes += bytes;  		stats->rx_packets += packets;  	} @@ -4627,6 +4634,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  			    NETIF_F_HW_VLAN_CTAG_TX;  	netdev->priv_flags |= IFF_UNICAST_FLT; +	netdev->xdp_features = NETDEV_XDP_ACT_BASIC;  	/* MTU range: 68 - 1504 or 9710 */  	netdev->min_mtu = ETH_MIN_MTU;