Diffstat (limited to 'drivers/net/ethernet/intel')
45 files changed, 1233 insertions, 1019 deletions
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 714bd1014ddb..c0e17433f623 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -167,17 +167,6 @@ config IXGBE  	  To compile this driver as a module, choose M here. The module  	  will be called ixgbe. -config IXGBE_VXLAN -	bool "Virtual eXtensible Local Area Network Support" -	default n -	depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m) -	---help--- -	  This allows one to create VXLAN virtual interfaces that provide -	  Layer 2 Networks over Layer 3 Networks. VXLAN is often used -	  to tunnel virtual network infrastructure in virtualized environments. -	  Say Y here if you want to use Virtual eXtensible Local Area Network -	  (VXLAN) in the driver. -  config IXGBE_HWMON  	bool "Intel(R) 10GbE PCI Express adapters HWMON support"  	default y @@ -236,27 +225,6 @@ config I40E  	  To compile this driver as a module, choose M here. The module  	  will be called i40e. -config I40E_VXLAN -	bool "Virtual eXtensible Local Area Network Support" -	default n -	depends on I40E && VXLAN && !(I40E=y && VXLAN=m) -	---help--- -	  This allows one to create VXLAN virtual interfaces that provide -	  Layer 2 Networks over Layer 3 Networks. VXLAN is often used -	  to tunnel virtual network infrastructure in virtualized environments. -	  Say Y here if you want to use Virtual eXtensible Local Area Network -	  (VXLAN) in the driver. - -config I40E_GENEVE -	bool "Generic Network Virtualization Encapsulation (GENEVE) Support" -	depends on I40E && GENEVE && !(I40E=y && GENEVE=m) -	default n -	---help--- -	  This allows one to create GENEVE virtual interfaces that provide -	  Layer 2 Networks over Layer 3 Networks. GENEVE is often used -	  to tunnel virtual network infrastructure in virtualized environments. -	  Say Y here if you want to use GENEVE in the driver. -  config I40E_DCB  	bool "Data Center Bridging (DCB) Support"  	default n @@ -307,15 +275,4 @@ config FM10K  	  To compile this driver as a module, choose M here. The module  	  will be called fm10k.  MSI-X interrupt support is required -config FM10K_VXLAN -	bool "Virtual eXtensible Local Area Network Support" -	default n -	depends on FM10K && VXLAN && !(FM10K=y && VXLAN=m) -	---help--- -	  This allows one to create VXLAN virtual interfaces that provide -	  Layer 2 Networks over Layer 3 Networks. VXLAN is often used -	  to tunnel virtual network infrastructure in virtualized environments. -	  Say Y here if you want to use Virtual eXtensible Local Area Network -	  (VXLAN) in the driver. -  endif # NET_VENDOR_INTEL diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 75e60897b7e7..41f32c0b341e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2789,7 +2789,7 @@ static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)  }  /** - * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping + * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping   * @adapter: board private structure to initialize   **/  static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) @@ -4352,7 +4352,8 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)  			time_delta = systim_next - systim;  			temp = time_delta; -			rem = do_div(temp, incvalue); +			/* VMWare users have seen incvalue of zero, don't div / 0 */ +			rem = incvalue ? 
do_div(temp, incvalue) : (time_delta != 0);  			systim = systim_next; @@ -6915,6 +6916,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,  	if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))  		features &= ~NETIF_F_RXFCS; +	/* Since there is no support for separate Rx/Tx vlan accel +	 * enable/disable make sure Tx flag is always in same state as Rx. +	 */ +	if (features & NETIF_F_HW_VLAN_CTAG_RX) +		features |= NETIF_F_HW_VLAN_CTAG_TX; +	else +		features &= ~NETIF_F_HW_VLAN_CTAG_TX; +  	return features;  } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index fcf106e545c5..c4cf08dcf5af 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -362,6 +362,7 @@ enum fm10k_state_t {  	__FM10K_SERVICE_DISABLE,  	__FM10K_MBX_LOCK,  	__FM10K_LINK_DOWN, +	__FM10K_UPDATING_STATS,  };  static inline void fm10k_mbx_lock(struct fm10k_intfc *interface) @@ -406,7 +407,7 @@ static inline u16 fm10k_desc_unused(struct fm10k_ring *ring)  	 (&(((union fm10k_rx_desc *)((R)->desc))[i]))  #define FM10K_MAX_TXD_PWR	14 -#define FM10K_MAX_DATA_PER_TXD	BIT(FM10K_MAX_TXD_PWR) +#define FM10K_MAX_DATA_PER_TXD	(1u << FM10K_MAX_TXD_PWR)  /* Tx Descriptors needed, worst case */  #define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD) @@ -457,6 +458,7 @@ __be16 fm10k_tx_encap_offload(struct sk_buff *skb);  netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,  				  struct fm10k_ring *tx_ring);  void fm10k_tx_timeout_reset(struct fm10k_intfc *interface); +u64 fm10k_get_tx_pending(struct fm10k_ring *ring);  bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring);  void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index 5bbf19cfe29b..d6baaea8bc7c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -519,8 +519,12 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)  		goto out;  	/* interface cannot receive traffic without logical ports */ -	if (mac->dglort_map == FM10K_DGLORTMAP_NONE) +	if (mac->dglort_map == FM10K_DGLORTMAP_NONE) { +		if (hw->mac.ops.request_lport_map) +			ret_val = hw->mac.ops.request_lport_map(hw); +  		goto out; +	}  	/* if we passed all the tests above then the switch is ready and we no  	 * longer need to check for link diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 9c0d87503977..c04cbe9c9f7c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -76,6 +76,8 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = {  	FM10K_STAT("mac_rules_used", hw.swapi.mac.used),  	FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail), +	FM10K_STAT("reset_while_pending", hw.mac.reset_while_pending), +  	FM10K_STAT("tx_hang_count", tx_timeout_count),  }; @@ -983,9 +985,10 @@ void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)  		/* generate a new table if we weren't given one */  		for (j = 0; j < 4; j++) {  			if (indir) -				n = indir[i + j]; +				n = indir[4 * i + j];  			else -				n = ethtool_rxfh_indir_default(i + j, rss_i); +				n = ethtool_rxfh_indir_default(4 * i + j, +							       rss_i);  			table[j] = n;  		} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c 
b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 0e166e9c90c8..e9767b6366a8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -28,7 +28,7 @@  #include "fm10k.h" -#define DRV_VERSION	"0.19.3-k" +#define DRV_VERSION	"0.21.2-k"  #define DRV_SUMMARY	"Intel(R) Ethernet Switch Host Interface Driver"  const char fm10k_driver_version[] = DRV_VERSION;  char fm10k_driver_name[] = "fm10k"; @@ -56,7 +56,7 @@ static int __init fm10k_init_module(void)  	pr_info("%s\n", fm10k_copyright);  	/* create driver workqueue */ -	fm10k_workqueue = create_workqueue("fm10k"); +	fm10k_workqueue = alloc_workqueue("fm10k", WQ_MEM_RECLAIM, 0);  	fm10k_dbg_init(); @@ -77,7 +77,6 @@ static void __exit fm10k_exit_module(void)  	fm10k_dbg_exit();  	/* destroy driver workqueue */ -	flush_workqueue(fm10k_workqueue);  	destroy_workqueue(fm10k_workqueue);  }  module_exit(fm10k_exit_module); @@ -272,7 +271,7 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,  #if (PAGE_SIZE < 8192)  	unsigned int truesize = FM10K_RX_BUFSZ;  #else -	unsigned int truesize = SKB_DATA_ALIGN(size); +	unsigned int truesize = ALIGN(size, 512);  #endif  	unsigned int pull_len; @@ -1129,11 +1128,13 @@ static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)  	return ring->stats.packets;  } -static u64 fm10k_get_tx_pending(struct fm10k_ring *ring) +u64 fm10k_get_tx_pending(struct fm10k_ring *ring)  { -	/* use SW head and tail until we have real hardware */ -	u32 head = ring->next_to_clean; -	u32 tail = ring->next_to_use; +	struct fm10k_intfc *interface = ring->q_vector->interface; +	struct fm10k_hw *hw = &interface->hw; + +	u32 head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx)); +	u32 tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));  	return ((head <= tail) ? 
tail : tail + ring->count) - head;  } @@ -1857,7 +1858,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)  	if (v_budget < 0) {  		kfree(interface->msix_entries);  		interface->msix_entries = NULL; -		return -ENOMEM; +		return v_budget;  	}  	/* record the number of queues available for q_vectors */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h index b7dbc8a84c05..35c1dbad1330 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h @@ -41,6 +41,8 @@ struct fm10k_mbx_info;  #define FM10K_MBX_ACK_INTERRUPT			0x00000010  #define FM10K_MBX_INTERRUPT_ENABLE		0x00000020  #define FM10K_MBX_INTERRUPT_DISABLE		0x00000040 +#define FM10K_MBX_GLOBAL_REQ_INTERRUPT		0x00000200 +#define FM10K_MBX_GLOBAL_ACK_INTERRUPT		0x00000400  #define FM10K_MBICR(_n)		((_n) + 0x18840)  #define FM10K_GMBX		0x18842 diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 2a08d3f5b6df..20a5bbe3f536 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -20,9 +20,7 @@  #include "fm10k.h"  #include <linux/vmalloc.h> -#ifdef CONFIG_FM10K_VXLAN -#include <net/vxlan.h> -#endif /* CONFIG_FM10K_VXLAN */ +#include <net/udp_tunnel.h>  /**   * fm10k_setup_tx_resources - allocate Tx resources (Descriptors) @@ -434,8 +432,7 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)  /**   * fm10k_add_vxlan_port   * @netdev: network interface device structure - * @sa_family: Address family of new port - * @port: port number used for VXLAN + * @ti: Tunnel endpoint information   *   * This function is called when a new VXLAN interface has added a new port   * number to the range that is currently in use for VXLAN.  The new port @@ -444,18 +441,21 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)   * is always used as the VXLAN port number for offloads.   **/  static void fm10k_add_vxlan_port(struct net_device *dev, -				 sa_family_t sa_family, __be16 port) { +				 struct udp_tunnel_info *ti) +{  	struct fm10k_intfc *interface = netdev_priv(dev);  	struct fm10k_vxlan_port *vxlan_port; +	if (ti->type != UDP_TUNNEL_TYPE_VXLAN) +		return;  	/* only the PF supports configuring tunnels */  	if (interface->hw.mac.type != fm10k_mac_pf)  		return;  	/* existing ports are pulled out so our new entry is always last */  	fm10k_vxlan_port_for_each(vxlan_port, interface) { -		if ((vxlan_port->port == port) && -		    (vxlan_port->sa_family == sa_family)) { +		if ((vxlan_port->port == ti->port) && +		    (vxlan_port->sa_family == ti->sa_family)) {  			list_del(&vxlan_port->list);  			goto insert_tail;  		} @@ -465,8 +465,8 @@ static void fm10k_add_vxlan_port(struct net_device *dev,  	vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC);  	if (!vxlan_port)  		return; -	vxlan_port->port = port; -	vxlan_port->sa_family = sa_family; +	vxlan_port->port = ti->port; +	vxlan_port->sa_family = ti->sa_family;  insert_tail:  	/* add new port value to list */ @@ -478,8 +478,7 @@ insert_tail:  /**   * fm10k_del_vxlan_port   * @netdev: network interface device structure - * @sa_family: Address family of freed port - * @port: port number used for VXLAN + * @ti: Tunnel endpoint information   *   * This function is called when a new VXLAN interface has freed a port   * number from the range that is currently in use for VXLAN.  
The freed @@ -487,17 +486,20 @@ insert_tail:   * the port number for offloads.   **/  static void fm10k_del_vxlan_port(struct net_device *dev, -				 sa_family_t sa_family, __be16 port) { +				 struct udp_tunnel_info *ti) +{  	struct fm10k_intfc *interface = netdev_priv(dev);  	struct fm10k_vxlan_port *vxlan_port; +	if (ti->type != UDP_TUNNEL_TYPE_VXLAN) +		return;  	if (interface->hw.mac.type != fm10k_mac_pf)  		return;  	/* find the port in the list and free it */  	fm10k_vxlan_port_for_each(vxlan_port, interface) { -		if ((vxlan_port->port == port) && -		    (vxlan_port->sa_family == sa_family)) { +		if ((vxlan_port->port == ti->port) && +		    (vxlan_port->sa_family == ti->sa_family)) {  			list_del(&vxlan_port->list);  			kfree(vxlan_port);  			break; @@ -553,10 +555,8 @@ int fm10k_open(struct net_device *netdev)  	if (err)  		goto err_set_queues; -#ifdef CONFIG_FM10K_VXLAN  	/* update VXLAN port configuration */ -	vxlan_get_rx_port(netdev); -#endif +	udp_tunnel_get_rx_info(netdev);  	fm10k_up(interface); @@ -1375,8 +1375,8 @@ static const struct net_device_ops fm10k_netdev_ops = {  	.ndo_set_vf_vlan	= fm10k_ndo_set_vf_vlan,  	.ndo_set_vf_rate	= fm10k_ndo_set_vf_bw,  	.ndo_get_vf_config	= fm10k_ndo_get_vf_config, -	.ndo_add_vxlan_port	= fm10k_add_vxlan_port, -	.ndo_del_vxlan_port	= fm10k_del_vxlan_port, +	.ndo_udp_tunnel_add	= fm10k_add_vxlan_port, +	.ndo_udp_tunnel_del	= fm10k_del_vxlan_port,  	.ndo_dfwd_add_station	= fm10k_dfwd_add_station,  	.ndo_dfwd_del_station	= fm10k_dfwd_del_station,  #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index e05aca9bef0e..b8245c734c96 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -123,11 +123,24 @@ static void fm10k_service_timer(unsigned long data)  static void fm10k_detach_subtask(struct fm10k_intfc *interface)  {  	struct net_device *netdev = interface->netdev; +	u32 __iomem *hw_addr; +	u32 value;  	/* do nothing if device is still present or hw_addr is set */  	if (netif_device_present(netdev) || interface->hw.hw_addr)  		return; +	/* check the real address space to see if we've recovered */ +	hw_addr = READ_ONCE(interface->uc_addr); +	value = readl(hw_addr); +	if ((~value)) { +		interface->hw.hw_addr = interface->uc_addr; +		netif_device_attach(netdev); +		interface->flags |= FM10K_FLAG_RESET_REQUESTED; +		netdev_warn(netdev, "PCIe link restored, device now attached\n"); +		return; +	} +  	rtnl_lock();  	if (netif_running(netdev)) @@ -136,11 +149,9 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)  	rtnl_unlock();  } -static void fm10k_reinit(struct fm10k_intfc *interface) +static void fm10k_prepare_for_reset(struct fm10k_intfc *interface)  {  	struct net_device *netdev = interface->netdev; -	struct fm10k_hw *hw = &interface->hw; -	int err;  	WARN_ON(in_interrupt()); @@ -165,6 +176,19 @@ static void fm10k_reinit(struct fm10k_intfc *interface)  	/* delay any future reset requests */  	interface->last_reset = jiffies + (10 * HZ); +	rtnl_unlock(); +} + +static int fm10k_handle_reset(struct fm10k_intfc *interface) +{ +	struct net_device *netdev = interface->netdev; +	struct fm10k_hw *hw = &interface->hw; +	int err; + +	rtnl_lock(); + +	pci_set_master(interface->pdev); +  	/* reset and initialize the hardware so it is in a known state */  	err = hw->mac.ops.reset_hw(hw);  	if (err) { @@ -185,7 +209,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)  		goto reinit_err; 
 	} -	/* reassociate interrupts */ +	/* re-associate interrupts */  	err = fm10k_mbx_request_irq(interface);  	if (err)  		goto err_mbx_irq; @@ -219,7 +243,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)  	clear_bit(__FM10K_RESETTING, &interface->state); -	return; +	return err;  err_open:  	fm10k_mbx_free_irq(interface);  err_mbx_irq: @@ -230,6 +254,20 @@ reinit_err:  	rtnl_unlock();  	clear_bit(__FM10K_RESETTING, &interface->state); + +	return err; +} + +static void fm10k_reinit(struct fm10k_intfc *interface) +{ +	int err; + +	fm10k_prepare_for_reset(interface); + +	err = fm10k_handle_reset(interface); +	if (err) +		dev_err(&interface->pdev->dev, +			"fm10k_handle_reset failed: %d\n", err);  }  static void fm10k_reset_subtask(struct fm10k_intfc *interface) @@ -372,12 +410,19 @@ void fm10k_update_stats(struct fm10k_intfc *interface)  	u64 bytes, pkts;  	int i; +	/* ensure only one thread updates stats at a time */ +	if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state)) +		return; +  	/* do not allow stats update via service task for next second */  	interface->next_stats_update = jiffies + HZ;  	/* gather some stats to the interface struct that are per queue */  	for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) { -		struct fm10k_ring *tx_ring = interface->tx_ring[i]; +		struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]); + +		if (!tx_ring) +			continue;  		restart_queue += tx_ring->tx_stats.restart_queue;  		tx_busy += tx_ring->tx_stats.tx_busy; @@ -396,7 +441,10 @@ void fm10k_update_stats(struct fm10k_intfc *interface)  	/* gather some stats to the interface struct that are per queue */  	for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) { -		struct fm10k_ring *rx_ring = interface->rx_ring[i]; +		struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]); + +		if (!rx_ring) +			continue;  		bytes += rx_ring->stats.bytes;  		pkts += rx_ring->stats.packets; @@ -443,6 +491,8 @@ void fm10k_update_stats(struct fm10k_intfc *interface)  	/* Fill out the OS statistics structure */  	net_stats->rx_errors = rx_errors;  	net_stats->rx_dropped = interface->stats.nodesc_drop.count; + +	clear_bit(__FM10K_UPDATING_STATS, &interface->state);  }  /** @@ -1566,6 +1616,9 @@ void fm10k_up(struct fm10k_intfc *interface)  	/* configure interrupts */  	hw->mac.ops.update_int_moderator(hw); +	/* enable statistics capture again */ +	clear_bit(__FM10K_UPDATING_STATS, &interface->state); +  	/* clear down bit to indicate we are ready to go */  	clear_bit(__FM10K_DOWN, &interface->state); @@ -1598,10 +1651,11 @@ void fm10k_down(struct fm10k_intfc *interface)  {  	struct net_device *netdev = interface->netdev;  	struct fm10k_hw *hw = &interface->hw; -	int err; +	int err, i = 0, count = 0;  	/* signal that we are down to the interrupt handler and service task */ -	set_bit(__FM10K_DOWN, &interface->state); +	if (test_and_set_bit(__FM10K_DOWN, &interface->state)) +		return;  	/* call carrier off first to avoid false dev_watchdog timeouts */  	netif_carrier_off(netdev); @@ -1613,18 +1667,57 @@ void fm10k_down(struct fm10k_intfc *interface)  	/* reset Rx filters */  	fm10k_reset_rx_state(interface); -	/* allow 10ms for device to quiesce */ -	usleep_range(10000, 20000); -  	/* disable polling routines */  	fm10k_napi_disable_all(interface);  	/* capture stats one last time before stopping interface */  	fm10k_update_stats(interface); +	/* prevent updating statistics while we're down */ +	while (test_and_set_bit(__FM10K_UPDATING_STATS, 
&interface->state)) +		usleep_range(1000, 2000); + +	/* skip waiting for TX DMA if we lost PCIe link */ +	if (FM10K_REMOVED(hw->hw_addr)) +		goto skip_tx_dma_drain; + +	/* In some rare circumstances it can take a while for Tx queues to +	 * quiesce and be fully disabled. Attempt to .stop_hw() first, and +	 * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop +	 * until the Tx queues have emptied, or until a number of retries. If +	 * we fail to clear within the retry loop, we will issue a warning +	 * indicating that Tx DMA is probably hung. Note this means we call +	 * .stop_hw() twice but this shouldn't cause any problems. +	 */ +	err = hw->mac.ops.stop_hw(hw); +	if (err != FM10K_ERR_REQUESTS_PENDING) +		goto skip_tx_dma_drain; + +#define TX_DMA_DRAIN_RETRIES 25 +	for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) { +		usleep_range(10000, 20000); + +		/* start checking at the last ring to have pending Tx */ +		for (; i < interface->num_tx_queues; i++) +			if (fm10k_get_tx_pending(interface->tx_ring[i])) +				break; + +		/* if all the queues are drained, we can break now */ +		if (i == interface->num_tx_queues) +			break; +	} + +	if (count >= TX_DMA_DRAIN_RETRIES) +		dev_err(&interface->pdev->dev, +			"Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n", +			count); +skip_tx_dma_drain:  	/* Disable DMA engine for Tx/Rx */  	err = hw->mac.ops.stop_hw(hw); -	if (err) +	if (err == FM10K_ERR_REQUESTS_PENDING) +		dev_err(&interface->pdev->dev, +			"due to pending requests hw was not shut down gracefully\n"); +	else if (err)  		dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);  	/* free any buffers still on the rings */ @@ -1750,6 +1843,7 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,  	/* Start off interface as being down */  	set_bit(__FM10K_DOWN, &interface->state); +	set_bit(__FM10K_UPDATING_STATS, &interface->state);  	return 0;  } @@ -2033,6 +2127,48 @@ static void fm10k_remove(struct pci_dev *pdev)  	pci_disable_device(pdev);  } +static void fm10k_prepare_suspend(struct fm10k_intfc *interface) +{ +	/* the watchdog task reads from registers, which might appear like +	 * a surprise remove if the PCIe device is disabled while we're +	 * stopped. We stop the watchdog task until after we resume software +	 * activity. 
+	 */ +	set_bit(__FM10K_SERVICE_DISABLE, &interface->state); +	cancel_work_sync(&interface->service_task); + +	fm10k_prepare_for_reset(interface); +} + +static int fm10k_handle_resume(struct fm10k_intfc *interface) +{ +	struct fm10k_hw *hw = &interface->hw; +	int err; + +	/* reset statistics starting values */ +	hw->mac.ops.rebind_hw_stats(hw, &interface->stats); + +	err = fm10k_handle_reset(interface); +	if (err) +		return err; + +	/* assume host is not ready, to prevent race with watchdog in case we +	 * actually don't have connection to the switch +	 */ +	interface->host_ready = false; +	fm10k_watchdog_host_not_ready(interface); + +	/* force link to stay down for a second to prevent link flutter */ +	interface->link_down_event = jiffies + (HZ); +	set_bit(__FM10K_LINK_DOWN, &interface->state); + +	/* clear the service task disable bit to allow service task to start */ +	clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); +	fm10k_service_event_schedule(interface); + +	return err; +} +  #ifdef CONFIG_PM  /**   * fm10k_resume - Restore device to pre-sleep state @@ -2069,60 +2205,13 @@ static int fm10k_resume(struct pci_dev *pdev)  	/* refresh hw_addr in case it was dropped */  	hw->hw_addr = interface->uc_addr; -	/* reset hardware to known state */ -	err = hw->mac.ops.init_hw(&interface->hw); -	if (err) { -		dev_err(&pdev->dev, "init_hw failed: %d\n", err); -		return err; -	} - -	/* reset statistics starting values */ -	hw->mac.ops.rebind_hw_stats(hw, &interface->stats); - -	rtnl_lock(); - -	err = fm10k_init_queueing_scheme(interface); -	if (err) -		goto err_queueing_scheme; - -	err = fm10k_mbx_request_irq(interface); -	if (err) -		goto err_mbx_irq; - -	err = fm10k_hw_ready(interface); +	err = fm10k_handle_resume(interface);  	if (err) -		goto err_open; - -	err = netif_running(netdev) ? fm10k_open(netdev) : 0; -	if (err) -		goto err_open; - -	rtnl_unlock(); - -	/* assume host is not ready, to prevent race with watchdog in case we -	 * actually don't have connection to the switch -	 */ -	interface->host_ready = false; -	fm10k_watchdog_host_not_ready(interface); - -	/* clear the service task disable bit to allow service task to start */ -	clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); -	fm10k_service_event_schedule(interface); - -	/* restore SR-IOV interface */ -	fm10k_iov_resume(pdev); +		return err;  	netif_device_attach(netdev);  	return 0; -err_open: -	fm10k_mbx_free_irq(interface); -err_mbx_irq: -	fm10k_clear_queueing_scheme(interface); -err_queueing_scheme: -	rtnl_unlock(); - -	return err;  }  /** @@ -2142,27 +2231,7 @@ static int fm10k_suspend(struct pci_dev *pdev,  	netif_device_detach(netdev); -	fm10k_iov_suspend(pdev); - -	/* the watchdog tasks may read registers, which will appear like a -	 * surprise-remove event once the PCI device is disabled. This will -	 * cause us to close the netdevice, so we don't retain the open/closed -	 * state post-resume. Prevent this by disabling the service task while -	 * suspended, until we actually resume. 
-	 */ -	set_bit(__FM10K_SERVICE_DISABLE, &interface->state); -	cancel_work_sync(&interface->service_task); - -	rtnl_lock(); - -	if (netif_running(netdev)) -		fm10k_close(netdev); - -	fm10k_mbx_free_irq(interface); - -	fm10k_clear_queueing_scheme(interface); - -	rtnl_unlock(); +	fm10k_prepare_suspend(interface);  	err = pci_save_state(pdev);  	if (err) @@ -2195,17 +2264,7 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,  	if (state == pci_channel_io_perm_failure)  		return PCI_ERS_RESULT_DISCONNECT; -	rtnl_lock(); - -	if (netif_running(netdev)) -		fm10k_close(netdev); - -	fm10k_mbx_free_irq(interface); - -	/* free interrupts */ -	fm10k_clear_queueing_scheme(interface); - -	rtnl_unlock(); +	fm10k_prepare_suspend(interface);  	/* Request a slot reset. */  	return PCI_ERS_RESULT_NEED_RESET; @@ -2219,7 +2278,6 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,   */  static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)  { -	struct fm10k_intfc *interface = pci_get_drvdata(pdev);  	pci_ers_result_t result;  	if (pci_enable_device_mem(pdev)) { @@ -2237,12 +2295,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)  		pci_wake_from_d3(pdev, false); -		/* refresh hw_addr in case it was dropped */ -		interface->hw.hw_addr = interface->uc_addr; - -		interface->flags |= FM10K_FLAG_RESET_REQUESTED; -		fm10k_service_event_schedule(interface); -  		result = PCI_ERS_RESULT_RECOVERED;  	} @@ -2262,50 +2314,54 @@ static void fm10k_io_resume(struct pci_dev *pdev)  {  	struct fm10k_intfc *interface = pci_get_drvdata(pdev);  	struct net_device *netdev = interface->netdev; -	struct fm10k_hw *hw = &interface->hw; -	int err = 0; - -	/* reset hardware to known state */ -	err = hw->mac.ops.init_hw(&interface->hw); -	if (err) { -		dev_err(&pdev->dev, "init_hw failed: %d\n", err); -		return; -	} - -	/* reset statistics starting values */ -	hw->mac.ops.rebind_hw_stats(hw, &interface->stats); - -	rtnl_lock(); +	int err; -	err = fm10k_init_queueing_scheme(interface); -	if (err) { -		dev_err(&interface->pdev->dev, -			"init_queueing_scheme failed: %d\n", err); -		goto unlock; -	} +	err = fm10k_handle_resume(interface); -	/* reassociate interrupts */ -	fm10k_mbx_request_irq(interface); +	if (err) +		dev_warn(&pdev->dev, +			 "fm10k_io_resume failed: %d\n", err); +	else +		netif_device_attach(netdev); +} -	rtnl_lock(); -	if (netif_running(netdev)) -		err = fm10k_open(netdev); -	rtnl_unlock(); +/** + * fm10k_io_reset_notify - called when PCI function is reset + * @pdev: Pointer to PCI device + * + * This callback is called when the PCI function is reset such as from + * /sys/class/net/<enpX>/device/reset or similar. When prepare is true, it + * means we should prepare for a function reset. If prepare is false, it means + * the function reset just occurred. + */ +static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare) +{ +	struct fm10k_intfc *interface = pci_get_drvdata(pdev); +	int err = 0; -	/* final check of hardware state before registering the interface */ -	err = err ? 
: fm10k_hw_ready(interface); +	if (prepare) { +		/* warn incase we have any active VF devices */ +		if (pci_num_vf(pdev)) +			dev_warn(&pdev->dev, +				 "PCIe FLR may cause issues for any active VF devices\n"); -	if (!err) -		netif_device_attach(netdev); +		fm10k_prepare_suspend(interface); +	} else { +		err = fm10k_handle_resume(interface); +	} -unlock: -	rtnl_unlock(); +	if (err) { +		dev_warn(&pdev->dev, +			 "fm10k_io_reset_notify failed: %d\n", err); +		netif_device_detach(interface->netdev); +	}  }  static const struct pci_error_handlers fm10k_err_handler = {  	.error_detected = fm10k_io_error_detected,  	.slot_reset = fm10k_io_slot_reset,  	.resume = fm10k_io_resume, +	.reset_notify = fm10k_io_reset_notify,  };  static struct pci_driver fm10k_driver = { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index dc75507c9926..682299dd0ce4 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -51,34 +51,37 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)  	/* shut down all rings */  	err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES); -	if (err) +	if (err == FM10K_ERR_REQUESTS_PENDING) { +		hw->mac.reset_while_pending++; +		goto force_reset; +	} else if (err) {  		return err; +	}  	/* Verify that DMA is no longer active */  	reg = fm10k_read_reg(hw, FM10K_DMA_CTRL);  	if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))  		return FM10K_ERR_DMA_PENDING; -	/* verify the switch is ready for reset */ -	reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2); -	if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY)) -		goto out; - +force_reset:  	/* Inititate data path reset */ -	reg |= FM10K_DMA_CTRL_DATAPATH_RESET; +	reg = FM10K_DMA_CTRL_DATAPATH_RESET;  	fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);  	/* Flush write and allow 100us for reset to complete */  	fm10k_write_flush(hw);  	udelay(FM10K_RESET_TIMEOUT); +	/* Reset mailbox global interrupts */ +	reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT; +	fm10k_write_reg(hw, FM10K_GMBX, reg); +  	/* Verify we made it out of reset */  	reg = fm10k_read_reg(hw, FM10K_IP);  	if (!(reg & FM10K_IP_NOTINRESET)) -		err = FM10K_ERR_RESET_FAILED; +		return FM10K_ERR_RESET_FAILED; -out: -	return err; +	return 0;  }  /** @@ -1619,25 +1622,15 @@ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)   **/  static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)  { -	s32 ret_val = 0;  	u32 dma_ctrl2;  	/* verify the switch is ready for interaction */  	dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);  	if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY)) -		goto out; +		return 0;  	/* retrieve generic host state info */ -	ret_val = fm10k_get_host_state_generic(hw, switch_ready); -	if (ret_val) -		goto out; - -	/* interface cannot receive traffic without logical ports */ -	if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE) -		ret_val = fm10k_request_lport_map_pf(hw); - -out: -	return ret_val; +	return fm10k_get_host_state_generic(hw, switch_ready);  }  /* This structure defines the attibutes to be parsed below */ @@ -1813,6 +1806,7 @@ static const struct fm10k_mac_ops mac_ops_pf = {  	.set_dma_mask		= fm10k_set_dma_mask_pf,  	.get_fault		= fm10k_get_fault_pf,  	.get_host_state		= fm10k_get_host_state_pf, +	.request_lport_map	= fm10k_request_lport_map_pf,  };  static const struct fm10k_iov_ops iov_ops_pf = { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 
b8bc06183720..f4e75c498287 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -526,6 +526,7 @@ struct fm10k_mac_ops {  	s32 (*stop_hw)(struct fm10k_hw *);  	s32 (*get_bus_info)(struct fm10k_hw *);  	s32 (*get_host_state)(struct fm10k_hw *, bool *); +	s32 (*request_lport_map)(struct fm10k_hw *);  	s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool);  	s32 (*read_mac_addr)(struct fm10k_hw *);  	s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *, @@ -562,6 +563,7 @@ struct fm10k_mac_info {  	bool tx_ready;  	u32 dglort_map;  	u8 itr_scale; +	u64 reset_while_pending;  };  struct fm10k_swapi_table_info { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 3b06685ea63b..337ba65a9411 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -34,7 +34,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)  	/* we need to disable the queues before taking further steps */  	err = fm10k_stop_hw_generic(hw); -	if (err) +	if (err && err != FM10K_ERR_REQUESTS_PENDING)  		return err;  	/* If permanent address is set then we need to restore it */ @@ -67,7 +67,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)  		fm10k_write_reg(hw, FM10K_TDLEN(i), tdlen);  	} -	return 0; +	return err;  }  /** @@ -83,7 +83,9 @@ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)  	/* shut down queues we own and reset DMA configuration */  	err = fm10k_stop_hw_vf(hw); -	if (err) +	if (err == FM10K_ERR_REQUESTS_PENDING) +		hw->mac.reset_while_pending++; +	else if (err)  		return err;  	/* Inititate VF reset */ @@ -96,9 +98,9 @@ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)  	/* Clear reset bit and verify it was cleared */  	fm10k_write_reg(hw, FM10K_VFCTRL, 0);  	if (fm10k_read_reg(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST) -		err = FM10K_ERR_RESET_FAILED; +		return FM10K_ERR_RESET_FAILED; -	return err; +	return 0;  }  /** diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 9c44739da5e2..2a882916b4f6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -283,6 +283,7 @@ struct i40e_pf {  #endif /* I40E_FCOE */  	u16 num_lan_qps;           /* num lan queues this PF has set up */  	u16 num_lan_msix;          /* num queue vectors for the base PF vsi */ +	u16 num_fdsb_msix;         /* num queue vectors for sideband Fdir */  	u16 num_iwarp_msix;        /* num of iwarp vectors for this PF */  	int iwarp_base_vector;  	int queues_left;           /* queues left unclaimed */ @@ -447,6 +448,14 @@ struct i40e_pf {  	u16 phy_led_val;  }; +enum i40e_filter_state { +	I40E_FILTER_INVALID = 0,	/* Invalid state */ +	I40E_FILTER_NEW,		/* New, not sent to FW yet */ +	I40E_FILTER_ACTIVE,		/* Added to switch by FW */ +	I40E_FILTER_FAILED,		/* Rejected by FW */ +	I40E_FILTER_REMOVE,		/* To be removed */ +/* There is no 'removed' state; the filter struct is freed */ +};  struct i40e_mac_filter {  	struct list_head list;  	u8 macaddr[ETH_ALEN]; @@ -455,8 +464,7 @@ struct i40e_mac_filter {  	u8 counter;		/* number of instances of this filter */  	bool is_vf;		/* filter belongs to a VF */  	bool is_netdev;		/* filter belongs to a netdev */ -	bool changed;		/* filter needs to be sync'd to the HW */ -	bool is_laa;		/* filter is a Locally Administered Address */ +	enum i40e_filter_state state;  };  struct i40e_veb { @@ -522,6 +530,9 @@ struct i40e_vsi {  	struct i40e_ring **rx_rings;  	
struct i40e_ring **tx_rings; +	u32  active_filters; +	u32  promisc_threshold; +  	u16 work_limit;  	u16 int_rate_limit;  /* value in usecs */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 0e6ac841321c..e1370c556a3c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -980,13 +980,13 @@ int i40e_unregister_client(struct i40e_client *client)  	 * a close for each of the client instances that were opened.  	 * client_release function is called to handle this.  	 */ +	mutex_lock(&i40e_client_mutex);  	if (!client || i40e_client_release(client)) {  		ret = -EIO;  		goto out;  	}  	/* TODO: check if device is in reset, or if that matters? */ -	mutex_lock(&i40e_client_mutex);  	if (!i40e_client_is_registered(client)) {  		pr_info("i40e: Client %s has not been registered\n",  			client->name); @@ -1005,8 +1005,8 @@ int i40e_unregister_client(struct i40e_client *client)  		       client->name);  	} -	mutex_unlock(&i40e_client_mutex);  out: +	mutex_unlock(&i40e_client_mutex);  	return ret;  }  EXPORT_SYMBOL(i40e_unregister_client); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 422b41d61c9a..2154a34c1dd8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -61,7 +61,6 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)  		case I40E_DEV_ID_1G_BASE_T_X722:  		case I40E_DEV_ID_10G_BASE_T_X722:  		case I40E_DEV_ID_SFP_I_X722: -		case I40E_DEV_ID_QSFP_I_X722:  			hw->mac.type = I40E_MAC_X722;  			break;  		default: @@ -297,13 +296,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,  		   void *buffer, u16 buf_len)  {  	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; -	u16 len = le16_to_cpu(aq_desc->datalen); +	u16 len;  	u8 *buf = (u8 *)buffer;  	u16 i = 0;  	if ((!(mask & hw->debug_mask)) || (desc == NULL))  		return; +	len = le16_to_cpu(aq_desc->datalen); +  	i40e_debug(hw, mask,  		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",  		   le16_to_cpu(aq_desc->opcode), @@ -1967,6 +1968,62 @@ aq_add_vsi_exit:  }  /** + * i40e_aq_set_default_vsi + * @hw: pointer to the hw struct + * @seid: vsi number + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, +				    u16 seid, +				    struct i40e_asq_cmd_details *cmd_details) +{ +	struct i40e_aq_desc desc; +	struct i40e_aqc_set_vsi_promiscuous_modes *cmd = +		(struct i40e_aqc_set_vsi_promiscuous_modes *) +		&desc.params.raw; +	i40e_status status; + +	i40e_fill_default_direct_cmd_desc(&desc, +					  i40e_aqc_opc_set_vsi_promiscuous_modes); + +	cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); +	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); +	cmd->seid = cpu_to_le16(seid); + +	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +	return status; +} + +/** + * i40e_aq_clear_default_vsi + * @hw: pointer to the hw struct + * @seid: vsi number + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, +				      u16 seid, +				      struct i40e_asq_cmd_details *cmd_details) +{ +	struct i40e_aq_desc desc; +	struct i40e_aqc_set_vsi_promiscuous_modes *cmd = +		(struct i40e_aqc_set_vsi_promiscuous_modes *) +		&desc.params.raw; +	i40e_status status; + +	
i40e_fill_default_direct_cmd_desc(&desc, +					  i40e_aqc_opc_set_vsi_promiscuous_modes); + +	cmd->promiscuous_flags = cpu_to_le16(0); +	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); +	cmd->seid = cpu_to_le16(seid); + +	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +	return status; +} + +/**   * i40e_aq_set_vsi_unicast_promiscuous   * @hw: pointer to the hw struct   * @seid: vsi number diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index e6af8c8d7019..05cf9a719bab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -116,6 +116,14 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,  	return len;  } +static char *i40e_filter_state_string[] = { +	"INVALID", +	"NEW", +	"ACTIVE", +	"FAILED", +	"REMOVE", +}; +  /**   * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum   * @pf: the i40e_pf created in command write @@ -160,10 +168,14 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)  			 pf->hw.mac.port_addr);  	list_for_each_entry(f, &vsi->mac_filter_list, list) {  		dev_info(&pf->pdev->dev, -			 "    mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n", +			 "    mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d, state %s\n",  			 f->macaddr, f->vlan, f->is_netdev, f->is_vf, -			 f->counter); +			 f->counter, i40e_filter_state_string[f->state]);  	} +	dev_info(&pf->pdev->dev, "    active_filters %d, promisc_threshold %d, overflow promisc %s\n", +		 vsi->active_filters, vsi->promisc_threshold, +		 (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ? +		  "ON" : "OFF"));  	nstat = i40e_get_vsi_stats_struct(vsi);  	dev_info(&pf->pdev->dev,  		 "    net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index d701861c6e1e..dd4457d29e98 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -45,7 +45,6 @@  #define I40E_DEV_ID_1G_BASE_T_X722	0x37D1  #define I40E_DEV_ID_10G_BASE_T_X722	0x37D2  #define I40E_DEV_ID_SFP_I_X722		0x37D3 -#define I40E_DEV_ID_QSFP_I_X722		0x37D4  #define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \  					 (d) == I40E_DEV_ID_QSFP_B  || \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 5e8d84ff7d5f..c912e041d102 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -272,15 +272,16 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,  				     u32 *advertising)  {  	enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types; - +	struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;  	*supported = 0x0;  	*advertising = 0x0;  	if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {  		*supported |= SUPPORTED_Autoneg |  			      SUPPORTED_1000baseT_Full; -		*advertising |= ADVERTISED_Autoneg | -				ADVERTISED_1000baseT_Full; +		*advertising |= ADVERTISED_Autoneg; +		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) +			*advertising |= ADVERTISED_1000baseT_Full;  		if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {  			*supported |= SUPPORTED_100baseT_Full;  			*advertising |= ADVERTISED_100baseT_Full; @@ -299,8 +300,9 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf 
*pf, u32 *supported,  	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {  		*supported |= SUPPORTED_Autoneg |  			      SUPPORTED_10000baseT_Full; -		*advertising |= ADVERTISED_Autoneg | -				ADVERTISED_10000baseT_Full; +		*advertising |= ADVERTISED_Autoneg; +		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) +			*advertising |= ADVERTISED_10000baseT_Full;  	}  	if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||  	    phy_types & I40E_CAP_PHY_TYPE_XLPPI || @@ -310,15 +312,16 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,  	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {  		*supported |= SUPPORTED_Autoneg |  			      SUPPORTED_40000baseCR4_Full; -		*advertising |= ADVERTISED_Autoneg | -				ADVERTISED_40000baseCR4_Full; +		*advertising |= ADVERTISED_Autoneg; +		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB) +			*advertising |= ADVERTISED_40000baseCR4_Full;  	} -	if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) && -	    !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) { +	if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {  		*supported |= SUPPORTED_Autoneg |  			      SUPPORTED_100baseT_Full; -		*advertising |= ADVERTISED_Autoneg | -				ADVERTISED_100baseT_Full; +		*advertising |= ADVERTISED_Autoneg; +		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) +			*advertising |= ADVERTISED_100baseT_Full;  	}  	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||  	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || @@ -326,8 +329,9 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,  	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {  		*supported |= SUPPORTED_Autoneg |  			      SUPPORTED_1000baseT_Full; -		*advertising |= ADVERTISED_Autoneg | -				ADVERTISED_1000baseT_Full; +		*advertising |= ADVERTISED_Autoneg; +		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) +			*advertising |= ADVERTISED_1000baseT_Full;  	}  	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)  		*supported |= SUPPORTED_40000baseSR4_Full; @@ -342,26 +346,30 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,  	if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {  		*supported |= SUPPORTED_20000baseKR2_Full |  			      SUPPORTED_Autoneg; -		*advertising |= ADVERTISED_20000baseKR2_Full | -				ADVERTISED_Autoneg; +		*advertising |= ADVERTISED_Autoneg; +		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB) +			*advertising |= ADVERTISED_20000baseKR2_Full;  	}  	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {  		*supported |= SUPPORTED_10000baseKR_Full |  			      SUPPORTED_Autoneg; -		*advertising |= ADVERTISED_10000baseKR_Full | -				ADVERTISED_Autoneg; +		*advertising |= ADVERTISED_Autoneg; +		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) +			*advertising |= ADVERTISED_10000baseKR_Full;  	}  	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {  		*supported |= SUPPORTED_10000baseKX4_Full |  			      SUPPORTED_Autoneg; -		*advertising |= ADVERTISED_10000baseKX4_Full | -				ADVERTISED_Autoneg; +		*advertising |= ADVERTISED_Autoneg; +		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) +			*advertising |= ADVERTISED_10000baseKX4_Full;  	}  	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {  		*supported |= SUPPORTED_1000baseKX_Full |  			      SUPPORTED_Autoneg; -		*advertising |= ADVERTISED_1000baseKX_Full | -				ADVERTISED_Autoneg; +		*advertising |= ADVERTISED_Autoneg; +		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) +			*advertising |= ADVERTISED_1000baseKX_Full;  	}  } @@ -453,6 +461,7 @@ static void 
i40e_get_settings_link_up(struct i40e_hw *hw,  	case I40E_PHY_TYPE_10GBASE_SFPP_CU:  	case I40E_PHY_TYPE_10GBASE_AOC:  		ecmd->supported = SUPPORTED_10000baseT_Full; +		ecmd->advertising = SUPPORTED_10000baseT_Full;  		break;  	case I40E_PHY_TYPE_SGMII:  		ecmd->supported = SUPPORTED_Autoneg | @@ -663,6 +672,7 @@ static int i40e_set_settings(struct net_device *netdev,  	if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&  	    hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&  	    hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && +	    hw->phy.media_type != I40E_MEDIA_TYPE_DA &&  	    hw->phy.link_info.link_info & I40E_AQ_LINK_UP)  		return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 5ea22008d721..339d99be4702 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -31,12 +31,7 @@  /* Local includes */  #include "i40e.h"  #include "i40e_diag.h" -#if IS_ENABLED(CONFIG_VXLAN) -#include <net/vxlan.h> -#endif -#if IS_ENABLED(CONFIG_GENEVE) -#include <net/geneve.h> -#endif +#include <net/udp_tunnel.h>  const char i40e_driver_name[] = "i40e";  static const char i40e_driver_string[] = @@ -45,8 +40,8 @@ static const char i40e_driver_string[] =  #define DRV_KERN "-k"  #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 16 +#define DRV_VERSION_MINOR 6 +#define DRV_VERSION_BUILD 11  #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \  	     __stringify(DRV_VERSION_MINOR) "." \  	     __stringify(DRV_VERSION_BUILD)    DRV_KERN @@ -91,7 +86,6 @@ static const struct pci_device_id i40e_pci_tbl[] = {  	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},  	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},  	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, -	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0},  	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},  	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},  	/* required last entry */ @@ -1280,8 +1274,9 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,  		    (is_vf == f->is_vf) &&  		    (is_netdev == f->is_netdev)) {  			f->counter--; -			f->changed = true;  			changed = 1; +			if (f->counter == 0) +				f->state = I40E_FILTER_REMOVE;  		}  	}  	if (changed) { @@ -1297,29 +1292,32 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,   * @vsi: the PF Main VSI - inappropriate for any other VSI   * @macaddr: the MAC address   * - * Some older firmware configurations set up a default promiscuous VLAN - * filter that needs to be removed. + * Remove whatever filter the firmware set up so the driver can manage + * its own filtering intelligently.   **/ -static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) +static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)  {  	struct i40e_aqc_remove_macvlan_element_data element;  	struct i40e_pf *pf = vsi->back; -	i40e_status ret;  	/* Only appropriate for the PF main VSI */  	if (vsi->type != I40E_VSI_MAIN) -		return -EINVAL; +		return;  	memset(&element, 0, sizeof(element));  	ether_addr_copy(element.mac_addr, macaddr);  	element.vlan_tag = 0; -	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | -			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; -	ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); -	if (ret) -		return -ENOENT; +	/* Ignore error returns, some firmware does it this way... 
*/ +	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; +	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); -	return 0; +	memset(&element, 0, sizeof(element)); +	ether_addr_copy(element.mac_addr, macaddr); +	element.vlan_tag = 0; +	/* ...and some firmware does it this way. */ +	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | +			I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; +	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);  }  /** @@ -1340,10 +1338,18 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,  					bool is_vf, bool is_netdev)  {  	struct i40e_mac_filter *f; +	int changed = false;  	if (!vsi || !macaddr)  		return NULL; +	/* Do not allow broadcast filter to be added since broadcast filter +	 * is added as part of add VSI for any newly created VSI except +	 * FDIR VSI +	 */ +	if (is_broadcast_ether_addr(macaddr)) +		return NULL; +  	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);  	if (!f) {  		f = kzalloc(sizeof(*f), GFP_ATOMIC); @@ -1352,8 +1358,15 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,  		ether_addr_copy(f->macaddr, macaddr);  		f->vlan = vlan; -		f->changed = true; - +		/* If we're in overflow promisc mode, set the state directly +		 * to failed, so we don't bother to try sending the filter +		 * to the hardware. +		 */ +		if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) +			f->state = I40E_FILTER_FAILED; +		else +			f->state = I40E_FILTER_NEW; +		changed = true;  		INIT_LIST_HEAD(&f->list);  		list_add_tail(&f->list, &vsi->mac_filter_list);  	} @@ -1373,10 +1386,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,  		f->counter++;  	} -	/* changed tells sync_filters_subtask to -	 * push the filter down to the firmware -	 */ -	if (f->changed) { +	if (changed) {  		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;  		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;  	} @@ -1395,6 +1405,9 @@ add_filter_out:   *   * NOTE: This function is expected to be called with mac_filter_list_lock   * being held. + * ANOTHER NOTE: This function MUST be called from within the context of + * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() + * instead of list_for_each_entry().   **/  void i40e_del_filter(struct i40e_vsi *vsi,  		     u8 *macaddr, s16 vlan, @@ -1434,9 +1447,18 @@ void i40e_del_filter(struct i40e_vsi *vsi,  	 * remove the filter from the firmware's list  	 */  	if (f->counter == 0) { -		f->changed = true; -		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; -		vsi->back->flags |= I40E_FLAG_FILTER_SYNC; +		if ((f->state == I40E_FILTER_FAILED) || +		    (f->state == I40E_FILTER_NEW)) { +			/* this one never got added by the FW. Just remove it, +			 * no need to sync anything. 
+			 */ +			list_del(&f->list); +			kfree(f); +		} else { +			f->state = I40E_FILTER_REMOVE; +			vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; +			vsi->back->flags |= I40E_FLAG_FILTER_SYNC; +		}  	}  } @@ -1458,7 +1480,6 @@ static int i40e_set_mac(struct net_device *netdev, void *p)  	struct i40e_pf *pf = vsi->back;  	struct i40e_hw *hw = &pf->hw;  	struct sockaddr *addr = p; -	struct i40e_mac_filter *f;  	if (!is_valid_ether_addr(addr->sa_data))  		return -EADDRNOTAVAIL; @@ -1479,52 +1500,23 @@ static int i40e_set_mac(struct net_device *netdev, void *p)  	else  		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); +	spin_lock_bh(&vsi->mac_filter_list_lock); +	i40e_del_mac_all_vlan(vsi, netdev->dev_addr, false, true); +	i40e_put_mac_in_vlan(vsi, addr->sa_data, false, true); +	spin_unlock_bh(&vsi->mac_filter_list_lock); +	ether_addr_copy(netdev->dev_addr, addr->sa_data);  	if (vsi->type == I40E_VSI_MAIN) {  		i40e_status ret;  		ret = i40e_aq_mac_address_write(&vsi->back->hw,  						I40E_AQC_WRITE_TYPE_LAA_WOL,  						addr->sa_data, NULL); -		if (ret) { -			netdev_info(netdev, -				    "Addr change for Main VSI failed: %d\n", -				    ret); -			return -EADDRNOTAVAIL; -		} -	} - -	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) { -		struct i40e_aqc_remove_macvlan_element_data element; - -		memset(&element, 0, sizeof(element)); -		ether_addr_copy(element.mac_addr, netdev->dev_addr); -		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; -		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); -	} else { -		spin_lock_bh(&vsi->mac_filter_list_lock); -		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, -				false, false); -		spin_unlock_bh(&vsi->mac_filter_list_lock); -	} - -	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) { -		struct i40e_aqc_add_macvlan_element_data element; - -		memset(&element, 0, sizeof(element)); -		ether_addr_copy(element.mac_addr, hw->mac.addr); -		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); -		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); -	} else { -		spin_lock_bh(&vsi->mac_filter_list_lock); -		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, -				    false, false); -		if (f) -			f->is_laa = true; -		spin_unlock_bh(&vsi->mac_filter_list_lock); +		if (ret) +			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n", +				    i40e_stat_str(hw, ret), +				    i40e_aq_str(hw, hw->aq.asq_last_status));  	} -	ether_addr_copy(netdev->dev_addr, addr->sa_data); -  	/* schedule our worker thread which will take care of  	 * applying the new filter changes  	 */ @@ -1584,14 +1576,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,  	vsi->tc_config.numtc = numtc;  	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;  	/* Number of queues per enabled TC */ -	/* In MFP case we can have a much lower count of MSIx -	 * vectors available and so we need to lower the used -	 * q count. 
-	 */ -	if (pf->flags & I40E_FLAG_MSIX_ENABLED) -		qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); -	else -		qcount = vsi->alloc_queue_pairs; +	qcount = vsi->alloc_queue_pairs; +  	num_tc_qps = qcount / numtc;  	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); @@ -1761,28 +1747,6 @@ bottom_of_search_loop:  }  /** - * i40e_mac_filter_entry_clone - Clones a MAC filter entry - * @src: source MAC filter entry to be clones - * - * Returns the pointer to newly cloned MAC filter entry or NULL - * in case of error - **/ -static struct i40e_mac_filter *i40e_mac_filter_entry_clone( -					struct i40e_mac_filter *src) -{ -	struct i40e_mac_filter *f; - -	f = kzalloc(sizeof(*f), GFP_ATOMIC); -	if (!f) -		return NULL; -	*f = *src; - -	INIT_LIST_HEAD(&f->list); - -	return f; -} - -/**   * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries   * @vsi: pointer to vsi struct   * @from: Pointer to list which contains MAC filter entries - changes to @@ -1796,41 +1760,61 @@ static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,  	struct i40e_mac_filter *f, *ftmp;  	list_for_each_entry_safe(f, ftmp, from, list) { -		f->changed = true;  		/* Move the element back into MAC filter list*/  		list_move_tail(&f->list, &vsi->mac_filter_list);  	}  }  /** - * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries - * @vsi: pointer to vsi struct + * i40e_update_filter_state - Update filter state based on return data + * from firmware + * @count: Number of filters added + * @add_list: return data from fw + * @head: pointer to first filter in current batch + * @aq_err: status from fw   * - * MAC filter entries from list were slated to be added from device. + * MAC filter entries from list were slated to be added to device. Returns + * number of successful filters. Note that 0 does NOT mean success!   **/ -static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi) +static int +i40e_update_filter_state(int count, +			 struct i40e_aqc_add_macvlan_element_data *add_list, +			 struct i40e_mac_filter *add_head, int aq_err)  { -	struct i40e_mac_filter *f, *ftmp; - -	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { -		if (!f->changed && f->counter) -			f->changed = true; -	} -} +	int retval = 0; +	int i; -/** - * i40e_cleanup_add_list - Deletes the element from add list and release - *			memory - * @add_list: Pointer to list which contains MAC filter entries - **/ -static void i40e_cleanup_add_list(struct list_head *add_list) -{ -	struct i40e_mac_filter *f, *ftmp; -	list_for_each_entry_safe(f, ftmp, add_list, list) { -		list_del(&f->list); -		kfree(f); +	if (!aq_err) { +		retval = count; +		/* Everything's good, mark all filters active. */ +		for (i = 0; i < count ; i++) { +			add_head->state = I40E_FILTER_ACTIVE; +			add_head = list_next_entry(add_head, list); +		} +	} else if (aq_err == I40E_AQ_RC_ENOSPC) { +		/* Device ran out of filter space. Check the return value +		 * for each filter to see which ones are active. 
+		 */ +		for (i = 0; i < count ; i++) { +			if (add_list[i].match_method == +			    I40E_AQC_MM_ERR_NO_RES) { +				add_head->state = I40E_FILTER_FAILED; +			} else { +				add_head->state = I40E_FILTER_ACTIVE; +				retval++; +			} +			add_head = list_next_entry(add_head, list); +		} +	} else { +		/* Some other horrible thing happened, fail all filters */ +		retval = 0; +		for (i = 0; i < count ; i++) { +			add_head->state = I40E_FILTER_FAILED; +			add_head = list_next_entry(add_head, list); +		}  	} +	return retval;  }  /** @@ -1843,20 +1827,22 @@ static void i40e_cleanup_add_list(struct list_head *add_list)   **/  int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  { -	struct list_head tmp_del_list, tmp_add_list; -	struct i40e_mac_filter *f, *ftmp, *fclone; -	bool promisc_forced_on = false; -	bool add_happened = false; +	struct i40e_mac_filter *f, *ftmp, *add_head = NULL; +	struct list_head tmp_add_list, tmp_del_list; +	struct i40e_hw *hw = &vsi->back->hw; +	bool promisc_changed = false; +	char vsi_name[16] = "PF";  	int filter_list_len = 0;  	u32 changed_flags = 0;  	i40e_status aq_ret = 0; -	bool err_cond = false;  	int retval = 0;  	struct i40e_pf *pf;  	int num_add = 0;  	int num_del = 0;  	int aq_err = 0;  	u16 cmd_flags; +	int list_size; +	int fcnt;  	/* empty array typed pointers, kcalloc later */  	struct i40e_aqc_add_macvlan_element_data *add_list; @@ -1871,72 +1857,46 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  		vsi->current_netdev_flags = vsi->netdev->flags;  	} -	INIT_LIST_HEAD(&tmp_del_list);  	INIT_LIST_HEAD(&tmp_add_list); +	INIT_LIST_HEAD(&tmp_del_list); + +	if (vsi->type == I40E_VSI_SRIOV) +		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); +	else if (vsi->type != I40E_VSI_MAIN) +		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);  	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {  		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;  		spin_lock_bh(&vsi->mac_filter_list_lock); +		/* Create a list of filters to delete. 
*/  		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { -			if (!f->changed) -				continue; - -			if (f->counter != 0) -				continue; -			f->changed = false; - -			/* Move the element into temporary del_list */ -			list_move_tail(&f->list, &tmp_del_list); -		} - -		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { -			if (!f->changed) -				continue; - -			if (f->counter == 0) -				continue; -			f->changed = false; - -			/* Clone MAC filter entry and add into temporary list */ -			fclone = i40e_mac_filter_entry_clone(f); -			if (!fclone) { -				err_cond = true; -				break; +			if (f->state == I40E_FILTER_REMOVE) { +				WARN_ON(f->counter != 0); +				/* Move the element into temporary del_list */ +				list_move_tail(&f->list, &tmp_del_list); +				vsi->active_filters--; +			} +			if (f->state == I40E_FILTER_NEW) { +				WARN_ON(f->counter == 0); +				/* Move the element into temporary add_list */ +				list_move_tail(&f->list, &tmp_add_list);  			} -			list_add_tail(&fclone->list, &tmp_add_list); -		} - -		/* if failed to clone MAC filter entry - undo */ -		if (err_cond) { -			i40e_undo_del_filter_entries(vsi, &tmp_del_list); -			i40e_undo_add_filter_entries(vsi);  		}  		spin_unlock_bh(&vsi->mac_filter_list_lock); - -		if (err_cond) { -			i40e_cleanup_add_list(&tmp_add_list); -			retval = -ENOMEM; -			goto out; -		}  	}  	/* Now process 'del_list' outside the lock */  	if (!list_empty(&tmp_del_list)) { -		int del_list_size; - -		filter_list_len = pf->hw.aq.asq_buf_size / +		filter_list_len = hw->aq.asq_buf_size /  			    sizeof(struct i40e_aqc_remove_macvlan_element_data); -		del_list_size = filter_list_len * +		list_size = filter_list_len *  			    sizeof(struct i40e_aqc_remove_macvlan_element_data); -		del_list = kzalloc(del_list_size, GFP_ATOMIC); +		del_list = kzalloc(list_size, GFP_ATOMIC);  		if (!del_list) { -			i40e_cleanup_add_list(&tmp_add_list); -  			/* Undo VSI's MAC filter entry element updates */  			spin_lock_bh(&vsi->mac_filter_list_lock);  			i40e_undo_del_filter_entries(vsi, &tmp_del_list); -			i40e_undo_add_filter_entries(vsi);  			spin_unlock_bh(&vsi->mac_filter_list_lock);  			retval = -ENOMEM;  			goto out; @@ -1947,9 +1907,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  			/* add to delete list */  			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); -			del_list[num_del].vlan_tag = -				cpu_to_le16((u16)(f->vlan == -					    I40E_VLAN_ANY ? 0 : f->vlan)); +			if (f->vlan == I40E_VLAN_ANY) { +				del_list[num_del].vlan_tag = 0; +				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; +			} else { +				del_list[num_del].vlan_tag = +					cpu_to_le16((u16)(f->vlan)); +			}  			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;  			del_list[num_del].flags = cmd_flags; @@ -1957,21 +1921,23 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  			/* flush a full buffer */  			if (num_del == filter_list_len) { -				aq_ret = i40e_aq_remove_macvlan(&pf->hw, -								vsi->seid, +				aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid,  								del_list, -								num_del, -								NULL); -				aq_err = pf->hw.aq.asq_last_status; +								num_del, NULL); +				aq_err = hw->aq.asq_last_status;  				num_del = 0; -				memset(del_list, 0, del_list_size); +				memset(del_list, 0, list_size); -				if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) { +				/* Explicitly ignore and do not report when +				 * firmware returns ENOENT. 
+				 */ +				if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {  					retval = -EIO; -					dev_err(&pf->pdev->dev, -						"ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", -						i40e_stat_str(&pf->hw, aq_ret), -						i40e_aq_str(&pf->hw, aq_err)); +					dev_info(&pf->pdev->dev, +						 "ignoring delete macvlan error on %s, err %s, aq_err %s\n", +						 vsi_name, +						 i40e_stat_str(hw, aq_ret), +						 i40e_aq_str(hw, aq_err));  				}  			}  			/* Release memory for MAC filter entries which were @@ -1982,17 +1948,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  		}  		if (num_del) { -			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, -							del_list, num_del, -							NULL); -			aq_err = pf->hw.aq.asq_last_status; +			aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list, +							num_del, NULL); +			aq_err = hw->aq.asq_last_status;  			num_del = 0; -			if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) +			/* Explicitly ignore and do not report when firmware +			 * returns ENOENT. +			 */ +			if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) { +				retval = -EIO;  				dev_info(&pf->pdev->dev, -					 "ignoring delete macvlan error, err %s aq_err %s\n", -					 i40e_stat_str(&pf->hw, aq_ret), -					 i40e_aq_str(&pf->hw, aq_err)); +					 "ignoring delete macvlan error on %s, err %s aq_err %s\n", +					 vsi_name, +					 i40e_stat_str(hw, aq_ret), +					 i40e_aq_str(hw, aq_err)); +			}  		}  		kfree(del_list); @@ -2000,84 +1971,117 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  	}  	if (!list_empty(&tmp_add_list)) { -		int add_list_size; - -		/* do all the adds now */ -		filter_list_len = pf->hw.aq.asq_buf_size / -			       sizeof(struct i40e_aqc_add_macvlan_element_data), -		add_list_size = filter_list_len * +		/* Do all the adds now. */ +		filter_list_len = hw->aq.asq_buf_size /  			       sizeof(struct i40e_aqc_add_macvlan_element_data); -		add_list = kzalloc(add_list_size, GFP_ATOMIC); +		list_size = filter_list_len * +			       sizeof(struct i40e_aqc_add_macvlan_element_data); +		add_list = kzalloc(list_size, GFP_ATOMIC);  		if (!add_list) { -			/* Purge element from temporary lists */ -			i40e_cleanup_add_list(&tmp_add_list); - -			/* Undo add filter entries from VSI MAC filter list */ -			spin_lock_bh(&vsi->mac_filter_list_lock); -			i40e_undo_add_filter_entries(vsi); -			spin_unlock_bh(&vsi->mac_filter_list_lock);  			retval = -ENOMEM;  			goto out;  		} - -		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { - -			add_happened = true; -			cmd_flags = 0; - +		num_add = 0; +		list_for_each_entry(f, &tmp_add_list, list) { +			if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, +				     &vsi->state)) { +				f->state = I40E_FILTER_FAILED; +				continue; +			}  			/* add to add array */ +			if (num_add == 0) +				add_head = f; +			cmd_flags = 0;  			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr); -			add_list[num_add].vlan_tag = -				cpu_to_le16( -				 (u16)(f->vlan == I40E_VLAN_ANY ? 
0 : f->vlan)); +			if (f->vlan == I40E_VLAN_ANY) { +				add_list[num_add].vlan_tag = 0; +				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; +			} else { +				add_list[num_add].vlan_tag = +					cpu_to_le16((u16)(f->vlan)); +			}  			add_list[num_add].queue_number = 0; -  			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;  			add_list[num_add].flags = cpu_to_le16(cmd_flags);  			num_add++;  			/* flush a full buffer */  			if (num_add == filter_list_len) { -				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, +				aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,  							     add_list, num_add,  							     NULL); -				aq_err = pf->hw.aq.asq_last_status; +				aq_err = hw->aq.asq_last_status; +				fcnt = i40e_update_filter_state(num_add, +								add_list, +								add_head, +								aq_ret); +				vsi->active_filters += fcnt; + +				if (fcnt != num_add) { +					promisc_changed = true; +					set_bit(__I40E_FILTER_OVERFLOW_PROMISC, +						&vsi->state); +					vsi->promisc_threshold = +						(vsi->active_filters * 3) / 4; +					dev_warn(&pf->pdev->dev, +						 "Error %s adding RX filters on %s, promiscuous mode forced on\n", +						 i40e_aq_str(hw, aq_err), +						 vsi_name); +				} +				memset(add_list, 0, list_size);  				num_add = 0; - -				if (aq_ret) -					break; -				memset(add_list, 0, add_list_size);  			} -			/* Entries from tmp_add_list were cloned from MAC -			 * filter list, hence clean those cloned entries -			 */ -			list_del(&f->list); -			kfree(f);  		} -  		if (num_add) { -			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, +			aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,  						     add_list, num_add, NULL); -			aq_err = pf->hw.aq.asq_last_status; -			num_add = 0; +			aq_err = hw->aq.asq_last_status; +			fcnt = i40e_update_filter_state(num_add, add_list, +							add_head, aq_ret); +			vsi->active_filters += fcnt; +			if (fcnt != num_add) { +				promisc_changed = true; +				set_bit(__I40E_FILTER_OVERFLOW_PROMISC, +					&vsi->state); +				vsi->promisc_threshold = +						(vsi->active_filters * 3) / 4; +				dev_warn(&pf->pdev->dev, +					 "Error %s adding RX filters on %s, promiscuous mode forced on\n", +					 i40e_aq_str(hw, aq_err), vsi_name); +			} +		} +		/* Now move all of the filters from the temp add list back to +		 * the VSI's list. +		 */ +		spin_lock_bh(&vsi->mac_filter_list_lock); +		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { +			list_move_tail(&f->list, &vsi->mac_filter_list);  		} +		spin_unlock_bh(&vsi->mac_filter_list_lock);  		kfree(add_list);  		add_list = NULL; +	} -		if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { -			retval = i40e_aq_rc_to_posix(aq_ret, aq_err); +	/* Check to see if we can drop out of overflow promiscuous mode. */ +	if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) && +	    (vsi->active_filters < vsi->promisc_threshold)) { +		int failed_count = 0; +		/* See if we have any failed filters. We can't drop out of +		 * promiscuous until these have all been deleted. 
+		 */ +		spin_lock_bh(&vsi->mac_filter_list_lock); +		list_for_each_entry(f, &vsi->mac_filter_list, list) { +			if (f->state == I40E_FILTER_FAILED) +				failed_count++; +		} +		spin_unlock_bh(&vsi->mac_filter_list_lock); +		if (!failed_count) {  			dev_info(&pf->pdev->dev, -				 "add filter failed, err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, aq_ret), -				 i40e_aq_str(&pf->hw, aq_err)); -			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && -			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, -				      &vsi->state)) { -				promisc_forced_on = true; -				set_bit(__I40E_FILTER_OVERFLOW_PROMISC, -					&vsi->state); -				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); -			} +				 "filter logjam cleared on %s, leaving overflow promiscuous mode\n", +				 vsi_name); +			clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); +			promisc_changed = true; +			vsi->promisc_threshold = 0;  		}  	} @@ -2098,15 +2102,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  							       NULL);  		if (aq_ret) {  			retval = i40e_aq_rc_to_posix(aq_ret, -						     pf->hw.aq.asq_last_status); +						     hw->aq.asq_last_status);  			dev_info(&pf->pdev->dev, -				 "set multi promisc failed, err %s aq_err %s\n", -				 i40e_stat_str(&pf->hw, aq_ret), -				 i40e_aq_str(&pf->hw, -					     pf->hw.aq.asq_last_status)); +				 "set multi promisc failed on %s, err %s aq_err %s\n", +				 vsi_name, +				 i40e_stat_str(hw, aq_ret), +				 i40e_aq_str(hw, hw->aq.asq_last_status));  		}  	} -	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { +	if ((changed_flags & IFF_PROMISC) || +	    (promisc_changed && +	     test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {  		bool cur_promisc;  		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || @@ -2122,33 +2128,58 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  			 */  			if (pf->cur_promisc != cur_promisc) {  				pf->cur_promisc = cur_promisc; -				set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); +				if (cur_promisc) +					aq_ret = +					      i40e_aq_set_default_vsi(hw, +								      vsi->seid, +								      NULL); +				else +					aq_ret = +					    i40e_aq_clear_default_vsi(hw, +								      vsi->seid, +								      NULL); +				if (aq_ret) { +					retval = i40e_aq_rc_to_posix(aq_ret, +							hw->aq.asq_last_status); +					dev_info(&pf->pdev->dev, +						 "Set default VSI failed on %s, err %s, aq_err %s\n", +						 vsi_name, +						 i40e_stat_str(hw, aq_ret), +						 i40e_aq_str(hw, +						     hw->aq.asq_last_status)); +				}  			}  		} else {  			aq_ret = i40e_aq_set_vsi_unicast_promiscuous( -							  &vsi->back->hw, +							  hw,  							  vsi->seid,  							  cur_promisc, NULL,  							  true);  			if (aq_ret) {  				retval =  				i40e_aq_rc_to_posix(aq_ret, -						    pf->hw.aq.asq_last_status); +						    hw->aq.asq_last_status);  				dev_info(&pf->pdev->dev, -					 "set unicast promisc failed, err %d, aq_err %d\n", -					 aq_ret, pf->hw.aq.asq_last_status); +					 "set unicast promisc failed on %s, err %s, aq_err %s\n", +					 vsi_name, +					 i40e_stat_str(hw, aq_ret), +					 i40e_aq_str(hw, +						     hw->aq.asq_last_status));  			}  			aq_ret = i40e_aq_set_vsi_multicast_promiscuous( -							  &vsi->back->hw, +							  hw,  							  vsi->seid,  							  cur_promisc, NULL);  			if (aq_ret) {  				retval =  				i40e_aq_rc_to_posix(aq_ret, -						    pf->hw.aq.asq_last_status); +						    hw->aq.asq_last_status);  				dev_info(&pf->pdev->dev, -					 "set multicast promisc failed, err %d, aq_err %d\n", -					 aq_ret, 
pf->hw.aq.asq_last_status); +					 "set multicast promisc failed on %s, err %s, aq_err %s\n", +					 vsi_name, +					 i40e_stat_str(hw, aq_ret), +					 i40e_aq_str(hw, +						     hw->aq.asq_last_status));  			}  		}  		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, @@ -2159,9 +2190,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  						     pf->hw.aq.asq_last_status);  			dev_info(&pf->pdev->dev,  				 "set brdcast promisc failed, err %s, aq_err %s\n", -				 i40e_stat_str(&pf->hw, aq_ret), -				 i40e_aq_str(&pf->hw, -					     pf->hw.aq.asq_last_status)); +					 i40e_stat_str(hw, aq_ret), +					 i40e_aq_str(hw, +						     hw->aq.asq_last_status));  		}  	}  out: @@ -2330,7 +2361,7 @@ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)   **/  int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)  { -	struct i40e_mac_filter *f, *add_f; +	struct i40e_mac_filter *f, *ftmp, *add_f;  	bool is_netdev, is_vf;  	is_vf = (vsi->type == I40E_VSI_SRIOV); @@ -2351,7 +2382,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)  		}  	} -	list_for_each_entry(f, &vsi->mac_filter_list, list) { +	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {  		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);  		if (!add_f) {  			dev_info(&vsi->back->pdev->dev, @@ -2365,7 +2396,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)  	/* Now if we add a vlan tag, make sure to check if it is the first  	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"  	 * with 0, so we now accept untagged and specified tagged traffic -	 * (and not any taged and untagged) +	 * (and not all tags along with untagged)  	 */  	if (vid > 0) {  		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr, @@ -2387,7 +2418,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)  	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */  	if (vid > 0 && !vsi->info.pvid) { -		list_for_each_entry(f, &vsi->mac_filter_list, list) { +		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {  			if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,  					      is_vf, is_netdev))  				continue; @@ -2424,7 +2455,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)  int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)  {  	struct net_device *netdev = vsi->netdev; -	struct i40e_mac_filter *f, *add_f; +	struct i40e_mac_filter *f, *ftmp, *add_f;  	bool is_vf, is_netdev;  	int filter_count = 0; @@ -2437,7 +2468,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)  	if (is_netdev)  		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); -	list_for_each_entry(f, &vsi->mac_filter_list, list) +	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)  		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);  	/* go through all the filters for this VSI and if there is only @@ -2470,7 +2501,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)  	}  	if (!filter_count) { -		list_for_each_entry(f, &vsi->mac_filter_list, list) { +		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {  			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);  			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,  						is_vf, is_netdev); @@ -2515,8 +2546,6 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,  	if (vid > 4095)  		return -EINVAL; -	netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); -  	/* If the network stack called us with vid = 0 then  	 * it is asking to receive priority tagged packets 
with  	 * vlan id 0.  Our HW receives them by default when configured @@ -2550,8 +2579,6 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_vsi *vsi = np->vsi; -	netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); -  	/* return code is ignored as there is nothing a user  	 * can do about failure to remove and a log message was  	 * already printed from the other function @@ -2564,6 +2591,44 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,  }  /** + * i40e_macaddr_init - explicitly write the mac address filters + * + * @vsi: pointer to the vsi + * @macaddr: the MAC address + * + * This is needed when the macaddr has been obtained by other + * means than the default, e.g., from Open Firmware or IDPROM. + * Returns 0 on success, negative on failure + **/ +static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) +{ +	int ret; +	struct i40e_aqc_add_macvlan_element_data element; + +	ret = i40e_aq_mac_address_write(&vsi->back->hw, +					I40E_AQC_WRITE_TYPE_LAA_WOL, +					macaddr, NULL); +	if (ret) { +		dev_info(&vsi->back->pdev->dev, +			 "Addr change for VSI failed: %d\n", ret); +		return -EADDRNOTAVAIL; +	} + +	memset(&element, 0, sizeof(element)); +	ether_addr_copy(element.mac_addr, macaddr); +	element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); +	ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); +	if (ret) { +		dev_info(&vsi->back->pdev->dev, +			 "add filter failed err %s aq_err %s\n", +			 i40e_stat_str(&vsi->back->hw, ret), +			 i40e_aq_str(&vsi->back->hw, +				     vsi->back->hw.aq.asq_last_status)); +	} +	return ret; +} + +/**   * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up   * @vsi: the vsi being brought back up   **/ @@ -3009,8 +3074,19 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)   **/  static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)  { +	struct i40e_pf *pf = vsi->back; +	int err; +  	if (vsi->netdev)  		i40e_set_rx_mode(vsi->netdev); + +	if (!!(pf->flags & I40E_FLAG_PF_MAC)) { +		err = i40e_macaddr_init(vsi, pf->hw.mac.addr); +		if (err) { +			dev_warn(&pf->pdev->dev, +				 "could not set up macaddr; err %d\n", err); +		} +	}  }  /** @@ -3952,6 +4028,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)  			/* clear the affinity_mask in the IRQ descriptor */  			irq_set_affinity_hint(pf->msix_entries[vector].vector,  					      NULL); +			synchronize_irq(pf->msix_entries[vector].vector);  			free_irq(pf->msix_entries[vector].vector,  				 vsi->q_vectors[i]); @@ -4958,7 +5035,6 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)  			if (pf->vsi[v]->netdev)  				i40e_dcbnl_set_all(pf->vsi[v]);  		} -		i40e_notify_client_of_l2_param_changes(pf->vsi[v]);  	}  } @@ -5183,12 +5259,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)  		usleep_range(1000, 2000);  	i40e_down(vsi); -	/* Give a VF some time to respond to the reset.  The -	 * two second wait is based upon the watchdog cycle in -	 * the VF driver. 
-	 */ -	if (vsi->type == I40E_VSI_SRIOV) -		msleep(2000);  	i40e_up(vsi);  	clear_bit(__I40E_CONFIG_BUSY, &pf->state);  } @@ -5231,6 +5301,9 @@ void i40e_down(struct i40e_vsi *vsi)  		i40e_clean_tx_ring(vsi->tx_rings[i]);  		i40e_clean_rx_ring(vsi->rx_rings[i]);  	} + +	i40e_notify_client_of_netdev_close(vsi, false); +  }  /** @@ -5342,14 +5415,7 @@ int i40e_open(struct net_device *netdev)  						       TCP_FLAG_CWR) >> 16);  	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); -#ifdef CONFIG_I40E_VXLAN -	vxlan_get_rx_port(netdev); -#endif -#ifdef CONFIG_I40E_GENEVE -	if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE) -		geneve_get_rx_port(netdev); -#endif - +	udp_tunnel_get_rx_info(netdev);  	i40e_notify_client_of_netdev_open(vsi);  	return 0; @@ -5716,6 +5782,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,  		i40e_service_event_schedule(pf);  	} else {  		i40e_pf_unquiesce_all_vsi(pf); +		/* Notify the client for the DCB changes */ +		i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);  	}  exit: @@ -5940,7 +6008,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)  		if (I40E_DEBUG_FD & pf->hw.debug_mask)  			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");  	} -  }  /** @@ -7057,7 +7124,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)   **/  static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)  { -#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)  	struct i40e_hw *hw = &pf->hw;  	i40e_status ret;  	__be16 port; @@ -7092,7 +7158,6 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)  			}  		}  	} -#endif  }  /** @@ -7174,7 +7239,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)  		vsi->alloc_queue_pairs = 1;  		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,  				      I40E_REQ_DESCRIPTOR_MULTIPLE); -		vsi->num_q_vectors = 1; +		vsi->num_q_vectors = pf->num_fdsb_msix;  		break;  	case I40E_VSI_VMDQ2: @@ -7558,9 +7623,11 @@ static int i40e_init_msix(struct i40e_pf *pf)  	/* reserve one vector for sideband flow director */  	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {  		if (vectors_left) { +			pf->num_fdsb_msix = 1;  			v_budget++;  			vectors_left--;  		} else { +			pf->num_fdsb_msix = 0;  			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;  		}  	} @@ -7726,10 +7793,11 @@ static int i40e_init_msix(struct i40e_pf *pf)   * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector   * @vsi: the VSI being configured   * @v_idx: index of the vector in the vsi struct + * @cpu: cpu to be used on affinity_mask   *   * We allocate one q_vector.  If allocation fails we return -ENOMEM.   
**/ -static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) +static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)  {  	struct i40e_q_vector *q_vector; @@ -7740,7 +7808,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)  	q_vector->vsi = vsi;  	q_vector->v_idx = v_idx; -	cpumask_set_cpu(v_idx, &q_vector->affinity_mask); +	cpumask_set_cpu(cpu, &q_vector->affinity_mask); +  	if (vsi->netdev)  		netif_napi_add(vsi->netdev, &q_vector->napi,  			       i40e_napi_poll, NAPI_POLL_WEIGHT); @@ -7764,8 +7833,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)  static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)  {  	struct i40e_pf *pf = vsi->back; -	int v_idx, num_q_vectors; -	int err; +	int err, v_idx, num_q_vectors, current_cpu;  	/* if not MSIX, give the one vector only to the LAN VSI */  	if (pf->flags & I40E_FLAG_MSIX_ENABLED) @@ -7775,10 +7843,15 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)  	else  		return -EINVAL; +	current_cpu = cpumask_first(cpu_online_mask); +  	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { -		err = i40e_vsi_alloc_q_vector(vsi, v_idx); +		err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);  		if (err)  			goto err_out; +		current_cpu = cpumask_next(current_cpu, cpu_online_mask); +		if (unlikely(current_cpu >= nr_cpu_ids)) +			current_cpu = cpumask_first(cpu_online_mask);  	}  	return 0; @@ -7905,7 +7978,6 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,  	u8 *rss_lut;  	int ret, i; -	memset(&rss_key, 0, sizeof(rss_key));  	memcpy(&rss_key, seed, sizeof(rss_key));  	rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL); @@ -8579,7 +8651,9 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)  		/* Enable filters and mark for reset */  		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))  			need_reset = true; -		pf->flags |= I40E_FLAG_FD_SB_ENABLED; +		/* enable FD_SB only if there is MSI-X vector */ +		if (pf->num_fdsb_msix > 0) +			pf->flags |= I40E_FLAG_FD_SB_ENABLED;  	} else {  		/* turn off filters, mark for reset and clear SW filter list */  		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { @@ -8628,7 +8702,6 @@ static int i40e_set_features(struct net_device *netdev,  	return 0;  } -#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)  /**   * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port   * @pf: board private structure @@ -8648,21 +8721,18 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)  	return i;  } -#endif - -#if IS_ENABLED(CONFIG_VXLAN)  /** - * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up + * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up   * @netdev: This physical port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: New UDP port number that VXLAN started listening to + * @ti: Tunnel endpoint information   **/ -static void i40e_add_vxlan_port(struct net_device *netdev, -				sa_family_t sa_family, __be16 port) +static void i40e_udp_tunnel_add(struct net_device *netdev, +				struct udp_tunnel_info *ti)  {  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_vsi *vsi = np->vsi;  	struct i40e_pf *pf = vsi->back; +	__be16 port = ti->port;  	u8 next_idx;  	u8 idx; @@ -8670,7 +8740,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,  	/* Check if port already exists */  	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { -		netdev_info(netdev, "vxlan port %d already offloaded\n", +		
netdev_info(netdev, "port %d already offloaded\n",  			    ntohs(port));  		return;  	} @@ -8679,131 +8749,75 @@ static void i40e_add_vxlan_port(struct net_device *netdev,  	next_idx = i40e_get_udp_port_idx(pf, 0);  	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { -		netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", +		netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",  			    ntohs(port));  		return;  	} -	/* New port: add it and mark its index in the bitmap */ -	pf->udp_ports[next_idx].index = port; -	pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; -	pf->pending_udp_bitmap |= BIT_ULL(next_idx); -	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; -} - -/** - * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away - * @netdev: This physical port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: UDP port number that VXLAN stopped listening to - **/ -static void i40e_del_vxlan_port(struct net_device *netdev, -				sa_family_t sa_family, __be16 port) -{ -	struct i40e_netdev_priv *np = netdev_priv(netdev); -	struct i40e_vsi *vsi = np->vsi; -	struct i40e_pf *pf = vsi->back; -	u8 idx; - -	idx = i40e_get_udp_port_idx(pf, port); - -	/* Check if port already exists */ -	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { -		/* if port exists, set it to 0 (mark for deletion) -		 * and make it pending -		 */ -		pf->udp_ports[idx].index = 0; -		pf->pending_udp_bitmap |= BIT_ULL(idx); -		pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; -	} else { -		netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", -			    ntohs(port)); -	} -} -#endif - -#if IS_ENABLED(CONFIG_GENEVE) -/** - * i40e_add_geneve_port - Get notifications about GENEVE ports that come up - * @netdev: This physical port's netdev - * @sa_family: Socket Family that GENEVE is notifying us about - * @port: New UDP port number that GENEVE started listening to - **/ -static void i40e_add_geneve_port(struct net_device *netdev, -				 sa_family_t sa_family, __be16 port) -{ -	struct i40e_netdev_priv *np = netdev_priv(netdev); -	struct i40e_vsi *vsi = np->vsi; -	struct i40e_pf *pf = vsi->back; -	u8 next_idx; -	u8 idx; - -	if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) -		return; - -	idx = i40e_get_udp_port_idx(pf, port); - -	/* Check if port already exists */ -	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { -		netdev_info(netdev, "udp port %d already offloaded\n", -			    ntohs(port)); -		return; -	} - -	/* Now check if there is space to add the new port */ -	next_idx = i40e_get_udp_port_idx(pf, 0); - -	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { -		netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", -			    ntohs(port)); +	switch (ti->type) { +	case UDP_TUNNEL_TYPE_VXLAN: +		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; +		break; +	case UDP_TUNNEL_TYPE_GENEVE: +		if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) +			return; +		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; +		break; +	default:  		return;  	}  	/* New port: add it and mark its index in the bitmap */  	pf->udp_ports[next_idx].index = port; -	pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;  	pf->pending_udp_bitmap |= BIT_ULL(next_idx);  	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - -	dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));  }  /** - * i40e_del_geneve_port - Get notifications about GENEVE ports that go away + * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports 
that go away   * @netdev: This physical port's netdev - * @sa_family: Socket Family that GENEVE is notifying us about - * @port: UDP port number that GENEVE stopped listening to + * @ti: Tunnel endpoint information   **/ -static void i40e_del_geneve_port(struct net_device *netdev, -				 sa_family_t sa_family, __be16 port) +static void i40e_udp_tunnel_del(struct net_device *netdev, +				struct udp_tunnel_info *ti)  {  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_vsi *vsi = np->vsi;  	struct i40e_pf *pf = vsi->back; +	__be16 port = ti->port;  	u8 idx; -	if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) -		return; -  	idx = i40e_get_udp_port_idx(pf, port);  	/* Check if port already exists */ -	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { -		/* if port exists, set it to 0 (mark for deletion) -		 * and make it pending -		 */ -		pf->udp_ports[idx].index = 0; -		pf->pending_udp_bitmap |= BIT_ULL(idx); -		pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; +	if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS) +		goto not_found; -		dev_info(&pf->pdev->dev, "deleting geneve port %d\n", -			 ntohs(port)); -	} else { -		netdev_warn(netdev, "geneve port %d was not found, not deleting\n", -			    ntohs(port)); +	switch (ti->type) { +	case UDP_TUNNEL_TYPE_VXLAN: +		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN) +			goto not_found; +		break; +	case UDP_TUNNEL_TYPE_GENEVE: +		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE) +			goto not_found; +		break; +	default: +		goto not_found;  	} + +	/* if port exists, set it to 0 (mark for deletion) +	 * and make it pending +	 */ +	pf->udp_ports[idx].index = 0; +	pf->pending_udp_bitmap |= BIT_ULL(idx); +	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + +	return; +not_found: +	netdev_warn(netdev, "UDP port %d was not found, not deleting\n", +		    ntohs(port));  } -#endif  static int i40e_get_phys_port_id(struct net_device *netdev,  				 struct netdev_phys_item_id *ppid) @@ -9033,14 +9047,8 @@ static const struct net_device_ops i40e_netdev_ops = {  	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,  	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,  	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust, -#if IS_ENABLED(CONFIG_VXLAN) -	.ndo_add_vxlan_port	= i40e_add_vxlan_port, -	.ndo_del_vxlan_port	= i40e_del_vxlan_port, -#endif -#if IS_ENABLED(CONFIG_GENEVE) -	.ndo_add_geneve_port	= i40e_add_geneve_port, -	.ndo_del_geneve_port	= i40e_del_geneve_port, -#endif +	.ndo_udp_tunnel_add	= i40e_udp_tunnel_add, +	.ndo_udp_tunnel_del	= i40e_udp_tunnel_del,  	.ndo_get_phys_port_id	= i40e_get_phys_port_id,  	.ndo_fdb_add		= i40e_ndo_fdb_add,  	.ndo_features_check	= i40e_features_check, @@ -9056,7 +9064,6 @@ static const struct net_device_ops i40e_netdev_ops = {   **/  static int i40e_config_netdev(struct i40e_vsi *vsi)  { -	u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};  	struct i40e_pf *pf = vsi->back;  	struct i40e_hw *hw = &pf->hw;  	struct i40e_netdev_priv *np; @@ -9120,18 +9127,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)  		 * default a MAC-VLAN filter that accepts any tagged packet  		 * which must be replaced by a normal filter.  		 
*/ -		if (!i40e_rm_default_mac_filter(vsi, mac_addr)) { -			spin_lock_bh(&vsi->mac_filter_list_lock); -			i40e_add_filter(vsi, mac_addr, -					I40E_VLAN_ANY, false, true); -			spin_unlock_bh(&vsi->mac_filter_list_lock); -		} -	} else if ((pf->hw.aq.api_maj_ver > 1) || -		   ((pf->hw.aq.api_maj_ver == 1) && -		    (pf->hw.aq.api_min_ver > 4))) { -		/* Supported in FW API version higher than 1.4 */ -		pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; -		pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; +		i40e_rm_default_mac_filter(vsi, mac_addr); +		spin_lock_bh(&vsi->mac_filter_list_lock); +		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); +		spin_unlock_bh(&vsi->mac_filter_list_lock);  	} else {  		/* relate the VSI_VMDQ name to the VSI_MAIN name */  		snprintf(netdev->name, IFNAMSIZ, "%sv%%d", @@ -9143,10 +9142,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)  		spin_unlock_bh(&vsi->mac_filter_list_lock);  	} -	spin_lock_bh(&vsi->mac_filter_list_lock); -	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); -	spin_unlock_bh(&vsi->mac_filter_list_lock); -  	ether_addr_copy(netdev->dev_addr, mac_addr);  	ether_addr_copy(netdev->perm_addr, mac_addr); @@ -9224,8 +9219,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)  static int i40e_add_vsi(struct i40e_vsi *vsi)  {  	int ret = -ENODEV; -	u8 laa_macaddr[ETH_ALEN]; -	bool found_laa_mac_filter = false; +	i40e_status aq_ret = 0;  	struct i40e_pf *pf = vsi->back;  	struct i40e_hw *hw = &pf->hw;  	struct i40e_vsi_context ctxt; @@ -9413,42 +9407,29 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)  		vsi->seid = ctxt.seid;  		vsi->id = ctxt.vsi_number;  	} +	/* Except FDIR VSI, for all othet VSI set the broadcast filter */ +	if (vsi->type != I40E_VSI_FDIR) { +		aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL); +		if (aq_ret) { +			ret = i40e_aq_rc_to_posix(aq_ret, +						  hw->aq.asq_last_status); +			dev_info(&pf->pdev->dev, +				 "set brdcast promisc failed, err %s, aq_err %s\n", +				 i40e_stat_str(hw, aq_ret), +				 i40e_aq_str(hw, hw->aq.asq_last_status)); +		} +	} +	vsi->active_filters = 0; +	clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);  	spin_lock_bh(&vsi->mac_filter_list_lock);  	/* If macvlan filters already exist, force them to get loaded */  	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { -		f->changed = true; +		f->state = I40E_FILTER_NEW;  		f_count++; - -		/* Expected to have only one MAC filter entry for LAA in list */ -		if (f->is_laa && vsi->type == I40E_VSI_MAIN) { -			ether_addr_copy(laa_macaddr, f->macaddr); -			found_laa_mac_filter = true; -		}  	}  	spin_unlock_bh(&vsi->mac_filter_list_lock); -	if (found_laa_mac_filter) { -		struct i40e_aqc_remove_macvlan_element_data element; - -		memset(&element, 0, sizeof(element)); -		ether_addr_copy(element.mac_addr, laa_macaddr); -		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; -		ret = i40e_aq_remove_macvlan(hw, vsi->seid, -					     &element, 1, NULL); -		if (ret) { -			/* some older FW has a different default */ -			element.flags |= -				       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; -			i40e_aq_remove_macvlan(hw, vsi->seid, -					       &element, 1, NULL); -		} - -		i40e_aq_mac_address_write(hw, -					  I40E_AQC_WRITE_TYPE_LAA_WOL, -					  laa_macaddr, NULL); -	} -  	if (f_count) {  		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;  		pf->flags |= I40E_FLAG_FILTER_SYNC; @@ -9659,6 +9640,8 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)  	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;  
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;  	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); +	if (vsi->type == I40E_VSI_MAIN) +		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);  	/* assign it some queues */  	ret = i40e_alloc_rings(vsi); @@ -9684,44 +9667,6 @@ err_vsi:  }  /** - * i40e_macaddr_init - explicitly write the mac address filters. - * - * @vsi: pointer to the vsi. - * @macaddr: the MAC address - * - * This is needed when the macaddr has been obtained by other - * means than the default, e.g., from Open Firmware or IDPROM. - * Returns 0 on success, negative on failure - **/ -static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) -{ -	int ret; -	struct i40e_aqc_add_macvlan_element_data element; - -	ret = i40e_aq_mac_address_write(&vsi->back->hw, -					I40E_AQC_WRITE_TYPE_LAA_WOL, -					macaddr, NULL); -	if (ret) { -		dev_info(&vsi->back->pdev->dev, -			 "Addr change for VSI failed: %d\n", ret); -		return -EADDRNOTAVAIL; -	} - -	memset(&element, 0, sizeof(element)); -	ether_addr_copy(element.mac_addr, macaddr); -	element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); -	ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); -	if (ret) { -		dev_info(&vsi->back->pdev->dev, -			 "add filter failed err %s aq_err %s\n", -			 i40e_stat_str(&vsi->back->hw, ret), -			 i40e_aq_str(&vsi->back->hw, -				     vsi->back->hw.aq.asq_last_status)); -	} -	return ret; -} - -/**   * i40e_vsi_setup - Set up a VSI by a given type   * @pf: board private structure   * @type: VSI type @@ -10133,14 +10078,14 @@ void i40e_veb_release(struct i40e_veb *veb)  static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)  {  	struct i40e_pf *pf = veb->pf; -	bool is_default = veb->pf->cur_promisc;  	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);  	int ret; -	/* get a VEB from the hardware */  	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, -			      veb->enabled_tc, is_default, +			      veb->enabled_tc, false,  			      &veb->seid, enable_stats, NULL); + +	/* get a VEB from the hardware */  	if (ret) {  		dev_info(&pf->pdev->dev,  			 "couldn't add VEB, err %s aq_err %s\n", @@ -10689,12 +10634,8 @@ static void i40e_print_features(struct i40e_pf *pf)  	}  	if (pf->flags & I40E_FLAG_DCB_CAPABLE)  		i += snprintf(&buf[i], REMAIN(i), " DCB"); -#if IS_ENABLED(CONFIG_VXLAN)  	i += snprintf(&buf[i], REMAIN(i), " VxLAN"); -#endif -#if IS_ENABLED(CONFIG_GENEVE)  	i += snprintf(&buf[i], REMAIN(i), " Geneve"); -#endif  	if (pf->flags & I40E_FLAG_PTP)  		i += snprintf(&buf[i], REMAIN(i), " PTP");  #ifdef I40E_FCOE @@ -11525,6 +11466,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)  {  	struct i40e_pf *pf = pci_get_drvdata(pdev);  	struct i40e_hw *hw = &pf->hw; +	int retval = 0;  	set_bit(__I40E_SUSPENDED, &pf->state);  	set_bit(__I40E_DOWN, &pf->state); @@ -11536,10 +11478,16 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)  	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));  	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); +	i40e_stop_misc_vector(pf); + +	retval = pci_save_state(pdev); +	if (retval) +		return retval; +  	pci_wake_from_d3(pdev, pf->wol_en);  	pci_set_power_state(pdev, PCI_D3hot); -	return 0; +	return retval;  }  /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 80403c6ee7f0..4660c5abc855 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -98,6 +98,8 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,  				struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,  				struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id, +				      struct i40e_asq_cmd_details *cmd_details);  enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,  			bool qualified_modules, bool report_init,  			struct i40e_aq_get_phy_abilities_resp *abilities, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 55f151fca1dc..df7ecc9578c9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -740,14 +740,12 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,  	tx_ring->q_vector->tx.total_packets += total_packets;  	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { -		unsigned int j = 0; -  		/* check to see if there are < 4 descriptors  		 * waiting to be written back, then kick the hardware to force  		 * them to be written back in case we stay in NAPI.  		 * In this mode on X722 we do not enable Interrupt.  		 */ -		j = i40e_get_tx_pending(tx_ring, false); +		unsigned int j = i40e_get_tx_pending(tx_ring, false);  		if (budget &&  		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) && @@ -1280,8 +1278,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,  				    union i40e_rx_desc *rx_desc)  {  	struct i40e_rx_ptype_decoded decoded; -	bool ipv4, ipv6, tunnel = false;  	u32 rx_error, rx_status; +	bool ipv4, ipv6;  	u8 ptype;  	u64 qword; @@ -1336,19 +1334,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,  	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))  		return; -	/* The hardware supported by this driver does not validate outer -	 * checksums for tunneled VXLAN or GENEVE frames.  I don't agree -	 * with it but the specification states that you "MAY validate", it -	 * doesn't make it a hard requirement so if we have validated the -	 * inner checksum report CHECKSUM_UNNECESSARY. +	/* If there is an outer header present that might contain a checksum +	 * we need to bump the checksum level by 1 to reflect the fact that +	 * we are indicating we validated the inner checksum.  	 */ -	if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP | -				  I40E_RX_PTYPE_INNER_PROT_UDP | -				  I40E_RX_PTYPE_INNER_PROT_SCTP)) -		tunnel = true; - -	skb->ip_summed = CHECKSUM_UNNECESSARY; -	skb->csum_level = tunnel ? 
1 : 0; +	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT) +		skb->csum_level = 1; + +	/* Only report checksum unnecessary for TCP, UDP, or SCTP */ +	switch (decoded.inner_prot) { +	case I40E_RX_PTYPE_INNER_PROT_TCP: +	case I40E_RX_PTYPE_INNER_PROT_UDP: +	case I40E_RX_PTYPE_INNER_PROT_SCTP: +		skb->ip_summed = CHECKSUM_UNNECESSARY; +		/* fall though */ +	default: +		break; +	}  	return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 1fcafcfa8f14..6fcbf764f32b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -665,6 +665,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)  		goto error_alloc_vsi_res;  	}  	if (type == I40E_VSI_SRIOV) { +		u64 hena = i40e_pf_get_default_rss_hena(pf); +  		vf->lan_vsi_idx = vsi->idx;  		vf->lan_vsi_id = vsi->id;  		/* If the port VLAN has been configured and then the @@ -687,6 +689,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)  					vf->default_lan_addr.addr, vf->vf_id);  		}  		spin_unlock_bh(&vsi->mac_filter_list_lock); +		i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), +				  (u32)hena); +		i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), +				  (u32)(hena >> 32));  	}  	/* program mac filter */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 8f64204000fb..4db0c0326185 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -59,7 +59,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)  		case I40E_DEV_ID_1G_BASE_T_X722:  		case I40E_DEV_ID_10G_BASE_T_X722:  		case I40E_DEV_ID_SFP_I_X722: -		case I40E_DEV_ID_QSFP_I_X722:  			hw->mac.type = I40E_MAC_X722;  			break;  		case I40E_DEV_ID_X722_VF: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index d34972bab09c..70235706915e 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -45,7 +45,6 @@  #define I40E_DEV_ID_1G_BASE_T_X722	0x37D1  #define I40E_DEV_ID_10G_BASE_T_X722	0x37D2  #define I40E_DEV_ID_SFP_I_X722		0x37D3 -#define I40E_DEV_ID_QSFP_I_X722		0x37D4  #define I40E_DEV_ID_X722_VF		0x37CD  #define I40E_DEV_ID_X722_VF_HV		0x37D9 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index be99189da925..a579193b2c21 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -259,13 +259,12 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,  	tx_ring->q_vector->tx.total_packets += total_packets;  	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { -		unsigned int j = 0;  		/* check to see if there are < 4 descriptors  		 * waiting to be written back, then kick the hardware to force  		 * them to be written back in case we stay in NAPI.  		 * In this mode on X722 we do not enable Interrupt.  		 
*/ -		j = i40evf_get_tx_pending(tx_ring, false); +		unsigned int j = i40evf_get_tx_pending(tx_ring, false);  		if (budget &&  		    ((j / (WB_STRIDE + 1)) == 0) && (j > 0) && @@ -752,8 +751,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,  				    union i40e_rx_desc *rx_desc)  {  	struct i40e_rx_ptype_decoded decoded; -	bool ipv4, ipv6, tunnel = false;  	u32 rx_error, rx_status; +	bool ipv4, ipv6;  	u8 ptype;  	u64 qword; @@ -808,19 +807,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,  	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))  		return; -	/* The hardware supported by this driver does not validate outer -	 * checksums for tunneled VXLAN or GENEVE frames.  I don't agree -	 * with it but the specification states that you "MAY validate", it -	 * doesn't make it a hard requirement so if we have validated the -	 * inner checksum report CHECKSUM_UNNECESSARY. +	/* If there is an outer header present that might contain a checksum +	 * we need to bump the checksum level by 1 to reflect the fact that +	 * we are indicating we validated the inner checksum.  	 */ -	if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP | -				  I40E_RX_PTYPE_INNER_PROT_UDP | -				  I40E_RX_PTYPE_INNER_PROT_SCTP)) -		tunnel = true; - -	skb->ip_summed = CHECKSUM_UNNECESSARY; -	skb->csum_level = tunnel ? 1 : 0; +	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT) +		skb->csum_level = 1; + +	/* Only report checksum unnecessary for TCP, UDP, or SCTP */ +	switch (decoded.inner_prot) { +	case I40E_RX_PTYPE_INNER_PROT_TCP: +	case I40E_RX_PTYPE_INNER_PROT_UDP: +	case I40E_RX_PTYPE_INNER_PROT_SCTP: +		skb->ip_summed = CHECKSUM_UNNECESSARY; +		/* fall though */ +	default: +		break; +	}  	return; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 16c552952860..600fb9c4a7f0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -37,8 +37,8 @@ static const char i40evf_driver_string[] =  #define DRV_KERN "-k"  #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 10 +#define DRV_VERSION_MINOR 6 +#define DRV_VERSION_BUILD 11  #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \  	     __stringify(DRV_VERSION_MINOR) "." 
\  	     __stringify(DRV_VERSION_BUILD) \ @@ -57,7 +57,9 @@ static const char i40evf_copyright[] =   */  static const struct pci_device_id i40evf_pci_tbl[] = {  	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, +	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},  	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, +	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0},  	/* required last entry */  	{0, }  }; @@ -825,7 +827,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,  		ether_addr_copy(f->macaddr, macaddr); -		list_add(&f->list, &adapter->mac_filter_list); +		list_add_tail(&f->list, &adapter->mac_filter_list);  		f->add = true;  		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;  	} diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index f13445691507..d76c221d4c8a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -434,6 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)  			ether_addr_copy(veal->list[i].addr, f->macaddr);  			i++;  			f->add = false; +			if (i == count) +				break;  		}  	}  	if (!more) @@ -497,6 +499,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)  			i++;  			list_del(&f->list);  			kfree(f); +			if (i == count) +				break;  		}  	}  	if (!more) @@ -560,6 +564,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)  			vvfl->vlan_id[i] = f->vlan;  			i++;  			f->add = false; +			if (i == count) +				break;  		}  	}  	if (!more) @@ -623,6 +629,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)  			i++;  			list_del(&f->list);  			kfree(f); +			if (i == count) +				break;  		}  	}  	if (!more) diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index b9609afa5ca3..5387b3a96489 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -445,6 +445,7 @@ struct igb_adapter {  	unsigned long ptp_tx_start;  	unsigned long last_rx_ptp_check;  	unsigned long last_rx_timestamp; +	unsigned int ptp_flags;  	spinlock_t tmreg_lock;  	struct cyclecounter cc;  	struct timecounter tc; @@ -474,12 +475,15 @@ struct igb_adapter {  	u16 eee_advert;  }; +/* flags controlling PTP/1588 function */ +#define IGB_PTP_ENABLED		BIT(0) +#define IGB_PTP_OVERFLOW_CHECK	BIT(1) +  #define IGB_FLAG_HAS_MSI		BIT(0)  #define IGB_FLAG_DCA_ENABLED		BIT(1)  #define IGB_FLAG_QUAD_PORT_A		BIT(2)  #define IGB_FLAG_QUEUE_PAIRS		BIT(3)  #define IGB_FLAG_DMAC			BIT(4) -#define IGB_FLAG_PTP			BIT(5)  #define IGB_FLAG_RSS_FIELD_IPV4_UDP	BIT(6)  #define IGB_FLAG_RSS_FIELD_IPV6_UDP	BIT(7)  #define IGB_FLAG_WOL_SUPPORTED		BIT(8) @@ -546,6 +550,7 @@ void igb_set_fw_version(struct igb_adapter *);  void igb_ptp_init(struct igb_adapter *adapter);  void igb_ptp_stop(struct igb_adapter *adapter);  void igb_ptp_reset(struct igb_adapter *adapter); +void igb_ptp_suspend(struct igb_adapter *adapter);  void igb_ptp_rx_hang(struct igb_adapter *adapter);  void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);  void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ef3d642f5ff2..9bcba42abb91 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2027,7 +2027,8 @@ void igb_reset(struct igb_adapter *adapter)  	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);  	/* Re-enable 
PTP, where applicable. */ -	igb_ptp_reset(adapter); +	if (adapter->ptp_flags & IGB_PTP_ENABLED) +		igb_ptp_reset(adapter);  	igb_get_phy_info(hw);  } @@ -6855,12 +6856,12 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,   **/  static bool igb_add_rx_frag(struct igb_ring *rx_ring,  			    struct igb_rx_buffer *rx_buffer, +			    unsigned int size,  			    union e1000_adv_rx_desc *rx_desc,  			    struct sk_buff *skb)  {  	struct page *page = rx_buffer->page;  	unsigned char *va = page_address(page) + rx_buffer->page_offset; -	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);  #if (PAGE_SIZE < 8192)  	unsigned int truesize = IGB_RX_BUFSZ;  #else @@ -6912,6 +6913,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,  					   union e1000_adv_rx_desc *rx_desc,  					   struct sk_buff *skb)  { +	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);  	struct igb_rx_buffer *rx_buffer;  	struct page *page; @@ -6947,11 +6949,11 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,  	dma_sync_single_range_for_cpu(rx_ring->dev,  				      rx_buffer->dma,  				      rx_buffer->page_offset, -				      IGB_RX_BUFSZ, +				      size,  				      DMA_FROM_DEVICE);  	/* pull page into skb */ -	if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { +	if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {  		/* hand second half of page back to the ring */  		igb_reuse_rx_page(rx_ring, rx_buffer);  	} else { @@ -7527,6 +7529,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,  	if (netif_running(netdev))  		__igb_close(netdev, true); +	igb_ptp_suspend(adapter); +  	igb_clear_interrupt_scheme(adapter);  #ifdef CONFIG_PM diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index f097c5a8ab93..e61b647f5f2a 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -684,6 +684,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)  	u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);  	unsigned long rx_event; +	/* Other hardware uses per-packet timestamps */  	if (hw->mac.type != e1000_82576)  		return; @@ -1042,6 +1043,13 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)  		-EFAULT : 0;  } +/** + * igb_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. + */  void igb_ptp_init(struct igb_adapter *adapter)  {  	struct e1000_hw *hw = &adapter->hw; @@ -1064,8 +1072,7 @@ void igb_ptp_init(struct igb_adapter *adapter)  		adapter->cc.mask = CYCLECOUNTER_MASK(64);  		adapter->cc.mult = 1;  		adapter->cc.shift = IGB_82576_TSYNC_SHIFT; -		/* Dial the nominal frequency. */ -		wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); +		adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK;  		break;  	case e1000_82580:  	case e1000_i354: @@ -1084,8 +1091,7 @@ void igb_ptp_init(struct igb_adapter *adapter)  		adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);  		adapter->cc.mult = 1;  		adapter->cc.shift = 0; -		/* Enable the timer functions by clearing bit 31. 
*/ -		wr32(E1000_TSAUXC, 0x0); +		adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK;  		break;  	case e1000_i210:  	case e1000_i211: @@ -1110,44 +1116,24 @@ void igb_ptp_init(struct igb_adapter *adapter)  		adapter->ptp_caps.settime64 = igb_ptp_settime_i210;  		adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;  		adapter->ptp_caps.verify = igb_ptp_verify_pin; -		/* Enable the timer functions by clearing bit 31. */ -		wr32(E1000_TSAUXC, 0x0);  		break;  	default:  		adapter->ptp_clock = NULL;  		return;  	} -	wrfl(); -  	spin_lock_init(&adapter->tmreg_lock);  	INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); -	/* Initialize the clock and overflow work for devices that need it. */ -	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { -		struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); - -		igb_ptp_settime_i210(&adapter->ptp_caps, &ts); -	} else { -		timecounter_init(&adapter->tc, &adapter->cc, -				 ktime_to_ns(ktime_get_real())); - +	if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)  		INIT_DELAYED_WORK(&adapter->ptp_overflow_work,  				  igb_ptp_overflow_check); -		schedule_delayed_work(&adapter->ptp_overflow_work, -				      IGB_SYSTIM_OVERFLOW_PERIOD); -	} - -	/* Initialize the time sync interrupts for devices that support it. */ -	if (hw->mac.type >= e1000_82580) { -		wr32(E1000_TSIM, TSYNC_INTERRUPTS); -		wr32(E1000_IMS, E1000_IMS_TS); -	} -  	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;  	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; +	igb_ptp_reset(adapter); +  	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,  						&adapter->pdev->dev);  	if (IS_ERR(adapter->ptp_clock)) { @@ -1156,32 +1142,24 @@ void igb_ptp_init(struct igb_adapter *adapter)  	} else {  		dev_info(&adapter->pdev->dev, "added PHC on %s\n",  			 adapter->netdev->name); -		adapter->flags |= IGB_FLAG_PTP; +		adapter->ptp_flags |= IGB_PTP_ENABLED;  	}  }  /** - * igb_ptp_stop - Disable PTP device and stop the overflow check. - * @adapter: Board private structure. + * igb_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure   * - * This function stops the PTP support and cancels the delayed work. - **/ -void igb_ptp_stop(struct igb_adapter *adapter) + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. + */ +void igb_ptp_suspend(struct igb_adapter *adapter)  { -	switch (adapter->hw.mac.type) { -	case e1000_82576: -	case e1000_82580: -	case e1000_i354: -	case e1000_i350: -		cancel_delayed_work_sync(&adapter->ptp_overflow_work); -		break; -	case e1000_i210: -	case e1000_i211: -		/* No delayed work to cancel. */ -		break; -	default: +	if (!(adapter->ptp_flags & IGB_PTP_ENABLED))  		return; -	} + +	if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) +		cancel_delayed_work_sync(&adapter->ptp_overflow_work);  	cancel_work_sync(&adapter->ptp_tx_work);  	if (adapter->ptp_tx_skb) { @@ -1189,12 +1167,23 @@ void igb_ptp_stop(struct igb_adapter *adapter)  		adapter->ptp_tx_skb = NULL;  		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);  	} +} + +/** + * igb_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. 
+ **/ +void igb_ptp_stop(struct igb_adapter *adapter) +{ +	igb_ptp_suspend(adapter);  	if (adapter->ptp_clock) {  		ptp_clock_unregister(adapter->ptp_clock);  		dev_info(&adapter->pdev->dev, "removed PHC on %s\n",  			 adapter->netdev->name); -		adapter->flags &= ~IGB_FLAG_PTP; +		adapter->ptp_flags &= ~IGB_PTP_ENABLED;  	}  } @@ -1209,9 +1198,6 @@ void igb_ptp_reset(struct igb_adapter *adapter)  	struct e1000_hw *hw = &adapter->hw;  	unsigned long flags; -	if (!(adapter->flags & IGB_FLAG_PTP)) -		return; -  	/* reset the tstamp_config */  	igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); @@ -1248,4 +1234,10 @@  	}  out:  	spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + +	wrfl(); + +	if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) +		schedule_delayed_work(&adapter->ptp_overflow_work, +				      IGB_SYSTIM_OVERFLOW_PERIOD);  } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 9f2db1855412..9475ff9055aa 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -804,8 +804,6 @@ struct ixgbe_adapter {  #define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */  	u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; - -	bool need_crosstalk_fix;  };  static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 47afed74a54d..63b25006ac90 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1813,9 +1813,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)  	/* We need to run link autotry after the driver loads */  	hw->mac.autotry_restart = true; -	if (ret_val) -		return ret_val; -  	return ixgbe_verify_fw_version_82599(hw);  } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 902d2061ce73..b4217f30e89c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -277,6 +277,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)  {  	s32 ret_val;  	u32 ctrl_ext; +	u16 device_caps;  	/* Set the media type */  	hw->phy.media_type = hw->mac.ops.get_media_type(hw); @@ -301,6 +302,22 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)  	if (ret_val)  		return ret_val; +	/* Cache bit indicating need for crosstalk fix */ +	switch (hw->mac.type) { +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X550EM_x: +	case ixgbe_mac_x550em_a: +		hw->mac.ops.get_device_caps(hw, &device_caps); +		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) +			hw->need_crosstalk_fix = false; +		else +			hw->need_crosstalk_fix = true; +		break; +	default: +		hw->need_crosstalk_fix = false; +		break; +	} +  	/* Clear adapter stopped flag */  	hw->adapter_stopped = false; @@ -763,6 +780,9 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)  {  	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +	if (index > 3) +		return IXGBE_ERR_PARAM; +  	/* To turn on the LED, set mode to ON. */  	led_reg &= ~IXGBE_LED_MODE_MASK(index);  	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); @@ -781,6 +801,9 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)  {  	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +	if (index > 3) +		return IXGBE_ERR_PARAM; +  	/* To turn off the LED, set mode to OFF. 
*/  	led_reg &= ~IXGBE_LED_MODE_MASK(index);  	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); @@ -2657,7 +2680,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)   **/  s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)  { -	int secrxreg; +	u32 secrxreg;  	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);  	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; @@ -2698,6 +2721,9 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)  	bool locked = false;  	s32 ret_val; +	if (index > 3) +		return IXGBE_ERR_PARAM; +  	/*  	 * Link must be up to auto-blink the LEDs;  	 * Force it if link is down. @@ -2741,6 +2767,9 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)  	bool locked = false;  	s32 ret_val; +	if (index > 3) +		return IXGBE_ERR_PARAM; +  	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);  	if (ret_val)  		return ret_val; @@ -3188,6 +3217,31 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)  }  /** + *  ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix + *  @hw: pointer to hardware structure + * + *  Contains the logic to identify if we need to verify link for the + *  crosstalk fix + **/ +static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) +{ +	/* Does FW say we need the fix */ +	if (!hw->need_crosstalk_fix) +		return false; + +	/* Only consider SFP+ PHYs i.e. media type fiber */ +	switch (hw->mac.ops.get_media_type(hw)) { +	case ixgbe_media_type_fiber: +	case ixgbe_media_type_fiber_qsfp: +		break; +	default: +		return false; +	} + +	return true; +} + +/**   *  ixgbe_check_mac_link_generic - Determine link and speed status   *  @hw: pointer to hardware structure   *  @speed: pointer to link speed @@ -3202,6 +3256,35 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,  	u32 links_reg, links_orig;  	u32 i; +	/* If Crosstalk fix enabled do the sanity check of making sure +	 * the SFP+ cage is full. 
+	 */ +	if (ixgbe_need_crosstalk_fix(hw)) { +		u32 sfp_cage_full; + +		switch (hw->mac.type) { +		case ixgbe_mac_82599EB: +			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & +					IXGBE_ESDP_SDP2; +			break; +		case ixgbe_mac_X550EM_x: +		case ixgbe_mac_x550em_a: +			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & +					IXGBE_ESDP_SDP0; +			break; +		default: +			/* sanity check - No SFP+ devices here */ +			sfp_cage_full = false; +			break; +		} + +		if (!sfp_cage_full) { +			*link_up = false; +			*speed = IXGBE_LINK_SPEED_UNKNOWN; +			return 0; +		} +	} +  	/* clear the old state */  	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 59b771b9b354..0d7209eb5abf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2204,11 +2204,11 @@ static int ixgbe_set_phys_id(struct net_device *netdev,  		return 2;  	case ETHTOOL_ID_ON: -		hw->mac.ops.led_on(hw, IXGBE_LED_ON); +		hw->mac.ops.led_on(hw, hw->bus.func);  		break;  	case ETHTOOL_ID_OFF: -		hw->mac.ops.led_off(hw, IXGBE_LED_ON); +		hw->mac.ops.led_off(hw, hw->bus.func);  		break;  	case ETHTOOL_ID_INACTIVE: @@ -2991,10 +2991,15 @@ static int ixgbe_get_ts_info(struct net_device *dev,  {  	struct ixgbe_adapter *adapter = netdev_priv(dev); +	/* we always support timestamping disabled */ +	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); +  	switch (adapter->hw.mac.type) {  	case ixgbe_mac_X550:  	case ixgbe_mac_X550EM_x:  	case ixgbe_mac_x550em_a: +		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); +		/* fallthrough */  	case ixgbe_mac_X540:  	case ixgbe_mac_82599EB:  		info->so_timestamping = @@ -3014,8 +3019,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,  			BIT(HWTSTAMP_TX_OFF) |  			BIT(HWTSTAMP_TX_ON); -		info->rx_filters = -			BIT(HWTSTAMP_FILTER_NONE) | +		info->rx_filters |=  			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |  			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |  			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 088c47cf27d9..7871f538f0ad 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -50,7 +50,7 @@  #include <linux/if_bridge.h>  #include <linux/prefetch.h>  #include <scsi/fc/fc_fcoe.h> -#include <net/vxlan.h> +#include <net/udp_tunnel.h>  #include <net/pkt_cls.h>  #include <net/tc_act/tc_gact.h>  #include <net/tc_act/tc_mirred.h> @@ -2887,7 +2887,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)  	if (!test_bit(__IXGBE_DOWN, &adapter->state))  		ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx)); -	return 0; +	return min(work_done, budget - 1);  }  /** @@ -3084,7 +3084,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)  		free_irq(entry->vector, q_vector);  	} -	free_irq(adapter->msix_entries[vector++].vector, adapter); +	free_irq(adapter->msix_entries[vector].vector, adapter);  }  /** @@ -5625,7 +5625,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)  	struct pci_dev *pdev = adapter->pdev;  	unsigned int rss, fdir;  	u32 fwsm; -	u16 device_caps;  	int i;  	/* PCI config space info */ @@ -5722,9 +5721,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)  #ifdef CONFIG_IXGBE_DCA  		adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;  #endif -#ifdef CONFIG_IXGBE_VXLAN  		adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; -#endif  		break;  	
default:  		break; @@ -5773,22 +5770,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)  	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;  	adapter->rx_ring_count = IXGBE_DEFAULT_RXD; -	/* Cache bit indicating need for crosstalk fix */ -	switch (hw->mac.type) { -	case ixgbe_mac_82599EB: -	case ixgbe_mac_X550EM_x: -	case ixgbe_mac_x550em_a: -		hw->mac.ops.get_device_caps(hw, &device_caps); -		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) -			adapter->need_crosstalk_fix = false; -		else -			adapter->need_crosstalk_fix = true; -		break; -	default: -		adapter->need_crosstalk_fix = false; -		break; -	} -  	/* set default work limits */  	adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; @@ -6158,9 +6139,7 @@ int ixgbe_open(struct net_device *netdev)  	ixgbe_up_complete(adapter);  	ixgbe_clear_vxlan_port(adapter); -#ifdef CONFIG_IXGBE_VXLAN -	vxlan_get_rx_port(netdev); -#endif +	udp_tunnel_get_rx_info(netdev);  	return 0; @@ -6711,18 +6690,6 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)  		link_up = true;  	} -	/* If Crosstalk fix enabled do the sanity check of making sure -	 * the SFP+ cage is empty. -	 */ -	if (adapter->need_crosstalk_fix) { -		u32 sfp_cage_full; - -		sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & -				IXGBE_ESDP_SDP2; -		if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full) -			link_up = false; -	} -  	if (adapter->ixgbe_ieee_pfc)  		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); @@ -7069,16 +7036,6 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)  	struct ixgbe_hw *hw = &adapter->hw;  	s32 err; -	/* If crosstalk fix enabled verify the SFP+ cage is full */ -	if (adapter->need_crosstalk_fix) { -		u32 sfp_cage_full; - -		sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & -				IXGBE_ESDP_SDP2; -		if (!sfp_cage_full) -			return; -	} -  	/* not searching for SFP so there is nothing to do here */  	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&  	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) @@ -7262,14 +7219,12 @@ static void ixgbe_service_task(struct work_struct *work)  		ixgbe_service_event_complete(adapter);  		return;  	} -#ifdef CONFIG_IXGBE_VXLAN -	rtnl_lock();  	if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { +		rtnl_lock();  		adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; -		vxlan_get_rx_port(adapter->netdev); +		udp_tunnel_get_rx_info(adapter->netdev); +		rtnl_unlock();  	} -	rtnl_unlock(); -#endif /* CONFIG_IXGBE_VXLAN */  	ixgbe_reset_subtask(adapter);  	ixgbe_phy_interrupt_subtask(adapter);  	ixgbe_sfp_detection_subtask(adapter); @@ -7697,7 +7652,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,  	/* snag network header to get L4 type and address */  	skb = first->skb;  	hdr.network = skb_network_header(skb); -#ifdef CONFIG_IXGBE_VXLAN  	if (skb->encapsulation &&  	    first->protocol == htons(ETH_P_IP) &&  	    hdr.ipv4->protocol != IPPROTO_UDP) { @@ -7708,7 +7662,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,  		    udp_hdr(skb)->dest == adapter->vxlan_port)  			hdr.network = skb_inner_network_header(skb);  	} -#endif /* CONFIG_IXGBE_VXLAN */  	/* Currently only IPv4/IPv6 with TCP is supported */  	switch (hdr.ipv4->version) { @@ -8308,14 +8261,53 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)  static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,  			       struct tc_cls_u32_offload *cls)  { +	u32 hdl = cls->knode.handle;  	u32 uhtid = TC_U32_USERHTID(cls->knode.handle); -	u32 loc; -	int err; +	u32 loc = cls->knode.handle & 0xfffff; +	int err = 0, i, j; +	
struct ixgbe_jump_table *jump = NULL; + +	if (loc > IXGBE_MAX_HW_ENTRIES) +		return -EINVAL;  	if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))  		return -EINVAL; -	loc = cls->knode.handle & 0xfffff; +	/* Clear this filter in the link data it is associated with */ +	if (uhtid != 0x800) { +		jump = adapter->jump_tables[uhtid]; +		if (!jump) +			return -EINVAL; +		if (!test_bit(loc - 1, jump->child_loc_map)) +			return -EINVAL; +		clear_bit(loc - 1, jump->child_loc_map); +	} + +	/* Check if the filter being deleted is a link */ +	for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { +		jump = adapter->jump_tables[i]; +		if (jump && jump->link_hdl == hdl) { +			/* Delete filters in the hardware in the child hash +			 * table associated with this link +			 */ +			for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { +				if (!test_bit(j, jump->child_loc_map)) +					continue; +				spin_lock(&adapter->fdir_perfect_lock); +				err = ixgbe_update_ethtool_fdir_entry(adapter, +								      NULL, +								      j + 1); +				spin_unlock(&adapter->fdir_perfect_lock); +				clear_bit(j, jump->child_loc_map); +			} +			/* Remove resources for this link */ +			kfree(jump->input); +			kfree(jump->mask); +			kfree(jump); +			adapter->jump_tables[i] = NULL; +			return err; +		} +	}  	spin_lock(&adapter->fdir_perfect_lock);  	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); @@ -8549,6 +8541,18 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,  		if (!test_bit(link_uhtid - 1, &adapter->tables))  			return err; +		/* Multiple filters as links to the same hash table are not +		 * supported. To add a new filter with the same next header +		 * but different match/jump conditions, create a new hash table +		 * and link to it. +		 */ +		if (adapter->jump_tables[link_uhtid] && +		    (adapter->jump_tables[link_uhtid])->link_hdl) { +			e_err(drv, "Link filter exists for link: %x\n", +			      link_uhtid); +			return err; +		} +  		for (i = 0; nexthdr[i].jump; i++) {  			if (nexthdr[i].o != cls->knode.sel->offoff ||  			    nexthdr[i].s != cls->knode.sel->offshift || @@ -8570,6 +8574,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,  			}  			jump->input = input;  			jump->mask = mask; +			jump->link_hdl = cls->knode.handle; +  			err = ixgbe_clsu32_build_input(input, mask, cls,  						       field_ptr, &nexthdr[i]);  			if (!err) { @@ -8597,6 +8603,20 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,  		if ((adapter->jump_tables[uhtid])->mask)  			memcpy(mask, (adapter->jump_tables[uhtid])->mask,  			       sizeof(*mask)); + +		/* Lookup in all child hash tables if this location is already +		 * filled with a filter +		 */ +		for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { +			struct ixgbe_jump_table *link = adapter->jump_tables[i]; + +			if (link && (test_bit(loc - 1, link->child_loc_map))) { +				e_err(drv, "Filter exists in location: %x\n", +				      loc); +				err = -EINVAL; +				goto err_out; +			} +		}  	}  	err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);  	if (err) @@ -8628,6 +8648,9 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,  		ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);  	spin_unlock(&adapter->fdir_perfect_lock); +	if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) +		set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); +  	kfree(mask);  	return err;  err_out_w_lock: @@ -8770,14 +8793,12 @@ static int ixgbe_set_features(struct net_device *netdev,  	netdev->features = 
features; -#ifdef CONFIG_IXGBE_VXLAN  	if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {  		if (features & NETIF_F_RXCSUM)  			adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;  		else  			ixgbe_clear_vxlan_port(adapter);  	} -#endif /* CONFIG_IXGBE_VXLAN */  	if (need_reset)  		ixgbe_do_reset(netdev); @@ -8788,23 +8809,25 @@ static int ixgbe_set_features(struct net_device *netdev,  	return 0;  } -#ifdef CONFIG_IXGBE_VXLAN  /**   * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up   * @dev: The port's netdev - * @sa_family: Socket Family that VXLAN is notifiying us about - * @port: New UDP port number that VXLAN started listening to + * @ti: Tunnel endpoint information   **/ -static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, -				 __be16 port) +static void ixgbe_add_vxlan_port(struct net_device *dev, +				 struct udp_tunnel_info *ti)  {  	struct ixgbe_adapter *adapter = netdev_priv(dev);  	struct ixgbe_hw *hw = &adapter->hw; +	__be16 port = ti->port; -	if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) +	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)  		return; -	if (sa_family == AF_INET6) +	if (ti->sa_family != AF_INET) +		return; + +	if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))  		return;  	if (adapter->vxlan_port == port) @@ -8824,30 +8847,31 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,  /**   * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away   * @dev: The port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: UDP port number that VXLAN stopped listening to + * @ti: Tunnel endpoint information   **/ -static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, -				 __be16 port) +static void ixgbe_del_vxlan_port(struct net_device *dev, +				 struct udp_tunnel_info *ti)  {  	struct ixgbe_adapter *adapter = netdev_priv(dev); -	if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) +	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)  		return; -	if (sa_family == AF_INET6) +	if (ti->sa_family != AF_INET)  		return; -	if (adapter->vxlan_port != port) { +	if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) +		return; + +	if (adapter->vxlan_port != ti->port) {  		netdev_info(dev, "Port %d was not found, not deleting\n", -			    ntohs(port)); +			    ntohs(ti->port));  		return;  	}  	ixgbe_clear_vxlan_port(adapter);  	adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;  } -#endif /* CONFIG_IXGBE_VXLAN */  static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],  			     struct net_device *dev, @@ -9160,10 +9184,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {  	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,  	.ndo_dfwd_add_station	= ixgbe_fwd_add,  	.ndo_dfwd_del_station	= ixgbe_fwd_del, -#ifdef CONFIG_IXGBE_VXLAN -	.ndo_add_vxlan_port	= ixgbe_add_vxlan_port, -	.ndo_del_vxlan_port	= ixgbe_del_vxlan_port, -#endif /* CONFIG_IXGBE_VXLAN */ +	.ndo_udp_tunnel_add	= ixgbe_add_vxlan_port, +	.ndo_udp_tunnel_del	= ixgbe_del_vxlan_port,  	.ndo_features_check	= ixgbe_features_check,  }; @@ -10051,6 +10073,7 @@ static int __init ixgbe_init_module(void)  	ret = pci_register_driver(&ixgbe_driver);  	if (ret) { +		destroy_workqueue(ixgbe_wq);  		ixgbe_dbg_exit();  		return ret;  	} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index a8bed3d887f7..538a1c5475b6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -42,8 +42,12 @@ struct ixgbe_jump_table {  	struct ixgbe_mat_field *mat;  	struct ixgbe_fdir_filter *input;  	union ixgbe_atr_input *mask; +	u32 link_hdl; +	unsigned long child_loc_map[32];  }; +#define IXGBE_MAX_HW_ENTRIES 2045 +  static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,  				     union ixgbe_atr_input *mask,  				     u32 val, u32 m) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index c5caacdd193d..8618599dfd6f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -954,6 +954,7 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,  			struct ixgbe_hw *hw = &adapter->hw;  			hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); +			hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);  		}  	} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index da3d8358fee0..1248a9936f7a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3525,6 +3525,7 @@ struct ixgbe_hw {  	bool				force_full_reset;  	bool				allow_unsupported_sfp;  	bool				wol_enabled; +	bool				need_crosstalk_fix;  };  struct ixgbe_info { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 19b75cd98682..4716ca499e67 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1618,6 +1618,8 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)  {  	struct ixgbe_mac_info *mac = &hw->mac; +	mac->ops.setup_fc = ixgbe_setup_fc_x550em; +  	switch (mac->ops.get_media_type(hw)) {  	case ixgbe_media_type_fiber:  		/* CS4227 does not support autoneg, so disable the laser control @@ -1627,7 +1629,6 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)  		mac->ops.enable_tx_laser = NULL;  		mac->ops.flap_tx_laser = NULL;  		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; -		mac->ops.setup_fc = ixgbe_setup_fc_x550em;  		switch (hw->device_id) {  		case IXGBE_DEV_ID_X550EM_A_SFP_N:  			mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n; @@ -1655,7 +1656,6 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)  			mac->ops.setup_link = ixgbe_setup_sgmii;  		break;  	default: -		mac->ops.setup_fc = ixgbe_setup_fc_x550em;  		break;  	}  } diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index ae09d60e7b67..8617cae2f801 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -32,6 +32,7 @@  #define IXGBE_DEV_ID_X540_VF		0x1515  #define IXGBE_DEV_ID_X550_VF		0x1565  #define IXGBE_DEV_ID_X550EM_X_VF	0x15A8 +#define IXGBE_DEV_ID_X550EM_A_VF	0x15C5  #define IXGBE_DEV_ID_82599_VF_HV	0x152E  #define IXGBE_DEV_ID_X540_VF_HV		0x1530 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index d5944c391cbb..be52f597688b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -457,6 +457,7 @@ enum ixgbevf_boards {  	board_X550_vf_hv,  	board_X550EM_x_vf,  	board_X550EM_x_vf_hv, +	board_x550em_a_vf,  };  enum ixgbevf_xcast_modes { @@ -470,6 +471,7 @@ extern const struct ixgbevf_info ixgbevf_X540_vf_info;  extern const struct ixgbevf_info 
ixgbevf_X550_vf_info;  extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;  extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; +extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;  extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;  extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index acc24010cfe0..d9d6616f02a4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -56,7 +56,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";  static const char ixgbevf_driver_string[] =  	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; -#define DRV_VERSION "2.12.1-k" +#define DRV_VERSION "3.2.2-k"  const char ixgbevf_driver_version[] = DRV_VERSION;  static char ixgbevf_copyright[] =  	"Copyright (c) 2009 - 2015 Intel Corporation."; @@ -70,6 +70,7 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {  	[board_X550_vf_hv]	= &ixgbevf_X550_vf_hv_info,  	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,  	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info, +	[board_x550em_a_vf]	= &ixgbevf_x550em_a_vf_info,  };  /* ixgbevf_pci_tbl - PCI Device ID Table @@ -89,6 +90,7 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {  	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },  	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },  	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, +	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },  	/* required last entry */  	{0, }  }; @@ -1800,16 +1802,19 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,   **/  static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)  { -	int i;  	struct ixgbe_hw *hw = &adapter->hw;  	struct net_device *netdev = adapter->netdev; +	int i, ret;  	ixgbevf_setup_psrtype(adapter);  	if (hw->mac.type >= ixgbe_mac_X550_vf)  		ixgbevf_setup_vfmrqc(adapter);  	/* notify the PF of our intent to use this size of frame */ -	hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); +	ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); +	if (ret) +		dev_err(&adapter->pdev->dev, +			"Failed to set MTU at %d\n", netdev->mtu);  	/* Setup the HW Rx Head and Tail Descriptor Pointers and  	 * the Base and Length of the Rx Descriptor Ring @@ -2772,12 +2777,15 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)  	/* If we're already down or resetting, just bail */  	if (test_bit(__IXGBEVF_DOWN, &adapter->state) || +	    test_bit(__IXGBEVF_REMOVING, &adapter->state) ||  	    test_bit(__IXGBEVF_RESETTING, &adapter->state))  		return;  	adapter->tx_timeout_count++; +	rtnl_lock();  	ixgbevf_reinit_locked(adapter); +	rtnl_unlock();  }  /** @@ -3732,6 +3740,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)  	struct ixgbe_hw *hw = &adapter->hw;  	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;  	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; +	int ret;  	switch (adapter->hw.api_version) {  	case ixgbe_mbox_api_11: @@ -3748,14 +3757,17 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)  	if ((new_mtu < 68) || (max_frame > max_possible_frame))  		return -EINVAL; +	/* notify the PF of our intent to use this size of frame */ +	ret = hw->mac.ops.set_rlpml(hw, max_frame); +	if (ret) +		return -EINVAL; +  	hw_dbg(hw, "changing MTU 
from %d to %d\n",  	       netdev->mtu, new_mtu); +  	/* must set new MTU before calling down or up */  	netdev->mtu = new_mtu; -	/* notify the PF of our intent to use this size of frame */ -	hw->mac.ops.set_rlpml(hw, max_frame); -  	return 0;  } diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index 61a80da8b6f0..2819abc454c7 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)  static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)  {  	struct ixgbe_mbx_info *mbx = &hw->mbx; -	s32 ret_val = -IXGBE_ERR_MBX; +	s32 ret_val = IXGBE_ERR_MBX;  	if (!mbx->ops.read)  		goto out; @@ -111,7 +111,7 @@ out:  static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)  {  	struct ixgbe_mbx_info *mbx = &hw->mbx; -	s32 ret_val = -IXGBE_ERR_MBX; +	s32 ret_val = IXGBE_ERR_MBX;  	/* exit if either we can't write or there isn't a defined timeout */  	if (!mbx->ops.write || !mbx->timeout) diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index e670d3b19c3c..a52f70ec42b6 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -33,6 +33,18 @@   */  #define IXGBE_HV_RESET_OFFSET           0x201 +static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg, +					     u32 *retmsg, u16 size) +{ +	struct ixgbe_mbx_info *mbx = &hw->mbx; +	s32 retval = mbx->ops.write_posted(hw, msg, size); + +	if (retval) +		return retval; + +	return mbx->ops.read_posted(hw, retmsg, size); +} +  /**   *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx   *  @hw: pointer to hardware structure @@ -255,8 +267,7 @@ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)  static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)  { -	struct ixgbe_mbx_info *mbx = &hw->mbx; -	u32 msgbuf[3]; +	u32 msgbuf[3], msgbuf_chk;  	u8 *msg_addr = (u8 *)(&msgbuf[1]);  	s32 ret_val; @@ -268,19 +279,18 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)  	 */  	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;  	msgbuf[0] |= IXGBE_VF_SET_MACVLAN; +	msgbuf_chk = msgbuf[0]; +  	if (addr)  		ether_addr_copy(msg_addr, addr); -	ret_val = mbx->ops.write_posted(hw, msgbuf, 3); -	if (!ret_val) -		ret_val = mbx->ops.read_posted(hw, msgbuf, 3); +	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3); +	if (!ret_val) { +		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; -	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - -	if (!ret_val) -		if (msgbuf[0] == -		    (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK)) -			ret_val = -ENOMEM; +		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK)) +			return -ENOMEM; +	}  	return ret_val;  } @@ -423,7 +433,6 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)  static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,  			      u32 vmdq)  { -	struct ixgbe_mbx_info *mbx = &hw->mbx;  	u32 msgbuf[3];  	u8 *msg_addr = (u8 *)(&msgbuf[1]);  	s32 ret_val; @@ -431,10 +440,8 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,  	memset(msgbuf, 0, sizeof(msgbuf));  	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;  	ether_addr_copy(msg_addr, addr); -	ret_val = mbx->ops.write_posted(hw, msgbuf, 3); -	if (!ret_val) -		ret_val = mbx->ops.read_posted(hw, msgbuf, 3); +	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);  	msgbuf[0] &= 
~IXGBE_VT_MSGTYPE_CTS; @@ -468,17 +475,6 @@ static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,  	return -EOPNOTSUPP;  } -static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, -				       u32 *msg, u16 size) -{ -	struct ixgbe_mbx_info *mbx = &hw->mbx; -	u32 retmsg[IXGBE_VFMAILBOX_SIZE]; -	s32 retval = mbx->ops.write_posted(hw, msg, size); - -	if (!retval) -		mbx->ops.read_posted(hw, retmsg, size); -} -  /**   *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses   *  @hw: pointer to the HW structure @@ -519,7 +515,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,  		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);  	} -	ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); +	ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);  	return 0;  } @@ -542,7 +538,6 @@ static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,   **/  static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)  { -	struct ixgbe_mbx_info *mbx = &hw->mbx;  	u32 msgbuf[2];  	s32 err; @@ -556,11 +551,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)  	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;  	msgbuf[1] = xcast_mode; -	err = mbx->ops.write_posted(hw, msgbuf, 2); -	if (err) -		return err; - -	err = mbx->ops.read_posted(hw, msgbuf, 2); +	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);  	if (err)  		return err; @@ -589,7 +580,6 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)  static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,  			       bool vlan_on)  { -	struct ixgbe_mbx_info *mbx = &hw->mbx;  	u32 msgbuf[2];  	s32 err; @@ -598,11 +588,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,  	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */  	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; -	err = mbx->ops.write_posted(hw, msgbuf, 2); -	if (err) -		goto mbx_err; - -	err = mbx->ops.read_posted(hw, msgbuf, 2); +	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);  	if (err)  		goto mbx_err; @@ -797,13 +783,22 @@ out:   *  @hw: pointer to the HW structure   *  @max_size: value to assign to max frame size   **/ -static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) +static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)  {  	u32 msgbuf[2]; +	s32 ret_val;  	msgbuf[0] = IXGBE_VF_SET_LPE;  	msgbuf[1] = max_size; -	ixgbevf_write_msg_read_ack(hw, msgbuf, 2); + +	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); +	if (ret_val) +		return ret_val; +	if ((msgbuf[0] & IXGBE_VF_SET_LPE) && +	    (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK)) +		return IXGBE_ERR_MBX; + +	return 0;  }  /** @@ -812,7 +807,7 @@ static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)   * @max_size: value to assign to max frame size   * Hyper-V variant.   
**/ -static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) +static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)  {  	u32 reg; @@ -823,6 +818,8 @@ static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)  	/* CRC == 4 */  	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);  	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); + +	return 0;  }  /** @@ -839,11 +836,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)  	msg[0] = IXGBE_VF_API_NEGOTIATE;  	msg[1] = api;  	msg[2] = 0; -	err = hw->mbx.ops.write_posted(hw, msg, 3); - -	if (!err) -		err = hw->mbx.ops.read_posted(hw, msg, 3); +	err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3);  	if (!err) {  		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -892,11 +886,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,  	/* Fetch queue configuration from the PF */  	msg[0] = IXGBE_VF_GET_QUEUE;  	msg[1] = msg[2] = msg[3] = msg[4] = 0; -	err = hw->mbx.ops.write_posted(hw, msg, 5); - -	if (!err) -		err = hw->mbx.ops.read_posted(hw, msg, 5); +	err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5);  	if (!err) {  		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -1005,3 +996,8 @@ const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {  	.mac = ixgbe_mac_X550EM_x_vf,  	.mac_ops = &ixgbevf_hv_mac_ops,  }; + +const struct ixgbevf_info ixgbevf_x550em_a_vf_info = { +	.mac = ixgbe_mac_x550em_a_vf, +	.mac_ops = &ixgbevf_mac_ops, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 2cac610f32ba..04d8d4ee4f04 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -69,7 +69,7 @@ struct ixgbe_mac_operations {  	s32 (*disable_mc)(struct ixgbe_hw *);  	s32 (*clear_vfta)(struct ixgbe_hw *);  	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); -	void (*set_rlpml)(struct ixgbe_hw *, u16); +	s32 (*set_rlpml)(struct ixgbe_hw *, u16);  };  enum ixgbe_mac_type { @@ -78,6 +78,7 @@ enum ixgbe_mac_type {  	ixgbe_mac_X540_vf,  	ixgbe_mac_X550_vf,  	ixgbe_mac_X550EM_x_vf, +	ixgbe_mac_x550em_a_vf,  	ixgbe_num_macs  };  |