Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
-rw-r--r--   drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 54
1 file changed, 39 insertions, 15 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 13c4782b920a..0f9f022260d7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -165,6 +165,9 @@ MODULE_AUTHOR("Intel Corporation, <[email protected]>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL v2");
 
+DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
+EXPORT_SYMBOL(ixgbe_xdp_locking_key);
+
 static struct workqueue_struct *ixgbe_wq;
 
 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
@@ -2197,6 +2200,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 {
 	int err, result = IXGBE_XDP_PASS;
 	struct bpf_prog *xdp_prog;
+	struct ixgbe_ring *ring;
 	struct xdp_frame *xdpf;
 	u32 act;
 
@@ -2215,7 +2219,12 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 		xdpf = xdp_convert_buff_to_frame(xdp);
 		if (unlikely(!xdpf))
 			goto out_failure;
-		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+		ring = ixgbe_determine_xdp_ring(adapter);
+		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+			spin_lock(&ring->tx_lock);
+		result = ixgbe_xmit_xdp_ring(ring, xdpf);
+		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+			spin_unlock(&ring->tx_lock);
 		if (result == IXGBE_XDP_CONSUMED)
 			goto out_failure;
 		break;
@@ -2422,13 +2431,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		xdp_do_flush_map();
 
 	if (xdp_xmit & IXGBE_XDP_TX) {
-		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
 
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.
-		 */
-		wmb();
-		writel(ring->next_to_use, ring->tail);
+		ixgbe_xdp_ring_update_tail_locked(ring);
 	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
@@ -6320,7 +6325,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 	if (ixgbe_init_rss_key(adapter))
 		return -ENOMEM;
 
-	adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
+	adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL);
 	if (!adapter->af_xdp_zc_qps)
 		return -ENOMEM;
 
@@ -8536,10 +8541,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 }
 #endif
 
-int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
 			struct xdp_frame *xdpf)
 {
-	struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 	struct ixgbe_tx_buffer *tx_buffer;
 	union ixgbe_adv_tx_desc *tx_desc;
 	u32 len, cmd_type;
@@ -8788,7 +8792,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	eth_hw_addr_set(netdev, addr->sa_data);
 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
 	ixgbe_mac_set_default_filter(adapter);
@@ -10131,8 +10135,13 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 			return -EINVAL;
 	}
 
-	if (nr_cpu_ids > MAX_XDP_QUEUES)
+	/* if the number of cpus is much larger than the maximum of queues,
+	 * we should stop it and then return with ENOMEM like before.
+	 */
+	if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2)
 		return -ENOMEM;
+	else if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
+		static_branch_inc(&ixgbe_xdp_locking_key);
 
 	old_prog = xchg(&adapter->xdp_prog, prog);
 	need_reset = (!!prog != !!old_prog);
@@ -10199,6 +10208,15 @@ void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
 	writel(ring->next_to_use, ring->tail);
 }
 
+void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring)
+{
+	if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+		spin_lock(&ring->tx_lock);
+	ixgbe_xdp_ring_update_tail(ring);
+	if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+		spin_unlock(&ring->tx_lock);
+}
+
 static int ixgbe_xdp_xmit(struct net_device *dev, int n,
 			  struct xdp_frame **frames, u32 flags)
 {
@@ -10216,18 +10234,21 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
 	/* During program transitions its possible adapter->xdp_prog is assigned
 	 * but ring has not been configured yet. In this case simply abort xmit.
 	 */
-	ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
+	ring = adapter->xdp_prog ? ixgbe_determine_xdp_ring(adapter) : NULL;
 	if (unlikely(!ring))
 		return -ENXIO;
 
 	if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
 		return -ENXIO;
 
+	if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+		spin_lock(&ring->tx_lock);
+
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
 		int err;
 
-		err = ixgbe_xmit_xdp_ring(adapter, xdpf);
+		err = ixgbe_xmit_xdp_ring(ring, xdpf);
 		if (err != IXGBE_XDP_TX)
 			break;
 		nxmit++;
@@ -10236,6 +10257,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
 	if (unlikely(flags & XDP_XMIT_FLUSH))
 		ixgbe_xdp_ring_update_tail(ring);
 
+	if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+		spin_unlock(&ring->tx_lock);
+
 	return nxmit;
 }
 
@@ -10903,7 +10927,7 @@ skip_sriov:
 	eth_platform_get_mac_address(&adapter->pdev->dev,
 				     adapter->hw.mac.perm_addr);
 
-	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+	eth_hw_addr_set(netdev, hw->mac.perm_addr);
 
 	if (!is_valid_ether_addr(netdev->dev_addr)) {
 		e_dev_err("invalid MAC address\n");
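The helpers this patch calls, ixgbe_determine_xdp_ring() and the per-ring tx_lock, come from companion changes to ixgbe.h and the ring setup code that are outside this file, so the diff above only shows their call sites. For orientation, here is a minimal sketch of how that ring selection presumably fits together; the helper bodies (and the ixgbe_determine_xdp_q_idx() split) are assumptions inferred from the call sites, not contents of this patch. The idea is that ixgbe_xdp_locking_key is enabled only when nr_cpu_ids exceeds IXGBE_MAX_XDP_QS, in which case several CPUs fold onto the same XDP TX ring and the spinlock becomes necessary.

/* Sketch only: assumed shape of the ixgbe.h helpers referenced above.
 * Relies on ixgbe_xdp_locking_key, IXGBE_MAX_XDP_QS and struct
 * ixgbe_adapter from the ixgbe headers; the real definitions live in
 * the companion header change, not in ixgbe_main.c.
 */
DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);

static inline int ixgbe_determine_xdp_q_idx(int cpu)
{
	/* With one XDP TX ring per CPU the id maps directly; once the
	 * static key is enabled, fold the CPU id onto the rings we have.
	 */
	if (static_key_enabled(&ixgbe_xdp_locking_key))
		return cpu % IXGBE_MAX_XDP_QS;
	else
		return cpu;
}

static inline
struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter)
{
	int index = ixgbe_determine_xdp_q_idx(smp_processor_id());

	return adapter->xdp_ring[index];
}

Guarding the spin_lock()/spin_unlock() pairs with static_branch_unlikely() keeps the common case of one XDP ring per CPU free of locking overhead: the branch is patched out at runtime and only becomes active once ixgbe_xdp_setup() bumps the key because nr_cpu_ids > IXGBE_MAX_XDP_QS.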