diff options
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe')
| -rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe.h | 134 | ||||
| -rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 23 | ||||
| -rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h | 2 | ||||
| -rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 1 | ||||
| -rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 3 | ||||
| -rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 40 | ||||
| -rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 2 | ||||
| -rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 69 | 
8 files changed, 266 insertions, 8 deletions
| diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index ca932387a80f..7be725cdfea8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -52,6 +52,11 @@  #include <linux/dca.h>  #endif +#include <net/busy_poll.h> + +#ifdef CONFIG_NET_LL_RX_POLL +#define LL_EXTENDED_STATS +#endif  /* common prefix used by pr_<> macros */  #undef pr_fmt  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -182,6 +187,11 @@ struct ixgbe_rx_buffer {  struct ixgbe_queue_stats {  	u64 packets;  	u64 bytes; +#ifdef LL_EXTENDED_STATS +	u64 yields; +	u64 misses; +	u64 cleaned; +#endif  /* LL_EXTENDED_STATS */  };  struct ixgbe_tx_queue_stats { @@ -356,9 +366,133 @@ struct ixgbe_q_vector {  	struct rcu_head rcu;	/* to avoid race with update stats on free */  	char name[IFNAMSIZ + 9]; +#ifdef CONFIG_NET_LL_RX_POLL +	unsigned int state; +#define IXGBE_QV_STATE_IDLE        0 +#define IXGBE_QV_STATE_NAPI	   1    /* NAPI owns this QV */ +#define IXGBE_QV_STATE_POLL	   2    /* poll owns this QV */ +#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL) +#define IXGBE_QV_STATE_NAPI_YIELD  4    /* NAPI yielded this QV */ +#define IXGBE_QV_STATE_POLL_YIELD  8    /* poll yielded this QV */ +#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD) +#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD) +	spinlock_t lock; +#endif  /* CONFIG_NET_LL_RX_POLL */ +  	/* for dynamic allocation of rings associated with this q_vector */  	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;  }; +#ifdef CONFIG_NET_LL_RX_POLL +static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) +{ + +	spin_lock_init(&q_vector->lock); +	q_vector->state = IXGBE_QV_STATE_IDLE; +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) +{ +	int rc = true; +	
spin_lock(&q_vector->lock); +	if (q_vector->state & IXGBE_QV_LOCKED) { +		WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI); +		q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD; +		rc = false; +#ifdef LL_EXTENDED_STATS +		q_vector->tx.ring->stats.yields++; +#endif +	} else +		/* we don't care if someone yielded */ +		q_vector->state = IXGBE_QV_STATE_NAPI; +	spin_unlock(&q_vector->lock); +	return rc; +} + +/* returns true if someone tried to get the qv while napi had it */ +static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +{ +	int rc = false; +	spin_lock(&q_vector->lock); +	WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL | +			       IXGBE_QV_STATE_NAPI_YIELD)); + +	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) +		rc = true; +	q_vector->state = IXGBE_QV_STATE_IDLE; +	spin_unlock(&q_vector->lock); +	return rc; +} + +/* called from ixgbe_low_latency_poll() */ +static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) +{ +	int rc = true; +	spin_lock_bh(&q_vector->lock); +	if ((q_vector->state & IXGBE_QV_LOCKED)) { +		q_vector->state |= IXGBE_QV_STATE_POLL_YIELD; +		rc = false; +#ifdef LL_EXTENDED_STATS +		q_vector->rx.ring->stats.yields++; +#endif +	} else +		/* preserve yield marks */ +		q_vector->state |= IXGBE_QV_STATE_POLL; +	spin_unlock_bh(&q_vector->lock); +	return rc; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +{ +	int rc = false; +	spin_lock_bh(&q_vector->lock); +	WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI)); + +	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) +		rc = true; +	q_vector->state = IXGBE_QV_STATE_IDLE; +	spin_unlock_bh(&q_vector->lock); +	return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) +{ +	WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED)); +	return q_vector->state & IXGBE_QV_USER_PEND; +} +#else /* 
CONFIG_NET_LL_RX_POLL */ +static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) +{ +} + +static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) +{ +	return true; +} + +static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +{ +	return false; +} + +static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) +{ +	return false; +} + +static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +{ +	return false; +} + +static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) +{ +	return false; +} +#endif /* CONFIG_NET_LL_RX_POLL */ +  #ifdef CONFIG_IXGBE_HWMON  #define IXGBE_HWMON_TYPE_LOC		0 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 1f2c805684dd..e055e000131b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -380,3 +380,26 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,  	}  	return 0;  } + +static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map) +{ +	u32 reg, i; + +	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); +	for (i = 0; i < MAX_USER_PRIORITY; i++) +		map[i] = IXGBE_RTRUP2TC_UP_MASK & +			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); +	return; +} + +void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) +{ +	switch (hw->mac.type) { +	case ixgbe_mac_82599EB: +	case ixgbe_mac_X540: +		ixgbe_dcb_read_rtrup2tc_82599(hw, map); +		break; +	default: +		break; +	} +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index 1634de8b627f..fc0a2dd52499 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -159,6 +159,8 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,  s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);  s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config 
*); +void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map); +  /* DCB definitions for credit calculation */  #define DCB_CREDIT_QUANTUM	64   /* DCB Quantum */  #define MAX_CREDIT_REFILL       511  /* 0x1FF * 64B = 32704B */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h index a4ef07631d1e..d71d9ce3e394 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -45,6 +45,7 @@  /* Receive UP2TC mapping */  #define IXGBE_RTRUP2TC_UP_SHIFT 3 +#define IXGBE_RTRUP2TC_UP_MASK	7  /* Transmit UP2TC mapping */  #define IXGBE_RTTUP2TC_UP_SHIFT 3 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index f3d68f9696ba..edd89a1ef27f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -554,6 +554,9 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,  		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)  			adapter->ixgbe_ieee_ets->prio_tc[i] =  				IEEE_8021QAZ_MAX_TCS; +		/* if possible update UP2TC mappings from HW */ +		ixgbe_dcb_read_rtrup2tc(&adapter->hw, +					adapter->ixgbe_ieee_ets->prio_tc);  	}  	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index d3754722adb4..24e2e7aafda2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1054,6 +1054,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,  			data[i] = 0;  			data[i+1] = 0;  			i += 2; +#ifdef LL_EXTENDED_STATS +			data[i] = 0; +			data[i+1] = 0; +			data[i+2] = 0; +			i += 3; +#endif  			continue;  		} @@ -1063,6 +1069,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,  			data[i+1] = ring->stats.bytes;  		} while 
(u64_stats_fetch_retry_bh(&ring->syncp, start));  		i += 2; +#ifdef LL_EXTENDED_STATS +		data[i] = ring->stats.yields; +		data[i+1] = ring->stats.misses; +		data[i+2] = ring->stats.cleaned; +		i += 3; +#endif  	}  	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {  		ring = adapter->rx_ring[j]; @@ -1070,6 +1082,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,  			data[i] = 0;  			data[i+1] = 0;  			i += 2; +#ifdef LL_EXTENDED_STATS +			data[i] = 0; +			data[i+1] = 0; +			data[i+2] = 0; +			i += 3; +#endif  			continue;  		} @@ -1079,6 +1097,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,  			data[i+1] = ring->stats.bytes;  		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));  		i += 2; +#ifdef LL_EXTENDED_STATS +		data[i] = ring->stats.yields; +		data[i+1] = ring->stats.misses; +		data[i+2] = ring->stats.cleaned; +		i += 3; +#endif  	}  	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { @@ -1115,12 +1139,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,  			p += ETH_GSTRING_LEN;  			sprintf(p, "tx_queue_%u_bytes", i);  			p += ETH_GSTRING_LEN; +#ifdef LL_EXTENDED_STATS +			sprintf(p, "tx_q_%u_napi_yield", i); +			p += ETH_GSTRING_LEN; +			sprintf(p, "tx_q_%u_misses", i); +			p += ETH_GSTRING_LEN; +			sprintf(p, "tx_q_%u_cleaned", i); +			p += ETH_GSTRING_LEN; +#endif /* LL_EXTENDED_STATS */  		}  		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {  			sprintf(p, "rx_queue_%u_packets", i);  			p += ETH_GSTRING_LEN;  			sprintf(p, "rx_queue_%u_bytes", i);  			p += ETH_GSTRING_LEN; +#ifdef LL_EXTENDED_STATS +			sprintf(p, "rx_q_%u_ll_poll_yield", i); +			p += ETH_GSTRING_LEN; +			sprintf(p, "rx_q_%u_misses", i); +			p += ETH_GSTRING_LEN; +			sprintf(p, "rx_q_%u_cleaned", i); +			p += ETH_GSTRING_LEN; +#endif /* LL_EXTENDED_STATS */  		}  		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {  			sprintf(p, "tx_pb_%u_pxon", i); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index ef5f7a678ce1..90b4e1089ecc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -811,6 +811,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,  	/* initialize NAPI */  	netif_napi_add(adapter->netdev, &q_vector->napi,  		       ixgbe_poll, 64); +	napi_hash_add(&q_vector->napi);  	/* tie q_vector and adapter together */  	adapter->q_vector[v_idx] = q_vector; @@ -931,6 +932,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)  		adapter->rx_ring[ring->queue_index] = NULL;  	adapter->q_vector[v_idx] = NULL; +	napi_hash_del(&q_vector->napi);  	netif_napi_del(&q_vector->napi);  	/* diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d30fbdd81fca..bad8f14b1941 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1504,7 +1504,9 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,  {  	struct ixgbe_adapter *adapter = q_vector->adapter; -	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) +	if (ixgbe_qv_ll_polling(q_vector)) +		netif_receive_skb(skb); +	else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))  		napi_gro_receive(&q_vector->napi, skb);  	else  		netif_rx(skb); @@ -1892,9 +1894,9 @@ dma_sync:   * expensive overhead for IOMMU access this provides a means of avoiding   * it by maintaining the mapping of the page to the syste.   
* - * Returns true if all work is completed without reaching budget + * Returns amount of work completed   **/ -static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, +static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,  			       struct ixgbe_ring *rx_ring,  			       const int budget)  { @@ -1976,6 +1978,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,  		}  #endif /* IXGBE_FCOE */ +		skb_mark_napi_id(skb, &q_vector->napi);  		ixgbe_rx_skb(q_vector, skb);  		/* update budget accounting */ @@ -1992,9 +1995,43 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,  	if (cleaned_count)  		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); -	return (total_rx_packets < budget); +	return total_rx_packets;  } +#ifdef CONFIG_NET_LL_RX_POLL +/* must be called with local_bh_disable()d */ +static int ixgbe_low_latency_recv(struct napi_struct *napi) +{ +	struct ixgbe_q_vector *q_vector = +			container_of(napi, struct ixgbe_q_vector, napi); +	struct ixgbe_adapter *adapter = q_vector->adapter; +	struct ixgbe_ring  *ring; +	int found = 0; + +	if (test_bit(__IXGBE_DOWN, &adapter->state)) +		return LL_FLUSH_FAILED; + +	if (!ixgbe_qv_lock_poll(q_vector)) +		return LL_FLUSH_BUSY; + +	ixgbe_for_each_ring(ring, q_vector->rx) { +		found = ixgbe_clean_rx_irq(q_vector, ring, 4); +#ifdef LL_EXTENDED_STATS +		if (found) +			ring->stats.cleaned += found; +		else +			ring->stats.misses++; +#endif +		if (found) +			break; +	} + +	ixgbe_qv_unlock_poll(q_vector); + +	return found; +} +#endif	/* CONFIG_NET_LL_RX_POLL */ +  /**   * ixgbe_configure_msix - Configure MSI-X hardware   * @adapter: board private structure @@ -2550,6 +2587,9 @@ int ixgbe_poll(struct napi_struct *napi, int budget)  	ixgbe_for_each_ring(ring, q_vector->tx)  		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); +	if (!ixgbe_qv_lock_napi(q_vector)) +		return budget; +  	/* attempt to distribute budget to each queue fairly, but don't allow  	 * the budget to go below 1 
because we'll exit polling */  	if (q_vector->rx.count > 1) @@ -2558,9 +2598,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)  		per_ring_budget = budget;  	ixgbe_for_each_ring(ring, q_vector->rx) -		clean_complete &= ixgbe_clean_rx_irq(q_vector, ring, -						     per_ring_budget); +		clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring, +				   per_ring_budget) < per_ring_budget); +	ixgbe_qv_unlock_napi(q_vector);  	/* If all work not completed, return budget and keep polling */  	if (!clean_complete)  		return budget; @@ -3747,16 +3788,25 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)  {  	int q_idx; -	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) +	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { +		ixgbe_qv_init_lock(adapter->q_vector[q_idx]);  		napi_enable(&adapter->q_vector[q_idx]->napi); +	}  }  static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)  {  	int q_idx; -	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) +	local_bh_disable(); /* for ixgbe_qv_lock_napi() */ +	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {  		napi_disable(&adapter->q_vector[q_idx]->napi); +		while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) { +			pr_info("QV %d locked\n", q_idx); +			mdelay(1); +		} +	} +	local_bh_enable();  }  #ifdef CONFIG_IXGBE_DCB @@ -7177,6 +7227,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {  #ifdef CONFIG_NET_POLL_CONTROLLER  	.ndo_poll_controller	= ixgbe_netpoll,  #endif +#ifdef CONFIG_NET_LL_RX_POLL +	.ndo_busy_poll		= ixgbe_low_latency_recv, +#endif  #ifdef IXGBE_FCOE  	.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,  	.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, |