Diffstat (limited to 'drivers/net/ethernet/intel/igc/igc_main.c')
-rw-r--r--	drivers/net/ethernet/intel/igc/igc_main.c	210
1 file changed, 180 insertions, 30 deletions
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 1586e1e435c6..44b1740dc098 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1000,25 +1000,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
 	return netdev_mc_count(netdev);
 }
 
-static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
+static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
+				bool *first_flag, bool *insert_empty)
 {
+	struct igc_adapter *adapter = netdev_priv(ring->netdev);
 	ktime_t cycle_time = adapter->cycle_time;
 	ktime_t base_time = adapter->base_time;
+	ktime_t now = ktime_get_clocktai();
+	ktime_t baset_est, end_of_cycle;
 	u32 launchtime;
+	s64 n;
 
-	/* FIXME: when using ETF together with taprio, we may have a
-	 * case where 'delta' is larger than the cycle_time, this may
-	 * cause problems if we don't read the current value of
-	 * IGC_BASET, as the value writen into the launchtime
-	 * descriptor field may be misinterpreted.
+	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
+
+	baset_est = ktime_add_ns(base_time, cycle_time * (n));
+	end_of_cycle = ktime_add_ns(baset_est, cycle_time);
+
+	if (ktime_compare(txtime, end_of_cycle) >= 0) {
+		if (baset_est != ring->last_ff_cycle) {
+			*first_flag = true;
+			ring->last_ff_cycle = baset_est;
+
+			if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
+				*insert_empty = true;
+		}
+	}
+
+	/* Introducing a window at end of cycle on which packets
+	 * potentially not honor launchtime. Window of 5us chosen
+	 * considering software update the tail pointer and packets
+	 * are dma'ed to packet buffer.
 	 */
-	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
+	if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
+		netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
+			    txtime);
+
+	ring->last_tx_cycle = end_of_cycle;
+
+	launchtime = ktime_sub_ns(txtime, baset_est);
+	if (launchtime > 0)
+		div_s64_rem(launchtime, cycle_time, &launchtime);
+	else
+		launchtime = 0;
 
 	return cpu_to_le32(launchtime);
 }
 
+static int igc_init_empty_frame(struct igc_ring *ring,
+				struct igc_tx_buffer *buffer,
+				struct sk_buff *skb)
+{
+	unsigned int size;
+	dma_addr_t dma;
+
+	size = skb_headlen(skb);
+
+	dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(ring->dev, dma)) {
+		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+		return -ENOMEM;
+	}
+
+	buffer->skb = skb;
+	buffer->protocol = 0;
+	buffer->bytecount = skb->len;
+	buffer->gso_segs = 1;
+	buffer->time_stamp = jiffies;
+	dma_unmap_len_set(buffer, len, skb->len);
+	dma_unmap_addr_set(buffer, dma, dma);
+
+	return 0;
+}
+
+static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
+					struct sk_buff *skb,
+					struct igc_tx_buffer *first)
+{
+	union igc_adv_tx_desc *desc;
+	u32 cmd_type, olinfo_status;
+	int err;
+
+	if (!igc_desc_unused(ring))
+		return -EBUSY;
+
+	err = igc_init_empty_frame(ring, first, skb);
+	if (err)
+		return err;
+
+	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+		   first->bytecount;
+	olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+
+	desc = IGC_TX_DESC(ring, ring->next_to_use);
+	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));
+
+	netdev_tx_sent_queue(txring_txq(ring), skb->len);
+
+	first->next_to_watch = desc;
+
+	ring->next_to_use++;
+	if (ring->next_to_use == ring->count)
+		ring->next_to_use = 0;
+
+	return 0;
+}
+
+#define IGC_EMPTY_FRAME_SIZE 60
+
 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
-			    struct igc_tx_buffer *first,
+			    __le32 launch_time, bool first_flag,
 			    u32 vlan_macip_lens, u32 type_tucmd,
 			    u32 mss_l4len_idx)
 {
@@ -1037,26 +1130,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
 	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
 		mss_l4len_idx |= tx_ring->reg_idx << 4;
 
+	if (first_flag)
+		mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
+
 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
-
-	/* We assume there is always a valid Tx time available. Invalid times
-	 * should have been handled by the upper layers.
-	 */
-	if (tx_ring->launchtime_enable) {
-		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
-		ktime_t txtime = first->skb->tstamp;
-
-		skb_txtime_consumed(first->skb);
-		context_desc->launch_time = igc_tx_launchtime(adapter,
-							      txtime);
-	} else {
-		context_desc->launch_time = 0;
-	}
+	context_desc->launch_time	= launch_time;
 }
 
-static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
+static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
+			__le32 launch_time, bool first_flag)
 {
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens = 0;
@@ -1096,7 +1180,8 @@ no_csum:
 	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
 
-	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
+	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+			vlan_macip_lens, type_tucmd, 0);
 }
 
 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
@@ -1320,6 +1405,7 @@ dma_error:
 
 static int igc_tso(struct igc_ring *tx_ring,
 		   struct igc_tx_buffer *first,
+		   __le32 launch_time, bool first_flag,
 		   u8 *hdr_len)
 {
 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
@@ -1406,8 +1492,8 @@ static int igc_tso(struct igc_ring *tx_ring,
 	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
 
-	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
-			type_tucmd, mss_l4len_idx);
+	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+			vlan_macip_lens, type_tucmd, mss_l4len_idx);
 
 	return 1;
 }
@@ -1415,11 +1501,14 @@ static int igc_tso(struct igc_ring *tx_ring,
 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 				       struct igc_ring *tx_ring)
 {
+	bool first_flag = false, insert_empty = false;
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol = vlan_get_protocol(skb);
 	struct igc_tx_buffer *first;
+	__le32 launch_time = 0;
 	u32 tx_flags = 0;
 	unsigned short f;
+	ktime_t txtime;
 	u8 hdr_len = 0;
 	int tso = 0;
 
@@ -1433,11 +1522,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 		count += TXD_USE_COUNT(skb_frag_size(
 						&skb_shinfo(skb)->frags[f]));
 
-	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
+	if (igc_maybe_stop_tx(tx_ring, count + 5)) {
 		/* this is a hard error */
 		return NETDEV_TX_BUSY;
 	}
 
+	if (!tx_ring->launchtime_enable)
+		goto done;
+
+	txtime = skb->tstamp;
+	skb->tstamp = ktime_set(0, 0);
+	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
+
+	if (insert_empty) {
+		struct igc_tx_buffer *empty_info;
+		struct sk_buff *empty;
+		void *data;
+
+		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+		if (!empty)
+			goto done;
+
+		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
+		memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+		if (igc_init_tx_empty_descriptor(tx_ring,
+						 empty,
+						 empty_info) < 0)
+			dev_kfree_skb_any(empty);
+	}
+
+done:
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
 	first->type = IGC_TX_BUFFER_TYPE_SKB;
@@ -1474,11 +1592,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 	first->tx_flags = tx_flags;
 	first->protocol = protocol;
 
-	tso = igc_tso(tx_ring, first, &hdr_len);
+	tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
 	if (tso < 0)
 		goto out_drop;
 	else if (!tso)
-		igc_tx_csum(tx_ring, first);
+		igc_tx_csum(tx_ring, first, launch_time, first_flag);
 
 	igc_tx_map(tx_ring, first, hdr_len);
 
@@ -5925,10 +6043,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 	bool queue_configured[IGC_MAX_TX_QUEUES] = { };
 	u32 start_time = 0, end_time = 0;
 	size_t n;
+	int i;
+
+	adapter->qbv_enable = qopt->enable;
 
 	if (!qopt->enable)
 		return igc_tsn_clear_schedule(adapter);
 
+	if (qopt->base_time < 0)
+		return -ERANGE;
+
 	if (adapter->base_time)
 		return -EALREADY;
 
@@ -5940,10 +6064,24 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 	for (n = 0; n < qopt->num_entries; n++) {
 		struct tc_taprio_sched_entry *e = &qopt->entries[n];
-		int i;
 
 		end_time += e->interval;
 
+		/* If any of the conditions below are true, we need to manually
+		 * control the end time of the cycle.
+		 * 1. Qbv users can specify a cycle time that is not equal
+		 * to the total GCL intervals. Hence, recalculation is
+		 * necessary here to exclude the time interval that
+		 * exceeds the cycle time.
+		 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
+		 * once the end of the list is reached, it will switch
+		 * to the END_OF_CYCLE state and leave the gates in the
+		 * same state until the next cycle is started.
+		 */
+		if (end_time > adapter->cycle_time ||
+		    n + 1 == qopt->num_entries)
+			end_time = adapter->cycle_time;
+
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			struct igc_ring *ring = adapter->tx_ring[i];
 
@@ -5964,6 +6102,18 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 		start_time += e->interval;
 	}
 
+	/* Check whether a queue gets configured.
+	 * If not, set the start and end time to be end time.
+	 */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		if (!queue_configured[i]) {
+			struct igc_ring *ring = adapter->tx_ring[i];
+
+			ring->start_time = end_time;
+			ring->end_time = end_time;
+		}
+	}
+
 	return 0;
 }
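
A note on the first hunk: the reworked igc_tx_launchtime() no longer folds txtime directly against base_time. It first estimates the start of the current cycle (baset_est) from the TAI clock, detects packets that target a later cycle (setting first_flag once per cycle, plus insert_empty when an empty packet must be injected), and only then reduces txtime to an offset within the cycle. Below is a minimal userspace sketch of that arithmetic with made-up nanosecond values; the names mirror the driver, but this is illustration, not driver code.

/* Userspace sketch of the launchtime math in the reworked
 * igc_tx_launchtime(); all values are nanoseconds on the TAI clock
 * and the sample inputs are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t base_time  = 0;         /* Qbv admin base time */
	int64_t cycle_time = 1000000;   /* 1 ms cycle */
	int64_t now        = 5250000;   /* current TAI time */
	int64_t txtime     = 6300000;   /* requested launch time */

	/* n full cycles have elapsed since base_time, so the current
	 * cycle is estimated to start at baset_est.
	 */
	int64_t n = (now - base_time) / cycle_time;       /* 5 */
	int64_t baset_est = base_time + cycle_time * n;   /* 5000000 */
	int64_t end_of_cycle = baset_est + cycle_time;    /* 6000000 */

	/* txtime lands beyond the current cycle: the driver would set
	 * the context descriptor's FIRST flag (once per cycle) and may
	 * insert an empty packet so the NIC re-latches its base time.
	 */
	int first_flag = txtime >= end_of_cycle;          /* 1 */

	/* launch_time is programmed as an offset into the cycle. */
	int64_t launchtime = txtime - baset_est;
	launchtime = launchtime > 0 ? launchtime % cycle_time : 0;

	printf("first_flag=%d launchtime=%lld\n", first_flag,
	       (long long)launchtime);
	return 0;
}

Run as-is this prints first_flag=1 launchtime=300000: txtime falls in the cycle after the current one, so the FIRST flag would be set and a 300 us offset written to the descriptor's launch_time field.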
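On the budget change in igc_xmit_frame_ring(): igc_maybe_stop_tx() now reserves count + 5 descriptors instead of count + 3. Reading the hunk, the two extra slots plausibly cover the context descriptor and the data descriptor emitted for the injected empty frame. The sketch below records that accounting; the helper and its breakdown are an assumption inferred from the diff, not a function in the igc driver.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative worst-case Tx descriptor accounting for one skb after
 * this change; assumed breakdown, not driver code.
 */
static unsigned int worst_case_tx_descs(unsigned int data_descs,
					bool launchtime_enable)
{
	unsigned int n = data_descs;	/* from TXD_USE_COUNT() over head + frags */

	n += 1;				/* context descriptor for the skb itself */
	if (launchtime_enable)
		n += 2;			/* context + data descriptor for the empty
					 * frame injected at a cycle boundary */
	return n;
}

int main(void)
{
	/* With launchtime enabled, two more descriptors may be consumed
	 * per transmit, matching the move from count + 3 to count + 5.
	 */
	printf("%u\n", worst_case_tx_descs(3, true));	/* prints 6 */
	return 0;
}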
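The clamping added to igc_save_qbv_schedule() is easiest to see with numbers. This sketch replays the loop for a hypothetical 1 ms cycle whose gate control list intervals sum to 1.2 ms; the values are invented, only the clamping rule comes from the diff.

/* Sketch of the end-of-cycle clamping in igc_save_qbv_schedule().
 * The GCL below (totalling 1.2 ms against a 1 ms cycle) is hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int cycle_time = 1000000;	/* 1 ms, from taprio */
	unsigned int intervals[] = { 300000, 500000, 400000 };
	unsigned int num_entries = 3;
	unsigned int start_time = 0, end_time = 0;

	for (unsigned int n = 0; n < num_entries; n++) {
		end_time += intervals[n];

		/* Clamp when the accumulated intervals run past the
		 * cycle, or on the last entry (END_OF_CYCLE holds the
		 * gate state until the next cycle starts, per IEEE
		 * 802.1Q-2018 section 8.6.9.2).
		 */
		if (end_time > cycle_time || n + 1 == num_entries)
			end_time = cycle_time;

		printf("entry %u: start=%u end=%u\n", n, start_time,
		       end_time);
		start_time += intervals[n];
	}
	return 0;
}

The third entry starts at 800000 ns but ends at 1000000 ns rather than 1200000 ns: its accumulated interval overruns the cycle, and as the last entry it must in any case end at the cycle boundary.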