Diffstat (limited to 'drivers/net/ethernet/intel/igc')
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h |  10
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c |  12
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c    | 146
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h    |   1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.c     |  30
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.h     |   1
6 files changed, 142 insertions(+), 58 deletions(-)
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 5c66b97c0cfa..f7311aeb293b 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -400,6 +400,15 @@
 #define IGC_DTXMXPKTSZ_TSN	0x19 /* 1600 bytes of max TX DMA packet size */
 #define IGC_DTXMXPKTSZ_DEFAULT	0x98 /* 9728-byte Jumbo frames */
 
+/* Transmit Scheduling Latency */
+/* Latency between transmission scheduling (LaunchTime) and the time
+ * the packet is transmitted to the network in nanosecond.
+ */
+#define IGC_TXOFFSET_SPEED_10	0x000034BC
+#define IGC_TXOFFSET_SPEED_100	0x00000578
+#define IGC_TXOFFSET_SPEED_1000	0x0000012C
+#define IGC_TXOFFSET_SPEED_2500	0x00000578
+
 /* Time Sync Interrupt Causes */
 #define IGC_TSICR_SYS_WRAP	BIT(0) /* SYSTIM Wrap around. */
 #define IGC_TSICR_TXTS		BIT(1) /* Transmit Timestamp. */
@@ -610,7 +619,6 @@
 #define IGC_MDIC_OP_WRITE	0x04000000
 #define IGC_MDIC_OP_READ	0x08000000
 #define IGC_MDIC_READY		0x10000000
-#define IGC_MDIC_INT_EN		0x20000000
 #define IGC_MDIC_ERROR		0x40000000
 
 #define IGC_N0_QUEUE -1
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 8cc077b712ad..5a26a7805ef8 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -839,15 +839,15 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
 
 		ring = adapter->tx_ring[j];
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+			start = u64_stats_fetch_begin(&ring->tx_syncp);
 			data[i] = ring->tx_stats.packets;
 			data[i + 1] = ring->tx_stats.bytes;
 			data[i + 2] = ring->tx_stats.restart_queue;
-		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+		} while (u64_stats_fetch_retry(&ring->tx_syncp, start));
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
+			start = u64_stats_fetch_begin(&ring->tx_syncp2);
 			restart2 = ring->tx_stats.restart_queue2;
-		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
+		} while (u64_stats_fetch_retry(&ring->tx_syncp2, start));
 		data[i + 2] += restart2;
 
 		i += IGC_TX_QUEUE_STATS_LEN;
@@ -855,13 +855,13 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
 	for (j = 0; j < adapter->num_rx_queues; j++) {
 		ring = adapter->rx_ring[j];
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+			start = u64_stats_fetch_begin(&ring->rx_syncp);
 			data[i] = ring->rx_stats.packets;
 			data[i + 1] = ring->rx_stats.bytes;
 			data[i + 2] = ring->rx_stats.drops;
 			data[i + 3] = ring->rx_stats.csum_err;
 			data[i + 4] = ring->rx_stats.alloc_failed;
-		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+		} while (u64_stats_fetch_retry(&ring->rx_syncp, start));
 		i += IGC_RX_QUEUE_STATS_LEN;
 	}
 	spin_unlock(&adapter->stats64_lock);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index ebff0e04045d..1586e1e435c6 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2129,65 +2129,102 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
 	return ok;
 }
 
-static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
-				  struct xdp_frame *xdpf,
-				  struct igc_ring *ring)
-{
-	dma_addr_t dma;
-
-	dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ring->dev, dma)) {
-		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
-		return -ENOMEM;
-	}
-
-	buffer->type = IGC_TX_BUFFER_TYPE_XDP;
-	buffer->xdpf = xdpf;
-	buffer->protocol = 0;
-	buffer->bytecount = xdpf->len;
-	buffer->gso_segs = 1;
-	buffer->time_stamp = jiffies;
-	dma_unmap_len_set(buffer, len, xdpf->len);
-	dma_unmap_addr_set(buffer, dma, dma);
-	return 0;
-}
-
 /* This function requires __netif_tx_lock is held by the caller. */
 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
 				      struct xdp_frame *xdpf)
 {
-	struct igc_tx_buffer *buffer;
-	union igc_adv_tx_desc *desc;
-	u32 cmd_type, olinfo_status;
-	int err;
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+	u16 count, index = ring->next_to_use;
+	struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
+	struct igc_tx_buffer *buffer = head;
+	union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
+	u32 olinfo_status, len = xdpf->len, cmd_type;
+	void *data = xdpf->data;
+	u16 i;
 
-	if (!igc_desc_unused(ring))
-		return -EBUSY;
+	count = TXD_USE_COUNT(len);
+	for (i = 0; i < nr_frags; i++)
+		count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
 
-	buffer = &ring->tx_buffer_info[ring->next_to_use];
-	err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
-	if (err)
-		return err;
+	if (igc_maybe_stop_tx(ring, count + 3)) {
+		/* this is a hard error */
+		return -EBUSY;
+	}
 
-	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
-		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
-		   buffer->bytecount;
-	olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+	i = 0;
+	head->bytecount = xdp_get_frame_len(xdpf);
+	head->type = IGC_TX_BUFFER_TYPE_XDP;
+	head->gso_segs = 1;
+	head->xdpf = xdpf;
 
-	desc = IGC_TX_DESC(ring, ring->next_to_use);
-	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+	olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
 	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
 
-	netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+	for (;;) {
+		dma_addr_t dma;
 
-	buffer->next_to_watch = desc;
+		dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+		if (dma_mapping_error(ring->dev, dma)) {
+			netdev_err_once(ring->netdev,
+					"Failed to map DMA for TX\n");
+			goto unmap;
+		}
 
-	ring->next_to_use++;
-	if (ring->next_to_use == ring->count)
-		ring->next_to_use = 0;
+		dma_unmap_len_set(buffer, len, len);
+		dma_unmap_addr_set(buffer, dma, dma);
+
+		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+			   IGC_ADVTXD_DCMD_IFCS | len;
+
+		desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+		desc->read.buffer_addr = cpu_to_le64(dma);
+
+		buffer->protocol = 0;
+
+		if (++index == ring->count)
+			index = 0;
+
+		if (i == nr_frags)
+			break;
+
+		buffer = &ring->tx_buffer_info[index];
+		desc = IGC_TX_DESC(ring, index);
+		desc->read.olinfo_status = 0;
+
+		data = skb_frag_address(&sinfo->frags[i]);
+		len = skb_frag_size(&sinfo->frags[i]);
+		i++;
+	}
+	desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
+
+	netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
+	/* set the timestamp */
+	head->time_stamp = jiffies;
+	/* set next_to_watch value indicating a packet is present */
+	head->next_to_watch = desc;
+	ring->next_to_use = index;
 
 	return 0;
+
+unmap:
+	for (;;) {
+		buffer = &ring->tx_buffer_info[index];
+		if (dma_unmap_len(buffer, len))
+			dma_unmap_page(ring->dev,
+				       dma_unmap_addr(buffer, dma),
+				       dma_unmap_len(buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(buffer, len, 0);
+		if (buffer == head)
+			break;
+
+		if (!index)
+			index += ring->count;
+		index--;
+	}
+
+	return -ENOMEM;
 }
 
 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
@@ -2369,6 +2406,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 			xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
 					 igc_rx_offset(rx_ring) + pkt_offset,
 					 size, true);
+			xdp_buff_clear_frags_flag(&xdp);
 
 			skb = igc_xdp_run_prog(adapter, &xdp);
 		}
@@ -4356,8 +4394,7 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter,
 		return -ENOMEM;
 
 	/* initialize NAPI */
-	netif_napi_add(adapter->netdev, &q_vector->napi,
-		       igc_poll, 64);
+	netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);
 
 	/* tie q_vector and adapter together */
 	adapter->q_vector[v_idx] = q_vector;
@@ -4645,10 +4682,10 @@ void igc_update_stats(struct igc_adapter *adapter)
 		}
 
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+			start = u64_stats_fetch_begin(&ring->rx_syncp);
 			_bytes = ring->rx_stats.bytes;
 			_packets = ring->rx_stats.packets;
-		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+		} while (u64_stats_fetch_retry(&ring->rx_syncp, start));
 		bytes += _bytes;
 		packets += _packets;
 	}
@@ -4662,10 +4699,10 @@ void igc_update_stats(struct igc_adapter *adapter)
 		struct igc_ring *ring = adapter->tx_ring[i];
 
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+			start = u64_stats_fetch_begin(&ring->tx_syncp);
 			_bytes = ring->tx_stats.bytes;
 			_packets = ring->tx_stats.packets;
-		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+		} while (u64_stats_fetch_retry(&ring->tx_syncp, start));
 		bytes += _bytes;
 		packets += _packets;
 	}
@@ -5344,6 +5381,13 @@ static void igc_watchdog_task(struct work_struct *work)
 				break;
 			}
 
+			/* Once the launch time has been set on the wire, there
+			 * is a delay before the link speed can be determined
+			 * based on link-up activity. Write into the register
+			 * as soon as we know the correct link speed.
+			 */
+			igc_tsn_adjust_txtime_offset(adapter);
+
 			if (adapter->link_speed != SPEED_1000)
 				goto no_wait;
 
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index c0d8214148d1..01c86d36856d 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -224,6 +224,7 @@
 /* Transmit Scheduling Registers */
 #define IGC_TQAVCTRL		0x3570
 #define IGC_TXQCTL(_n)		(0x3344 + 0x4 * (_n))
+#define IGC_GTXOFFSET		0x3310
 #define IGC_BASET_L		0x3314
 #define IGC_BASET_H		0x3318
 #define IGC_QBVCYCLET		0x331C
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 0fce22de2ab8..f975ed807da1 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -48,6 +48,35 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
 	return new_flags;
 }
 
+void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
+{
+	struct igc_hw *hw = &adapter->hw;
+	u16 txoffset;
+
+	if (!is_any_launchtime(adapter))
+		return;
+
+	switch (adapter->link_speed) {
+	case SPEED_10:
+		txoffset = IGC_TXOFFSET_SPEED_10;
+		break;
+	case SPEED_100:
+		txoffset = IGC_TXOFFSET_SPEED_100;
+		break;
+	case SPEED_1000:
+		txoffset = IGC_TXOFFSET_SPEED_1000;
+		break;
+	case SPEED_2500:
+		txoffset = IGC_TXOFFSET_SPEED_2500;
+		break;
+	default:
+		txoffset = 0;
+		break;
+	}
+
+	wr32(IGC_GTXOFFSET, txoffset);
+}
+
 /* Returns the TSN specific registers to their default values after
  * the adapter is reset.
  */
@@ -57,6 +86,7 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
 	u32 tqavctrl;
 	int i;
 
+	wr32(IGC_GTXOFFSET, 0);
 	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
 	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
 
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index 1512307f5a52..b53e6af560b7 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -6,5 +6,6 @@
 
 int igc_tsn_offload_apply(struct igc_adapter *adapter);
 int igc_tsn_reset(struct igc_adapter *adapter);
+void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
 
 #endif /* _IGC_BASE_H */