Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 135 ++++++++---------
 1 file changed, 61 insertions(+), 74 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 20d6764cedc8..e7109de2204a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5899,6 +5899,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -5941,10 +5944,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
- /* mss_l4len_id: use 1 as index for TSO */
+ /* mss_l4len_id: use 0 as index for TSO */
mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
- mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_network_header_len(skb);
@@ -5966,12 +5968,9 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
- if (unlikely(skb->no_fcs))
- first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
- if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
- return;
- }
+ if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
+ !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+ return;
} else {
u8 l4_hdr = 0;
switch (first->protocol) {
@@ -6029,30 +6028,32 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
type_tucmd, mss_l4len_idx);
}
-static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
+#define IXGBE_SET_FLAG(_input, _flag, _result) \
+ ((_flag <= _result) ? \
+ ((u32)(_input & _flag) * (_result / _flag)) : \
+ ((u32)(_input & _flag) / (_flag / _result)))
+
+static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
- __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_DEXT);
+ u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS;
/* set HW vlan bit if vlan is present */
- if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
-
- if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
+ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
+ IXGBE_ADVTXD_DCMD_VLE);
/* set segmentation enable bits for TSO/FSO */
-#ifdef IXGBE_FCOE
- if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
-#else
- if (tx_flags & IXGBE_TX_FLAGS_TSO)
-#endif
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
+ IXGBE_ADVTXD_DCMD_TSE);
+
+ /* set timestamp bit if present */
+ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
+ IXGBE_ADVTXD_MAC_TSTAMP);
/* insert frame checksum */
- if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
+ cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
return cmd_type;
}
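
The IXGBE_SET_FLAG() macro introduced above replaces the per-flag branches: with _flag and _result as compile-time constants, the comparison and the division fold away, so translating a tx_flags bit into a descriptor bit costs an AND plus a multiply (or shift) instead of a conditional. A minimal standalone sketch of the trick, using made-up bit positions rather than the real IXGBE_ADVTXD_* values:

	#include <assert.h>
	#include <stdint.h>

	/* same shape as IXGBE_SET_FLAG(); both constants must be single
	 * bits (or exact power-of-two multiples of each other) so that
	 * the scale factor is itself a power of two */
	#define SET_FLAG(_input, _flag, _result) \
		((_flag <= _result) ? \
		 ((uint32_t)((_input) & (_flag)) * ((_result) / (_flag))) : \
		 ((uint32_t)((_input) & (_flag)) / ((_flag) / (_result))))

	int main(void)
	{
		uint32_t tx_flags = 0x02;        /* stand-in HW_VLAN bit */
		const uint32_t VLE = 0x40000000; /* stand-in DCMD_VLE bit */

		assert(SET_FLAG(tx_flags, 0x02, VLE) == VLE); /* set: scaled up */
		assert(SET_FLAG(0u, 0x02, VLE) == 0);         /* clear: zero   */
		return 0;
	}

Note also that IFCS is now part of the initial cmd_type and the skb->no_fcs case clears it again with an XOR; that works only because the bit is known to be set at that point, the same disjoint-bits reasoning applied to the length field further down.
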
@@ -6060,36 +6061,27 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
u32 tx_flags, unsigned int paylen)
{
- __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
/* enable L4 checksum for TSO and TX checksum offload */
- if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_CSUM,
+ IXGBE_ADVTXD_POPTS_TXSM);
/* enable IPv4 checksum for TSO */
- if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
-
- /* use index 1 context for TSO/FSO/FCOE */
-#ifdef IXGBE_FCOE
- if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE))
-#else
- if (tx_flags & IXGBE_TX_FLAGS_TSO)
-#endif
- olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_IPV4,
+ IXGBE_ADVTXD_POPTS_IXSM);
/*
* Check Context must be set if Tx switch is enabled, which it
* always is for the case where virtual functions are running
*/
-#ifdef IXGBE_FCOE
- if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE))
-#else
- if (tx_flags & IXGBE_TX_FLAGS_TXSW)
-#endif
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_CC,
+ IXGBE_ADVTXD_CC);
- tx_desc->read.olinfo_status = olinfo_status;
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
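
The byte-order handling follows the same pattern as cmd_type: the word is assembled in host order and converted once on the final store, instead of wrapping every constant in cpu_to_le32(). A hedged before/after sketch (the shift values are illustrative stand-ins, and cpu_to_le32() is stubbed as an identity for a little-endian host):

	#include <stdint.h>

	#define cpu_to_le32(x) (x)	/* identity stub; the kernel macro byte-swaps on big endian */

	/* before: each OR carries its own conversion */
	static uint32_t olinfo_old(uint32_t paylen, int csum)
	{
		uint32_t olinfo = cpu_to_le32(paylen << 14);

		if (csum)
			olinfo |= cpu_to_le32(1u << 9);
		return olinfo;
	}

	/* after: plain host-order ORs, one conversion at the end */
	static uint32_t olinfo_new(uint32_t paylen, int csum)
	{
		uint32_t olinfo = paylen << 14;

		if (csum)
			olinfo |= 1u << 9;
		return cpu_to_le32(olinfo);
	}

On little-endian hardware the two compile to the same code; keeping the value in host order is what allows the branch-free IXGBE_SET_FLAG() arithmetic to be applied to it at all.
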
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
@@ -6099,22 +6091,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
const u8 hdr_len)
{
- dma_addr_t dma;
struct sk_buff *skb = first->skb;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int data_len = skb->data_len;
- unsigned int size = skb_headlen(skb);
- unsigned int paylen = skb->len - hdr_len;
+ struct skb_frag_struct *frag;
+ dma_addr_t dma;
+ unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
- __le32 cmd_type;
+ u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use;
tx_desc = IXGBE_TX_DESC(tx_ring, i);
- ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
- cmd_type = ixgbe_tx_cmd_type(tx_flags);
+ ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
#ifdef IXGBE_FCOE
if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
@@ -6128,19 +6120,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
#endif
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
- /* record length, and DMA address */
- dma_unmap_len_set(first, len, size);
- dma_unmap_addr_set(first, dma, dma);
+ tx_buffer = first;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- for (;;) {
while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
- cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+ cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
i++;
tx_desc++;
@@ -6148,18 +6143,18 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
dma += IXGBE_MAX_DATA_PER_TXD;
size -= IXGBE_MAX_DATA_PER_TXD;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
}
if (likely(!data_len))
break;
- tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
i++;
tx_desc++;
@@ -6167,6 +6162,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
#ifdef IXGBE_FCOE
size = min_t(unsigned int, data_len, skb_frag_size(frag));
@@ -6177,22 +6173,13 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
tx_buffer = &tx_ring->tx_buffer_info[i];
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
-
- frag++;
}
/* write last descriptor with RS and EOP bits */
- cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
- tx_desc->read.cmd_type_len = cmd_type;
+ cmd_type |= size | IXGBE_TXD_CMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
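
One subtlety in the rewritten map loop: cmd_type ^ size is used where the old code OR'd the size in. The buffer-length field lives in the low bits of the descriptor word and those bits are always zero in cmd_type, so XOR and OR produce the same result there; XOR is also self-inverting, so the size can be masked back out if needed. A quick self-check with stand-in values (the real field layout comes from the ixgbe_adv_tx_desc definitions):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* stand-ins: command bits above bit 16, length in low bits */
		const uint32_t cmd_type = 0x23300000;
		const uint32_t size = 0x1000;	/* 4 KiB buffer */

		/* disjoint bit ranges make XOR and OR interchangeable */
		assert((cmd_type ^ size) == (cmd_type | size));
		/* and XOR undoes itself, unlike OR */
		assert(((cmd_type ^ size) ^ size) == cmd_type);
		return 0;
	}
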
@@ -6453,7 +6440,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
* Tx switch had been disabled.
*/
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- tx_flags |= IXGBE_TX_FLAGS_TXSW;
+ tx_flags |= IXGBE_TX_FLAGS_CC;
#endif
/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */