Diffstat (limited to 'drivers/net/ethernet/pensando/ionic/ionic_txrx.c')
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 408
1 file changed, 255 insertions(+), 153 deletions(-)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index d233b6e77b1e..169ac4f54640 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -10,13 +10,19 @@
 #include "ionic_lif.h"
 #include "ionic_txrx.h"
 
-static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
-			   struct ionic_cq_info *cq_info, void *cb_arg);
+static void ionic_rx_clean(struct ionic_queue *q,
+			   struct ionic_desc_info *desc_info,
+			   struct ionic_cq_info *cq_info,
+			   void *cb_arg);
+
+static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+
+static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
 
 static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
 				  ionic_desc_cb cb_func, void *cb_arg)
 {
-	DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);
+	DEBUG_STATS_TXQ_POST(q, ring_dbell);
 
 	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
 }
@@ -26,7 +32,7 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
 {
 	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
 
-	DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
+	DEBUG_STATS_RX_BUFF_CNT(q);
 }
 
 static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
@@ -43,7 +49,7 @@ static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
 	struct sk_buff *skb;
 
 	netdev = lif->netdev;
-	stats = q_to_rx_stats(q);
+	stats = &q->lif->rxqstats[q->index];
 
 	if (frags)
 		skb = napi_get_frags(&q_to_qcq(q)->napi);
@@ -140,8 +146,10 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
 	return skb;
 }
 
-static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
-			   struct ionic_cq_info *cq_info, void *cb_arg)
+static void ionic_rx_clean(struct ionic_queue *q,
+			   struct ionic_desc_info *desc_info,
+			   struct ionic_cq_info *cq_info,
+			   void *cb_arg)
 {
 	struct ionic_rxq_comp *comp = cq_info->cq_desc;
 	struct ionic_qcq *qcq = q_to_qcq(q);
@@ -157,12 +165,6 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_i
 		return;
 	}
 
-	/* no packet processing while resetting */
-	if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) {
-		stats->dropped++;
-		return;
-	}
-
 	stats->pkts++;
 	stats->bytes += le16_to_cpu(comp->len);
 
@@ -210,10 +212,11 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_i
 		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
 			stats->csum_error++;
 
-	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
-		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
-			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-					       le16_to_cpu(comp->vlan_tci));
+	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       le16_to_cpu(comp->vlan_tci));
+		stats->vlan_stripped++;
 	}
 
 	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
@@ -232,14 +235,14 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
 		return false;
 
 	/* check for empty queue */
-	if (q->tail->index == q->head->index)
+	if (q->tail_idx == q->head_idx)
 		return false;
 
-	desc_info = q->tail;
-	if (desc_info->index != le16_to_cpu(comp->comp_index))
+	if (q->tail_idx != le16_to_cpu(comp->comp_index))
 		return false;
 
-	q->tail = desc_info->next;
+	desc_info = &q->info[q->tail_idx];
+	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
 
 	/* clean the related q entry, only one per qc completion */
 	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
@@ -250,69 +253,62 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
 	return true;
 }
 
-static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit)
-{
-	u32 work_done = 0;
-
-	while (ionic_rx_service(rxcq, rxcq->tail)) {
-		if (rxcq->tail->last)
-			rxcq->done_color = !rxcq->done_color;
-		rxcq->tail = rxcq->tail->next;
-		DEBUG_STATS_CQE_CNT(rxcq);
-
-		if (++work_done >= limit)
-			break;
-	}
-
-	return work_done;
-}
-
 void ionic_rx_flush(struct ionic_cq *cq)
 {
 	struct ionic_dev *idev = &cq->lif->ionic->idev;
 	u32 work_done;
 
-	work_done = ionic_rx_walk_cq(cq, cq->num_descs);
+	work_done = ionic_cq_service(cq, cq->num_descs,
+				     ionic_rx_service, NULL, NULL);
 
 	if (work_done)
 		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
 				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
 }
 
-static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
-					dma_addr_t *dma_addr)
+static int ionic_rx_page_alloc(struct ionic_queue *q,
+			       struct ionic_page_info *page_info)
 {
 	struct ionic_lif *lif = q->lif;
 	struct ionic_rx_stats *stats;
 	struct net_device *netdev;
 	struct device *dev;
-	struct page *page;
 
 	netdev = lif->netdev;
	dev = lif->ionic->dev;
 	stats = q_to_rx_stats(q);
-	page = alloc_page(GFP_ATOMIC);
-	if (unlikely(!page)) {
-		net_err_ratelimited("%s: Page alloc failed on %s!\n",
+
+	if (unlikely(!page_info)) {
+		net_err_ratelimited("%s: %s invalid page_info in alloc\n",
+				    netdev->name, q->name);
+		return -EINVAL;
+	}
+
+	page_info->page = dev_alloc_page();
+	if (unlikely(!page_info->page)) {
+		net_err_ratelimited("%s: %s page alloc failed\n",
 				    netdev->name, q->name);
 		stats->alloc_err++;
-		return NULL;
+		return -ENOMEM;
 	}
 
-	*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(dev, *dma_addr))) {
-		__free_page(page);
-		net_err_ratelimited("%s: DMA single map failed on %s!\n",
+	page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE,
+					   DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) {
+		put_page(page_info->page);
+		page_info->dma_addr = 0;
+		page_info->page = NULL;
+		net_err_ratelimited("%s: %s dma map failed\n",
 				    netdev->name, q->name);
 		stats->dma_map_err++;
-		return NULL;
+		return -EIO;
 	}
 
-	return page;
+	return 0;
 }
 
-static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
-			       dma_addr_t dma_addr)
+static void ionic_rx_page_free(struct ionic_queue *q,
+			       struct ionic_page_info *page_info)
 {
 	struct ionic_lif *lif = q->lif;
 	struct net_device *netdev;
@@ -321,19 +317,24 @@ static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
 	netdev = lif->netdev;
 	dev = lif->ionic->dev;
 
-	if (unlikely(!page)) {
-		net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
+	if (unlikely(!page_info)) {
+		net_err_ratelimited("%s: %s invalid page_info in free\n",
 				    netdev->name, q->name);
 		return;
 	}
 
-	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(!page_info->page)) {
+		net_err_ratelimited("%s: %s invalid page in free\n",
+				    netdev->name, q->name);
+		return;
+	}
 
-	__free_page(page);
-}
+	dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
 
-#define IONIC_RX_RING_DOORBELL_STRIDE		((1 << 5) - 1)
-#define IONIC_RX_RING_HEAD_BUF_SZ		2048
+	put_page(page_info->page);
+	page_info->dma_addr = 0;
+	page_info->page = NULL;
+}
 
 void ionic_rx_fill(struct ionic_queue *q)
 {
@@ -346,7 +347,6 @@ void ionic_rx_fill(struct ionic_queue *q)
 	unsigned int remain_len;
 	unsigned int seg_len;
 	unsigned int nfrags;
-	bool ring_doorbell;
 	unsigned int i, j;
 	unsigned int len;
 
@@ -355,15 +355,13 @@ void ionic_rx_fill(struct ionic_queue *q)
 	for (i = ionic_q_space_avail(q); i; i--) {
 		remain_len = len;
-		desc_info = q->head;
+		desc_info = &q->info[q->head_idx];
 		desc = desc_info->desc;
 		sg_desc = desc_info->sg_desc;
 		page_info = &desc_info->pages[0];
 
 		if (page_info->page) { /* recycle the buffer */
-			ring_doorbell = ((q->head->index + 1) &
-					 IONIC_RX_RING_DOORBELL_STRIDE) == 0;
-			ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
+			ionic_rxq_post(q, false, ionic_rx_clean, NULL);
 			continue;
 		}
 
@@ -371,8 +369,7 @@ void ionic_rx_fill(struct ionic_queue *q)
 		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
 					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
 		desc_info->npages = nfrags;
-		page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
-		if (unlikely(!page_info->page)) {
+		if (unlikely(ionic_rx_page_alloc(q, page_info))) {
 			desc->addr = 0;
 			desc->len = 0;
 			return;
@@ -389,8 +386,7 @@ void ionic_rx_fill(struct ionic_queue *q)
 				continue;
 
 			sg_elem = &sg_desc->elems[j];
-			page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
-			if (unlikely(!page_info->page)) {
+			if (unlikely(ionic_rx_page_alloc(q, page_info))) {
 				sg_elem->addr = 0;
 				sg_elem->len = 0;
 				return;
@@ -402,10 +398,11 @@ void ionic_rx_fill(struct ionic_queue *q)
 			page_info++;
 		}
 
-		ring_doorbell = ((q->head->index + 1) &
-				 IONIC_RX_RING_DOORBELL_STRIDE) == 0;
-		ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
+		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
 	}
+
+	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+			 q->dbval | q->head_idx);
 }
 
 static void ionic_rx_fill_cb(void *arg)
@@ -415,58 +412,111 @@ static void ionic_rx_fill_cb(void *arg)
 
 void ionic_rx_empty(struct ionic_queue *q)
 {
-	struct ionic_desc_info *cur;
+	struct ionic_desc_info *desc_info;
 	struct ionic_rxq_desc *desc;
 	unsigned int i;
+	u16 idx;
 
-	for (cur = q->tail; cur != q->head; cur = cur->next) {
-		desc = cur->desc;
+	idx = q->tail_idx;
+	while (idx != q->head_idx) {
+		desc_info = &q->info[idx];
+		desc = desc_info->desc;
 		desc->addr = 0;
 		desc->len = 0;
 
-		for (i = 0; i < cur->npages; i++) {
-			if (likely(cur->pages[i].page)) {
-				ionic_rx_page_free(q, cur->pages[i].page,
-						   cur->pages[i].dma_addr);
-				cur->pages[i].page = NULL;
-				cur->pages[i].dma_addr = 0;
-			}
-		}
+		for (i = 0; i < desc_info->npages; i++)
+			ionic_rx_page_free(q, &desc_info->pages[i]);
 
-		cur->cb_arg = NULL;
+		desc_info->cb_arg = NULL;
+		idx = (idx + 1) & (q->num_descs - 1);
 	}
 }
 
-int ionic_rx_napi(struct napi_struct *napi, int budget)
+static void ionic_dim_update(struct ionic_qcq *qcq)
+{
+	struct dim_sample dim_sample;
+	struct ionic_lif *lif;
+	unsigned int qi;
+
+	if (!qcq->intr.dim_coal_hw)
+		return;
+
+	lif = qcq->q.lif;
+	qi = qcq->cq.bound_q->index;
+
+	ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+			     lif->rxqcqs[qi]->intr.index,
+			     qcq->intr.dim_coal_hw);
+
+	dim_update_sample(qcq->cq.bound_intr->rearm_count,
+			  lif->txqstats[qi].pkts,
+			  lif->txqstats[qi].bytes,
+			  &dim_sample);
+
+	net_dim(&qcq->dim, dim_sample);
+}
+
+int ionic_tx_napi(struct napi_struct *napi, int budget)
 {
 	struct ionic_qcq *qcq = napi_to_qcq(napi);
-	struct ionic_cq *rxcq = napi_to_cq(napi);
-	unsigned int qi = rxcq->bound_q->index;
+	struct ionic_cq *cq = napi_to_cq(napi);
 	struct ionic_dev *idev;
 	struct ionic_lif *lif;
-	struct ionic_cq *txcq;
 	u32 work_done = 0;
 	u32 flags = 0;
 
-	lif = rxcq->bound_q->lif;
+	lif = cq->bound_q->lif;
 	idev = &lif->ionic->idev;
 
-	txcq = &lif->txqcqs[qi].qcq->cq;
-	ionic_tx_flush(txcq);
+	work_done = ionic_cq_service(cq, budget,
+				     ionic_tx_service, NULL, NULL);
+
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
+		ionic_dim_update(qcq);
+		flags |= IONIC_INTR_CRED_UNMASK;
+		cq->bound_intr->rearm_count++;
+	}
 
-	work_done = ionic_rx_walk_cq(rxcq, budget);
+	if (work_done || flags) {
+		flags |= IONIC_INTR_CRED_RESET_COALESCE;
+		ionic_intr_credits(idev->intr_ctrl,
+				   cq->bound_intr->index,
+				   work_done, flags);
+	}
+
+	DEBUG_STATS_NAPI_POLL(qcq, work_done);
+
+	return work_done;
+}
+
+int ionic_rx_napi(struct napi_struct *napi, int budget)
+{
+	struct ionic_qcq *qcq = napi_to_qcq(napi);
+	struct ionic_cq *cq = napi_to_cq(napi);
+	struct ionic_dev *idev;
+	struct ionic_lif *lif;
+	u32 work_done = 0;
+	u32 flags = 0;
+
+	lif = cq->bound_q->lif;
+	idev = &lif->ionic->idev;
+
+	work_done = ionic_cq_service(cq, budget,
+				     ionic_rx_service, NULL, NULL);
 
 	if (work_done)
-		ionic_rx_fill_cb(rxcq->bound_q);
+		ionic_rx_fill(cq->bound_q);
 
 	if (work_done < budget && napi_complete_done(napi, work_done)) {
+		ionic_dim_update(qcq);
 		flags |= IONIC_INTR_CRED_UNMASK;
-		DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
+		cq->bound_intr->rearm_count++;
 	}
 
 	if (work_done || flags) {
 		flags |= IONIC_INTR_CRED_RESET_COALESCE;
-		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
+		ionic_intr_credits(idev->intr_ctrl,
+				   cq->bound_intr->index,
 				   work_done, flags);
 	}
 
@@ -475,7 +525,50 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len)
+int ionic_txrx_napi(struct napi_struct *napi, int budget)
+{
+	struct ionic_qcq *qcq = napi_to_qcq(napi);
+	struct ionic_cq *rxcq = napi_to_cq(napi);
+	unsigned int qi = rxcq->bound_q->index;
+	struct ionic_dev *idev;
+	struct ionic_lif *lif;
+	struct ionic_cq *txcq;
+	u32 rx_work_done = 0;
+	u32 tx_work_done = 0;
+	u32 flags = 0;
+
+	lif = rxcq->bound_q->lif;
+	idev = &lif->ionic->idev;
+	txcq = &lif->txqcqs[qi]->cq;
+
+	tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
+					ionic_tx_service, NULL, NULL);
+
+	rx_work_done = ionic_cq_service(rxcq, budget,
+					ionic_rx_service, NULL, NULL);
+	if (rx_work_done)
+		ionic_rx_fill_cb(rxcq->bound_q);
+
+	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
+		ionic_dim_update(qcq);
+		flags |= IONIC_INTR_CRED_UNMASK;
+		rxcq->bound_intr->rearm_count++;
+	}
+
+	if (rx_work_done || flags) {
+		flags |= IONIC_INTR_CRED_RESET_COALESCE;
+		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
+				   tx_work_done + rx_work_done, flags);
+	}
+
+	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
+	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
+
+	return rx_work_done;
+}
+
+static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
+				      void *data, size_t len)
 {
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
 	struct device *dev = q->lif->ionic->dev;
@@ -491,7 +584,8 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t
 	return dma_addr;
 }
 
-static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag,
+static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
+				    const skb_frag_t *frag,
 				    size_t offset, size_t len)
 {
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
@@ -507,8 +601,10 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *fra
 	return dma_addr;
 }
 
-static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
-			   struct ionic_cq_info *cq_info, void *cb_arg)
+static void ionic_tx_clean(struct ionic_queue *q,
+			   struct ionic_desc_info *desc_info,
+			   struct ionic_cq_info *cq_info,
+			   void *cb_arg)
 {
 	struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
 	struct ionic_txq_sg_elem *elem = sg_desc->elems;
@@ -554,58 +650,54 @@ static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_i
 	}
 }
 
-void ionic_tx_flush(struct ionic_cq *cq)
+static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
 {
-	struct ionic_txq_comp *comp = cq->tail->cq_desc;
-	struct ionic_dev *idev = &cq->lif->ionic->idev;
+	struct ionic_txq_comp *comp = cq_info->cq_desc;
 	struct ionic_queue *q = cq->bound_q;
 	struct ionic_desc_info *desc_info;
-	unsigned int work_done = 0;
-
-	/* walk the completed cq entries */
-	while (work_done < cq->num_descs &&
-	       color_match(comp->color, cq->done_color)) {
-
-		/* clean the related q entries, there could be
-		 * several q entries completed for each cq completion
-		 */
-		do {
-			desc_info = q->tail;
-			q->tail = desc_info->next;
-			ionic_tx_clean(q, desc_info, cq->tail,
-				       desc_info->cb_arg);
-			desc_info->cb = NULL;
-			desc_info->cb_arg = NULL;
-		} while (desc_info->index != le16_to_cpu(comp->comp_index));
-
-		if (cq->tail->last)
-			cq->done_color = !cq->done_color;
-
-		cq->tail = cq->tail->next;
-		comp = cq->tail->cq_desc;
-		DEBUG_STATS_CQE_CNT(cq);
-
-		work_done++;
-	}
+	u16 index;
+
+	if (!color_match(comp->color, cq->done_color))
+		return false;
+	/* clean the related q entries, there could be
+	 * several q entries completed for each cq completion
+	 */
+	do {
+		desc_info = &q->info[q->tail_idx];
+		index = q->tail_idx;
+		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
+		desc_info->cb = NULL;
+		desc_info->cb_arg = NULL;
+	} while (index != le16_to_cpu(comp->comp_index));
+
+	return true;
+}
+
+void ionic_tx_flush(struct ionic_cq *cq)
+{
+	struct ionic_dev *idev = &cq->lif->ionic->idev;
+	u32 work_done;
+
+	work_done = ionic_cq_service(cq, cq->num_descs,
+				     ionic_tx_service, NULL, NULL);
 
 	if (work_done)
 		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
-				   work_done, 0);
+				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
 }
 
 void ionic_tx_empty(struct ionic_queue *q)
 {
 	struct ionic_desc_info *desc_info;
-	int done = 0;
 
 	/* walk the not completed tx entries, if any */
-	while (q->head != q->tail) {
-		desc_info = q->tail;
-		q->tail = desc_info->next;
+	while (q->head_idx != q->tail_idx) {
+		desc_info = &q->info[q->tail_idx];
+		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
 		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
 		desc_info->cb = NULL;
 		desc_info->cb_arg = NULL;
-		done++;
 	}
 }
 
@@ -689,8 +781,8 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
 static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
 						struct ionic_txq_sg_elem **elem)
 {
-	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
-	struct ionic_txq_desc *desc = q->head->desc;
+	struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
+	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
 
 	*elem = sg_desc->elems;
 	return desc;
@@ -699,13 +791,13 @@ static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
 static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
 {
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
-	struct ionic_desc_info *abort = q->head;
+	struct ionic_desc_info *rewind_desc_info;
 	struct device *dev = q->lif->ionic->dev;
-	struct ionic_desc_info *rewind = abort;
 	struct ionic_txq_sg_elem *elem;
 	struct ionic_txq_desc *desc;
 	unsigned int frag_left = 0;
 	unsigned int offset = 0;
+	u16 abort = q->head_idx;
 	unsigned int len_left;
 	dma_addr_t desc_addr;
 	unsigned int hdrlen;
@@ -713,6 +805,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
 	unsigned int seglen;
 	u64 total_bytes = 0;
 	u64 total_pkts = 0;
+	u16 rewind = abort;
 	unsigned int left;
 	unsigned int len;
 	unsigned int mss;
@@ -852,23 +945,25 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
 	stats->pkts += total_pkts;
 	stats->bytes += total_bytes;
 	stats->tso++;
+	stats->tso_bytes += total_bytes;
 
 	return 0;
 
 err_out_abort:
-	while (rewind->desc != q->head->desc) {
-		ionic_tx_clean(q, rewind, NULL, NULL);
-		rewind = rewind->next;
+	while (rewind != q->head_idx) {
+		rewind_desc_info = &q->info[rewind];
+		ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
+		rewind = (rewind + 1) & (q->num_descs - 1);
 	}
-	q->head = abort;
+	q->head_idx = abort;
 
 	return -ENOMEM;
 }
 
 static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
 {
+	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
-	struct ionic_txq_desc *desc = q->head->desc;
 	struct device *dev = q->lif->ionic->dev;
 	dma_addr_t dma_addr;
 	bool has_vlan;
@@ -890,9 +985,12 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
 			       flags, skb_shinfo(skb)->nr_frags, dma_addr);
 	desc->cmd = cpu_to_le64(cmd);
 	desc->len = cpu_to_le16(skb_headlen(skb));
-	desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
 	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
 	desc->csum_offset = cpu_to_le16(skb->csum_offset);
+	if (has_vlan) {
+		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+		stats->vlan_inserted++;
+	}
 
 	if (skb->csum_not_inet)
 		stats->crc32_csum++;
@@ -904,8 +1002,8 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
 
 static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
 {
+	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
-	struct ionic_txq_desc *desc = q->head->desc;
 	struct device *dev = q->lif->ionic->dev;
 	dma_addr_t dma_addr;
 	bool has_vlan;
@@ -927,16 +1025,19 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
 			       flags, skb_shinfo(skb)->nr_frags, dma_addr);
 	desc->cmd = cpu_to_le64(cmd);
 	desc->len = cpu_to_le16(skb_headlen(skb));
-	desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+	if (has_vlan) {
+		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+		stats->vlan_inserted++;
+	}
 
-	stats->no_csum++;
+	stats->csum_none++;
 
 	return 0;
 }
 
 static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
 {
-	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
+	struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
 	unsigned int len_left = skb->len - skb_headlen(skb);
 	struct ionic_txq_sg_elem *elem = sg_desc->elems;
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
@@ -989,6 +1090,7 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
 
 static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
 {
+	int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
 	int err;
 
@@ -997,7 +1099,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
 		return (skb->len / skb_shinfo(skb)->gso_size) + 1;
 
 	/* If non-TSO, just need 1 desc and nr_frags sg elems */
-	if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
+	if (skb_shinfo(skb)->nr_frags <= sg_elems)
 		return 1;
 
 	/* Too many frags, so linearize */
@@ -1044,9 +1146,9 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_OK;
 	}
 
-	if (unlikely(!lif_to_txqcq(lif, queue_index)))
+	if (unlikely(queue_index >= lif->nxqs))
 		queue_index = 0;
-	q = lif_to_txq(lif, queue_index);
+	q = &lif->txqcqs[queue_index]->q;
 
 	ndescs = ionic_tx_descs_needed(q, skb);
 	if (ndescs < 0)