Diffstat (limited to 'drivers/net/ethernet/microsoft/mana')
 drivers/net/ethernet/microsoft/mana/gdma_main.c    |  35
 drivers/net/ethernet/microsoft/mana/hw_channel.c   |  24
 drivers/net/ethernet/microsoft/mana/mana_en.c      | 142
 drivers/net/ethernet/microsoft/mana/mana_ethtool.c |  15
 4 files changed, 195 insertions, 21 deletions
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 8f3f78b68592..6367de0c2c2e 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -106,6 +106,25 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev)
 	return 0;
 }
 
+static int mana_gd_query_hwc_timeout(struct pci_dev *pdev, u32 *timeout_val)
+{
+	struct gdma_context *gc = pci_get_drvdata(pdev);
+	struct gdma_query_hwc_timeout_resp resp = {};
+	struct gdma_query_hwc_timeout_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_HWC_TIMEOUT,
+			     sizeof(req), sizeof(resp));
+	req.timeout_ms = *timeout_val;
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err || resp.hdr.status)
+		return err ? err : -EPROTO;
+
+	*timeout_val = resp.timeout_ms;
+
+	return 0;
+}
+
 static int mana_gd_detect_devices(struct pci_dev *pdev)
 {
 	struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -300,8 +319,11 @@ static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
 
 void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
 {
+	/* Hardware Spec specifies that software client should set 0 for
+	 * wqe_cnt for Receive Queues. This value is not used in Send Queues.
+	 */
 	mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
-			      queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
+			      queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
 }
 
 void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
@@ -879,8 +901,10 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
 	struct gdma_context *gc = pci_get_drvdata(pdev);
 	struct gdma_verify_ver_resp resp = {};
 	struct gdma_verify_ver_req req = {};
+	struct hw_channel_context *hwc;
 	int err;
 
+	hwc = gc->hwc.driver_data;
 	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
 			     sizeof(req), sizeof(resp));
 
@@ -907,7 +931,14 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
 			err, resp.hdr.status);
 		return err ? err : -EPROTO;
 	}
-
+	if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
+		err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
+		if (err) {
+			dev_err(gc->dev, "Failed to set the hwc timeout %d\n", err);
+			return err;
+		}
+		dev_dbg(gc->dev, "set the hwc timeout to %u\n", hwc->hwc_timeout);
+	}
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 2bd1d74021f7..9d1cd3bfcf66 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -174,7 +174,25 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
 		complete(&hwc->hwc_init_eqe_comp);
 		break;
 
+	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
+		type_data.as_uint32 = event->details[0];
+		type = type_data.type;
+		val = type_data.value;
+
+		switch (type) {
+		case HWC_DATA_CFG_HWC_TIMEOUT:
+			hwc->hwc_timeout = val;
+			break;
+
+		default:
+			dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
+			break;
+		}
+
+		break;
+
 	default:
+		dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
 		/* Ignore unknown events, which should never happen. */
 		break;
 	}
@@ -696,6 +714,7 @@ int mana_hwc_create_channel(struct gdma_context *gc)
 	gd->driver_data = hwc;
 	hwc->gdma_dev = gd;
 	hwc->dev = gc->dev;
+	hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;
 
 	/* HWC's instance number is always 0. */
 	gd->dev_id.as_uint32 = 0;
@@ -770,6 +789,8 @@ void mana_hwc_destroy_channel(struct gdma_context *gc)
 	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
 	hwc->gdma_dev->pdid = INVALID_PDID;
 
+	hwc->hwc_timeout = 0;
+
 	kfree(hwc);
 	gc->hwc.driver_data = NULL;
 	gc->hwc.gdma_context = NULL;
@@ -825,7 +846,8 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
 		goto out;
 	}
 
-	if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) {
+	if (!wait_for_completion_timeout(&ctx->comp_event,
+					 (msecs_to_jiffies(hwc->hwc_timeout) * HZ))) {
 		dev_err(hwc->dev, "HWC: Request timed out!\n");
 		err = -ETIMEDOUT;
 		goto out;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index c2ad0921e893..4a16ebff3d1d 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -12,6 +12,8 @@
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
 
+#include <net/page_pool/helpers.h>
+#include <net/xdp.h>
 #include <net/mana/mana.h>
 #include <net/mana/mana_auxiliary.h>
 
@@ -1387,8 +1389,8 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq)
 
 	recv_buf_oob = &rxq->rx_oobs[curr_index];
 
-	err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
-				    &recv_buf_oob->wqe_inf);
+	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
+					&recv_buf_oob->wqe_inf);
 	if (WARN_ON_ONCE(err))
 		return;
 
@@ -1415,8 +1417,8 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
 	return skb;
 }
 
-static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
-			struct mana_rxq *rxq)
+static void mana_rx_skb(void *buf_va, bool from_pool,
+			struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
 {
 	struct mana_stats_rx *rx_stats = &rxq->stats;
 	struct net_device *ndev = rxq->ndev;
@@ -1449,6 +1451,9 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
 	if (!skb)
 		goto drop;
 
+	if (from_pool)
+		skb_mark_for_recycle(skb);
+
 	skb->dev = napi->dev;
 
 	skb->protocol = eth_type_trans(skb, ndev);
@@ -1499,9 +1504,14 @@ drop_xdp:
 	u64_stats_update_end(&rx_stats->syncp);
 
 drop:
-	WARN_ON_ONCE(rxq->xdp_save_va);
-	/* Save for reuse */
-	rxq->xdp_save_va = buf_va;
+	if (from_pool) {
+		page_pool_recycle_direct(rxq->page_pool,
+					 virt_to_head_page(buf_va));
+	} else {
+		WARN_ON_ONCE(rxq->xdp_save_va);
+		/* Save for reuse */
+		rxq->xdp_save_va = buf_va;
+	}
 
 	++ndev->stats.rx_dropped;
 
@@ -1509,11 +1519,13 @@ drop:
 }
 
 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
-			     dma_addr_t *da, bool is_napi)
+			     dma_addr_t *da, bool *from_pool, bool is_napi)
 {
 	struct page *page;
 	void *va;
 
+	*from_pool = false;
+
 	/* Reuse XDP dropped page if available */
 	if (rxq->xdp_save_va) {
 		va = rxq->xdp_save_va;
@@ -1534,17 +1546,22 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 			return NULL;
 		}
 	} else {
-		page = dev_alloc_page();
+		page = page_pool_dev_alloc_pages(rxq->page_pool);
 		if (!page)
 			return NULL;
 
+		*from_pool = true;
 		va = page_to_virt(page);
 	}
 
 	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
 			     DMA_FROM_DEVICE);
 	if (dma_mapping_error(dev, *da)) {
-		put_page(virt_to_head_page(va));
+		if (*from_pool)
+			page_pool_put_full_page(rxq->page_pool, page, false);
+		else
+			put_page(virt_to_head_page(va));
+
 		return NULL;
 	}
 
@@ -1553,21 +1570,25 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 
 /* Allocate frag for rx buffer, and save the old buf */
 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
-			       struct mana_recv_buf_oob *rxoob, void **old_buf)
+			       struct mana_recv_buf_oob *rxoob, void **old_buf,
+			       bool *old_fp)
 {
+	bool from_pool;
 	dma_addr_t da;
 	void *va;
 
-	va = mana_get_rxfrag(rxq, dev, &da, true);
+	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
 	if (!va)
 		return;
 
 	dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
 			 DMA_FROM_DEVICE);
 	*old_buf = rxoob->buf_va;
+	*old_fp = rxoob->from_pool;
 
 	rxoob->buf_va = va;
 	rxoob->sgl[0].address = da;
+	rxoob->from_pool = from_pool;
 }
 
 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
@@ -1581,6 +1602,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 	struct device *dev = gc->dev;
 	void *old_buf = NULL;
 	u32 curr, pktlen;
+	bool old_fp;
 
 	apc = netdev_priv(ndev);
 
@@ -1623,12 +1645,12 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 	rxbuf_oob = &rxq->rx_oobs[curr];
 	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
 
-	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf);
+	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
 
 	/* Unsuccessful refill will have old_buf == NULL.
 	 * In this case, mana_rx_skb() will drop the packet.
 	 */
-	mana_rx_skb(old_buf, oob, rxq);
+	mana_rx_skb(old_buf, old_fp, oob, rxq);
 
 drop:
 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
@@ -1658,6 +1680,12 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
 		mana_process_rx_cqe(rxq, cq, &comp[i]);
 	}
 
+	if (comp_read > 0) {
+		struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
+
+		mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
+	}
+
 	if (rxq->xdp_flush)
 		xdp_do_flush();
 }
@@ -1882,6 +1910,7 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
 	struct mana_recv_buf_oob *rx_oob;
 	struct device *dev = gc->dev;
 	struct napi_struct *napi;
+	struct page *page;
 	int i;
 
 	if (!rxq)
@@ -1914,10 +1943,18 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
 		dma_unmap_single(dev, rx_oob->sgl[0].address,
 				 rx_oob->sgl[0].size, DMA_FROM_DEVICE);
 
-		put_page(virt_to_head_page(rx_oob->buf_va));
+		page = virt_to_head_page(rx_oob->buf_va);
+
+		if (rx_oob->from_pool)
+			page_pool_put_full_page(rxq->page_pool, page, false);
+		else
+			put_page(page);
+
 		rx_oob->buf_va = NULL;
 	}
 
+	page_pool_destroy(rxq->page_pool);
+
 	if (rxq->gdma_rq)
 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
 
@@ -1928,18 +1965,20 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
 			    struct mana_rxq *rxq, struct device *dev)
 {
 	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
+	bool from_pool = false;
 	dma_addr_t da;
 	void *va;
 
 	if (mpc->rxbufs_pre)
 		va = mana_get_rxbuf_pre(rxq, &da);
 	else
-		va = mana_get_rxfrag(rxq, dev, &da, false);
+		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
 
 	if (!va)
 		return -ENOMEM;
 
 	rx_oob->buf_va = va;
+	rx_oob->from_pool = from_pool;
 
 	rx_oob->sgl[0].address = da;
 	rx_oob->sgl[0].size = rxq->datasize;
@@ -2009,6 +2048,26 @@ static int mana_push_wqe(struct mana_rxq *rxq)
 	return 0;
 }
 
+static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
+{
+	struct page_pool_params pprm = {};
+	int ret;
+
+	pprm.pool_size = RX_BUFFERS_PER_QUEUE;
+	pprm.nid = gc->numa_node;
+	pprm.napi = &rxq->rx_cq.napi;
+
+	rxq->page_pool = page_pool_create(&pprm);
+
+	if (IS_ERR(rxq->page_pool)) {
+		ret = PTR_ERR(rxq->page_pool);
+		rxq->page_pool = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 					u32 rxq_idx, struct mana_eq *eq,
 					struct net_device *ndev)
@@ -2038,6 +2097,13 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
 			   &rxq->headroom);
 
+	/* Create page pool for RX queue */
+	err = mana_create_page_pool(rxq, gc);
+	if (err) {
+		netdev_err(ndev, "Create page pool err:%d\n", err);
+		goto out;
+	}
+
 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
 	if (err)
 		goto out;
@@ -2109,8 +2175,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 
 	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
 				 cq->napi.napi_id));
-	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
-					   MEM_TYPE_PAGE_SHARED, NULL));
+	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+					   rxq->page_pool));
 
 	napi_enable(&cq->napi);
 
@@ -2229,6 +2295,46 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
 	return 0;
 }
 
+void mana_query_gf_stats(struct mana_port_context *apc)
+{
+	struct mana_query_gf_stat_resp resp = {};
+	struct mana_query_gf_stat_req req = {};
+	struct net_device *ndev = apc->ndev;
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
+			     sizeof(req), sizeof(resp));
+	req.req_stats = STATISTICS_FLAGS_HC_TX_BYTES |
+			STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
+			STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
+			STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
+			STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
+			STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
+			STATISTICS_FLAGS_HC_TX_BCAST_BYTES;
+
+	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+				sizeof(resp));
+	if (err) {
+		netdev_err(ndev, "Failed to query GF stats: %d\n", err);
+		return;
+	}
+	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
+				   sizeof(resp));
+	if (err || resp.hdr.status) {
+		netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
+			   resp.hdr.status);
+		return;
+	}
+
+	apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
+	apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
+	apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
+	apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
+	apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
+	apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
+	apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
+}
+
 static int mana_init_port(struct net_device *ndev)
 {
 	struct mana_port_context *apc = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 0dc78679f620..607150165ab4 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -13,6 +13,19 @@ static const struct {
 } mana_eth_stats[] = {
 	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
 	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
+	{"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
+	{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+				      hc_tx_ucast_pkts)},
+	{"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+				       hc_tx_ucast_bytes)},
+	{"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+				      hc_tx_bcast_pkts)},
+	{"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+				       hc_tx_bcast_bytes)},
+	{"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
+				      hc_tx_mcast_pkts)},
+	{"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+				       hc_tx_mcast_bytes)},
 	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
 	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
 					 tx_cqe_unknown_type)},
@@ -114,6 +127,8 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
 	if (!apc->port_is_up)
 		return;
 
+	/* we call mana function to update stats from GDMA */
+	mana_query_gf_stats(apc);
 	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
 		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
 
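
Illustrative note (not part of the patch): the RX changes above replace dev_alloc_page() with the kernel page_pool API. Below is a minimal, hypothetical out-of-tree sketch of the create/alloc/return/destroy lifecycle the driver now follows; the demo_* names are invented for illustration, while the page_pool_* calls and struct page_pool_params fields are the same kernel API used by mana_create_page_pool(), mana_get_rxfrag() and mana_destroy_rxq() in the diff.

/* Hypothetical sketch of the page_pool lifecycle; only the page_pool_*
 * calls and struct page_pool_params are the real kernel API.
 */
#include <linux/err.h>
#include <net/page_pool/helpers.h>

static struct page_pool *demo_pool;

static int demo_rx_pool_create(int numa_node, unsigned int ring_size)
{
	struct page_pool_params pprm = {
		.pool_size = ring_size,	/* one page per RX descriptor */
		.nid = numa_node,	/* allocate near the device's NUMA node */
	};

	demo_pool = page_pool_create(&pprm);
	if (IS_ERR(demo_pool))
		return PTR_ERR(demo_pool);

	return 0;
}

static struct page *demo_rx_alloc(void)
{
	/* Fast-path allocation; recycled pages skip the page allocator. */
	return page_pool_dev_alloc_pages(demo_pool);
}

static void demo_rx_pool_destroy(struct page *unused_page)
{
	/* Return any page the hardware never consumed, then free the pool. */
	if (unused_page)
		page_pool_put_full_page(demo_pool, unused_page, false);

	page_pool_destroy(demo_pool);
}

Pages handed up to the stack come back to the pool automatically once the skb has been tagged with skb_mark_for_recycle(), which is what mana_rx_skb() does in the diff for pool-backed buffers.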