author | Mark Brown <broonie@kernel.org> | 2020-12-11 17:47:55 +0000
committer | Mark Brown <broonie@kernel.org> | 2020-12-11 17:47:55 +0000
commit | 031616c434db05ce766f76c62865f55698e0924f (patch)
tree | 7f29aa1ff3e7b51a8058cd570fb785c6e769b245 /drivers/net/ethernet/intel/ice/ice_base.c
parent | 064841ccfc49b2315dc0b797239862d3a343aa07 (diff)
parent | 85a7555575a0e48f9b73db310d0d762a08a46d63 (diff)
Merge remote-tracking branch 'asoc/for-5.10' into asoc-linus
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_base.c')
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_base.c | 16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 87008476d8fe..fe4320e2d1f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -308,12 +308,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 		xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index);
-		ring->xsk_umem = ice_xsk_umem(ring);
-		if (ring->xsk_umem) {
+		ring->xsk_pool = ice_xsk_pool(ring);
+		if (ring->xsk_pool) {
 			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 			ring->rx_buf_len =
-				xsk_umem_get_rx_frame_size(ring->xsk_umem);
+				xsk_pool_get_rx_frame_size(ring->xsk_pool);
 			/* For AF_XDP ZC, we disallow packets to span on
 			 * multiple buffers, thus letting us skip that
 			 * handling in the fast-path.
@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 							 NULL);
 			if (err)
 				return err;
-			xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
 			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
 				 ring->q_index);
@@ -417,9 +417,9 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
 	writel(0, ring->tail);
-	if (ring->xsk_umem) {
-		if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
-			dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+	if (ring->xsk_pool) {
+		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
+			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
 				 num_bufs, ring->q_index);
 			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
@@ -428,7 +428,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	err = ice_alloc_rx_bufs_zc(ring, num_bufs);
 	if (err)
-		dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+		dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
 			 ring->q_index, pf_q);
 	return 0;
 }
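For context, the hunks above follow the kernel-wide AF_XDP change that replaced the per-socket UMEM handle with the shared struct xsk_buff_pool: the Rx ring now caches ice_xsk_pool(ring) in ring->xsk_pool and drives the zero-copy setup through the xsk_pool_*/xsk_buff_* helpers. Below is a minimal sketch of that post-patch flow, condensed from the hunks themselves; the helper name ice_rx_ctx_xsk_sketch and the ice_pf_to_dev(ring->vsi->back) device lookup are illustrative only (in the driver this logic sits inline in ice_setup_rx_ctx()), and the snippet builds only inside the kernel tree.

```c
/* Sketch only: condenses the AF_XDP zero-copy Rx setup from
 * ice_setup_rx_ctx() as it looks after this change. Not a real
 * driver entry point; error reporting mirrors the hunks above.
 */
static int ice_rx_ctx_xsk_sketch(struct ice_ring *ring, u16 num_bufs)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);	/* assumed lookup */
	int err;

	/* The ring now caches the buffer pool, not the old UMEM handle. */
	ring->xsk_pool = ice_xsk_pool(ring);
	if (!ring->xsk_pool)
		return 0;	/* no pool attached: regular page-based Rx path */

	/* Buffer length comes from the pool; ZC frames never span buffers. */
	ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);

	/* Re-register the rxq memory model as MEM_TYPE_XSK_BUFF_POOL. */
	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
					 MEM_TYPE_XSK_BUFF_POOL, NULL);
	if (err)
		return err;
	xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

	/* Warn, as the patch does, when the fill queue cannot cover the ring. */
	if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
		dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
			 num_bufs, ring->q_index);
		return 0;
	}

	err = ice_alloc_rx_bufs_zc(ring, num_bufs);
	if (err)
		dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d\n",
			 ring->q_index);
	return 0;
}
```

The substance of the rename is that the buffer pool, rather than the UMEM, is now the object the driver shares with the XSK core, so the frame size, rxq info registration, and buffer availability checks are all queried from ring->xsk_pool.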