-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h          2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c         21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c            8
4 files changed, 25 insertions, 10 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index b621f735cdc3..a047a2a4ddac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -121,9 +121,9 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
mlx5e_reset_icosq_cc_pc(icosq);
- mlx5e_free_rx_in_progress_descs(rq);
+ mlx5e_free_rx_missing_descs(rq);
if (xskrq)
- mlx5e_free_rx_in_progress_descs(xskrq);
+ mlx5e_free_rx_missing_descs(xskrq);
clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
mlx5e_activate_icosq(icosq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index cd7779a9d046..651be7aaf7d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -69,7 +69,7 @@ INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
-void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
+void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq);
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index eca9a11454e5..53eef689f225 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -301,6 +301,15 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
if (!rq->mpwqe.info)
return -ENOMEM;
+ /* For deferred page release (release right before alloc), make sure
+ * that on first round release is not called.
+ */
+ for (int i = 0; i < wq_sz; i++) {
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, i);
+
+ bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
+ }
+
mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
return 0;
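
The bitmap_fill() loop added above primes each WQE's skip_release_bitmap so that the very first allocation round does not try to release pages that were never allocated. A minimal standalone sketch of that pattern, using hypothetical names (struct slot, release_pages(), alloc_pages()) rather than the driver's structures:

#include <stdbool.h>
#include <stdio.h>

#define SLOTS 4

struct slot {
	bool skip_release;	/* primed to true so round 0 releases nothing */
	int  page;		/* stands in for the pages owned by this slot */
};

static void release_pages(struct slot *s)
{
	if (s->skip_release) {		/* first round: nothing to give back */
		s->skip_release = false;
		return;
	}
	printf("releasing page %d\n", s->page);
}

static void alloc_pages(struct slot *s, int page)
{
	s->page = page;
}

int main(void)
{
	struct slot ring[SLOTS] = { 0 };
	int i, round;

	/* Mirror of the bitmap_fill() in the patch: mark every slot so the
	 * first release pass is a no-op.
	 */
	for (i = 0; i < SLOTS; i++)
		ring[i].skip_release = true;

	for (round = 0; round < 2; round++) {
		for (i = 0; i < SLOTS; i++) {
			release_pages(&ring[i]);		  /* deferred release */
			alloc_pages(&ring[i], round * SLOTS + i); /* then refill */
		}
	}
	return 0;
}
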
@@ -1112,7 +1121,7 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
return -ETIMEDOUT;
}
-void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
+void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq;
u16 head;
@@ -1124,8 +1133,12 @@ void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
wq = &rq->mpwqe.wq;
head = wq->head;
- /* Outstanding UMR WQEs (in progress) start at wq->head */
- for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
+ /* Release WQEs that are in missing state: they have been
+ * popped from the list after completion but were not freed
+ * due to deferred release.
+ * Also free the linked-list reserved entry, hence the "+ 1".
+ */
+ for (i = 0; i < mlx5_wq_ll_missing(wq) + 1; i++) {
rq->dealloc_wqe(rq, head);
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
}
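
A rough model of the accounting this loop relies on, following the comment above: entries are popped from the linked list when their completion arrives but keep their pages until the slot is refilled, so cleanup walks missing + 1 entries from the head and frees whatever they still hold. The names here (struct wq, free_missing()) are illustrative, not the mlx5 WQ API:

#include <stdio.h>

#define WQ_SZ 8

struct entry {
	int next;	/* linked-list next index */
	int page;	/* page still held by this entry, -1 if none */
};

struct wq {
	struct entry e[WQ_SZ];
	int head;
	int missing;	/* popped at completion but not yet refilled */
};

/* Free everything that completed but was never handed back: the
 * "missing" entries plus the list's one reserved entry, hence "+ 1".
 */
static void free_missing(struct wq *wq)
{
	int head = wq->head;
	int i;

	for (i = 0; i < wq->missing + 1; i++) {
		if (wq->e[head].page >= 0)
			printf("freeing page %d at slot %d\n",
			       wq->e[head].page, head);
		wq->e[head].page = -1;
		head = wq->e[head].next;
	}
}

int main(void)
{
	struct wq wq = { .head = 0, .missing = 3 };
	int i;

	for (i = 0; i < WQ_SZ; i++) {
		wq.e[i].next = (i + 1) % WQ_SZ;
		wq.e[i].page = i;	/* pretend every slot still holds a page */
	}
	free_missing(&wq);
	return 0;
}
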
@@ -1152,7 +1165,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
- mlx5e_free_rx_in_progress_descs(rq);
+ mlx5e_free_rx_missing_descs(rq);
while (!mlx5_wq_ll_is_empty(wq)) {
struct mlx5e_rx_wqe_ll *wqe;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index eab8cba33ce4..73bc373bf27d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -983,6 +983,11 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
head = rq->mpwqe.actual_wq_head;
i = missing;
do {
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);
+
+ /* Deferred free for better page pool cache usage. */
+ mlx5e_free_rx_mpwqe(rq, wi, true);
+
alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
mlx5e_alloc_rx_mpwqe(rq, head);
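
The call added above, together with the three mlx5e_free_rx_mpwqe() removals below, moves the page release from completion time to just before the slot is refilled. A toy LIFO cache (a stand-in for the page pool cache, not the kernel page_pool API) shows why that ordering recycles well: the page returned a moment earlier is the one handed back on the very next allocation:

#include <stdio.h>

#define CACHE_SZ 4

static int cache[CACHE_SZ];
static int cache_len;
static int next_fresh = 100;	/* ids for pages allocated on a cache miss */

static void cache_put(int page)
{
	if (cache_len < CACHE_SZ)
		cache[cache_len++] = page;
}

static int cache_get(void)
{
	if (cache_len)
		return cache[--cache_len];	/* recycled page */
	return next_fresh++;			/* cache miss: new page */
}

int main(void)
{
	int slot_page[2] = { 0, 1 };
	int i;

	/* Deferred release: give the old page back immediately before
	 * asking for a new one, so every refill is served from the cache.
	 */
	for (i = 0; i < 6; i++) {
		int slot = i % 2;

		cache_put(slot_page[slot]);
		slot_page[slot] = cache_get();
		printf("slot %d refilled with page %d\n", slot, slot_page[slot]);
	}
	return 0;
}
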
@@ -1855,7 +1860,6 @@ mpwrq_cqe_out:
wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
- mlx5e_free_rx_mpwqe(rq, wi, true);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
@@ -2173,7 +2177,6 @@ mpwrq_cqe_out:
wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
- mlx5e_free_rx_mpwqe(rq, wi, true);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
@@ -2233,7 +2236,6 @@ mpwrq_cqe_out:
wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
- mlx5e_free_rx_mpwqe(rq, wi, true);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}