Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--   drivers/infiniband/hw/mlx5/mlx5_ib.h |  1 +
-rw-r--r--   drivers/infiniband/hw/mlx5/odp.c     | 17 +++++++----------
2 files changed, 8 insertions, 10 deletions
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d9bffcc93587..bb78142bca5e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -636,6 +636,7 @@ struct mlx5_ib_mr {
 
 	/* For ODP and implicit */
 	atomic_t		num_deferred_work;
+	wait_queue_head_t       q_deferred_work;
 	struct xarray		implicit_children;
 	union {
 		struct rcu_head rcu;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 4216814ba871..bf50cd91f472 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -235,7 +235,8 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
 	mr->parent = NULL;
 	mlx5_mr_cache_free(mr->dev, mr);
 	ib_umem_odp_release(odp);
-	atomic_dec(&imr->num_deferred_work);
+	if (atomic_dec_and_test(&imr->num_deferred_work))
+		wake_up(&imr->q_deferred_work);
 }
 
 static void free_implicit_child_mr_work(struct work_struct *work)
@@ -554,6 +555,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 	imr->umem = &umem_odp->umem;
 	imr->is_odp_implicit = true;
 	atomic_set(&imr->num_deferred_work, 0);
+	init_waitqueue_head(&imr->q_deferred_work);
 	xa_init(&imr->implicit_children);
 
 	err = mlx5_ib_update_xlt(imr, 0,
@@ -611,10 +613,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 	 * under xa_lock while the child is in the xarray. Thus at this point
 	 * it is only decreasing, and all work holding it is now on the wq.
 	 */
-	if (atomic_read(&imr->num_deferred_work)) {
-		flush_workqueue(system_unbound_wq);
-		WARN_ON(atomic_read(&imr->num_deferred_work));
-	}
+	wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
 
 	/*
 	 * Fence the imr before we destroy the children. This allows us to
@@ -645,10 +644,7 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
 	/* Wait for all running page-fault handlers to finish. */
 	synchronize_srcu(&mr->dev->odp_srcu);
 
-	if (atomic_read(&mr->num_deferred_work)) {
-		flush_workqueue(system_unbound_wq);
-		WARN_ON(atomic_read(&mr->num_deferred_work));
-	}
+	wait_event(mr->q_deferred_work, !atomic_read(&mr->num_deferred_work));
 
 	dma_fence_odp_mr(mr);
 }
@@ -1720,7 +1716,8 @@ static void destroy_prefetch_work(struct prefetch_mr_work *work)
 	u32 i;
 
 	for (i = 0; i < work->num_sge; ++i)
-		atomic_dec(&work->frags[i].mr->num_deferred_work);
+		if (atomic_dec_and_test(&work->frags[i].mr->num_deferred_work))
+			wake_up(&work->frags[i].mr->q_deferred_work);
 	kvfree(work);
 }
 
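The patch replaces flush_workqueue(system_unbound_wq) with a counted-reference drain: every deferred work item drops the num_deferred_work count when it finishes, and the final drop (detected by atomic_dec_and_test()) wakes the destroyer sleeping in wait_event(). Flushing a shared system workqueue waits on every queued item, not just this MR's, and can deadlock when the flusher itself runs on that workqueue. Below is a minimal sketch of the generic pattern; the names (obj, deferred_put, obj_wait_idle) are hypothetical and stand in for the driver's MR-specific code:

#include <linux/atomic.h>
#include <linux/wait.h>

struct obj {
	atomic_t		num_deferred;	/* outstanding deferred work items */
	wait_queue_head_t	q_deferred;	/* destroyer sleeps here */
};

static void obj_init(struct obj *o)
{
	atomic_set(&o->num_deferred, 0);
	init_waitqueue_head(&o->q_deferred);
}

/* Called by each deferred work item as it completes. */
static void deferred_put(struct obj *o)
{
	/* atomic_dec_and_test() returns true only on the transition
	 * to zero, so exactly one wake_up() is issued per drain. */
	if (atomic_dec_and_test(&o->num_deferred))
		wake_up(&o->q_deferred);
}

/* Called by the destroy path, which may itself run on a workqueue. */
static void obj_wait_idle(struct obj *o)
{
	/* Sleep until every deferred work item has dropped its count.
	 * Unlike flush_workqueue(system_unbound_wq), this waits only
	 * on this object's work and cannot deadlock when invoked from
	 * the same workqueue the deferred items run on. */
	wait_event(o->q_deferred, !atomic_read(&o->num_deferred));
}

No wakeup can be lost in this scheme: wait_event() re-checks the condition after queuing the sleeper on q_deferred, so a wake_up() racing with the entry to wait_event() still results in the waiter seeing the zero count.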