Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c     |  4
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h   |  1
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c       | 17
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c  |  2
4 files changed, 13 insertions, 11 deletions
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 089e201d7550..2f6323ad9c59 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -515,10 +515,11 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
 				       opa_get_lid(packet->dlid, 9B));
 		if (!mcast)
 			goto drop;
+		rcu_read_lock();
 		list_for_each_entry_rcu(p, &mcast->qp_list, list) {
 			packet->qp = p->qp;
 			if (hfi1_do_pkey_check(packet))
-				goto drop;
+				goto unlock_drop;
 			spin_lock_irqsave(&packet->qp->r_lock, flags);
 			packet_handler = qp_ok(packet);
 			if (likely(packet_handler))
@@ -527,6 +528,7 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
 				ibp->rvp.n_pkt_drops++;
 			spin_unlock_irqrestore(&packet->qp->r_lock, flags);
 		}
+		rcu_read_unlock();
 		/*
 		 * Notify rvt_multicast_detach() if it is waiting for us
 		 * to finish.
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d9bffcc93587..bb78142bca5e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -636,6 +636,7 @@ struct mlx5_ib_mr {
 
 	/* For ODP and implicit */
 	atomic_t		num_deferred_work;
+	wait_queue_head_t       q_deferred_work;
 	struct xarray		implicit_children;
 	union {
 		struct rcu_head rcu;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 4216814ba871..bf50cd91f472 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -235,7 +235,8 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
 	mr->parent = NULL;
 	mlx5_mr_cache_free(mr->dev, mr);
 	ib_umem_odp_release(odp);
-	atomic_dec(&imr->num_deferred_work);
+	if (atomic_dec_and_test(&imr->num_deferred_work))
+		wake_up(&imr->q_deferred_work);
 }
 
 static void free_implicit_child_mr_work(struct work_struct *work)
@@ -554,6 +555,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 	imr->umem = &umem_odp->umem;
 	imr->is_odp_implicit = true;
 	atomic_set(&imr->num_deferred_work, 0);
+	init_waitqueue_head(&imr->q_deferred_work);
 	xa_init(&imr->implicit_children);
 
 	err = mlx5_ib_update_xlt(imr, 0,
@@ -611,10 +613,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 	 * under xa_lock while the child is in the xarray. Thus at this point
 	 * it is only decreasing, and all work holding it is now on the wq.
 	 */
-	if (atomic_read(&imr->num_deferred_work)) {
-		flush_workqueue(system_unbound_wq);
-		WARN_ON(atomic_read(&imr->num_deferred_work));
-	}
+	wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
 
 	/*
 	 * Fence the imr before we destroy the children. This allows us to
@@ -645,10 +644,7 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
 	/* Wait for all running page-fault handlers to finish. */
 	synchronize_srcu(&mr->dev->odp_srcu);
 
-	if (atomic_read(&mr->num_deferred_work)) {
-		flush_workqueue(system_unbound_wq);
-		WARN_ON(atomic_read(&mr->num_deferred_work));
-	}
+	wait_event(mr->q_deferred_work, !atomic_read(&mr->num_deferred_work));
 
 	dma_fence_odp_mr(mr);
 }
@@ -1720,7 +1716,8 @@ static void destroy_prefetch_work(struct prefetch_mr_work *work)
 	u32 i;
 
 	for (i = 0; i < work->num_sge; ++i)
-		atomic_dec(&work->frags[i].mr->num_deferred_work);
+		if (atomic_dec_and_test(&work->frags[i].mr->num_deferred_work))
+			wake_up(&work->frags[i].mr->q_deferred_work);
 	kvfree(work);
 }
 
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 33778d451b82..5ef93f8f17a1 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -329,8 +329,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
 		if (mcast == NULL)
 			goto drop;
 		this_cpu_inc(ibp->pmastats->n_multicast_rcv);
+		rcu_read_lock();
 		list_for_each_entry_rcu(p, &mcast->qp_list, list)
 			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
+		rcu_read_unlock();
 		/*
 		 * Notify rvt_multicast_detach() if it is waiting for us
 		 * to finish.
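The hfi1 and qib hunks fix the same bug: list_for_each_entry_rcu() was walked without the caller holding rcu_read_lock(), so nothing guaranteed the list entries stayed live for the duration of the walk. The fix brackets the traversal with an explicit RCU read-side critical section (and, in hfi1, retargets the mid-loop goto to an unlock_drop label so the error path releases the read lock first). A minimal sketch of the idiom, using a hypothetical item type in place of the drivers' mcast qp entries:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

/* Hypothetical list element; stands in for the drivers' mcast qp entries. */
struct item {
	struct list_head list;
	int val;
};

static int sum_items(struct list_head *head)
{
	struct item *p;
	int sum = 0;

	rcu_read_lock();	/* list_for_each_entry_rcu() is only safe here */
	list_for_each_entry_rcu(p, head, list)
		sum += p->val;	/* entries may be unlinked concurrently, but
				 * are not freed until a grace period elapses */
	rcu_read_unlock();

	return sum;
}

Any early exit from the loop has to pass through rcu_read_unlock() as well, which is exactly what the goto unlock_drop change in the hfi1 hunk arranges.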
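The mlx5 hunks replace an imprecise drain (flush_workqueue() on the shared system_unbound_wq, plus a WARN_ON()) with an exact one: each deferred work item drops num_deferred_work via atomic_dec_and_test(), the final decrement wakes q_deferred_work, and the destroy path sleeps in wait_event() until the count reaches zero. A minimal sketch of this counter-plus-waitqueue idiom, with hypothetical names (deferred_ctx and friends are illustrative, not the driver's API):

#include <linux/atomic.h>
#include <linux/wait.h>

struct deferred_ctx {			/* hypothetical; mirrors mlx5_ib_mr */
	atomic_t num_deferred;
	wait_queue_head_t q_deferred;
};

static void ctx_init(struct deferred_ctx *ctx)
{
	atomic_set(&ctx->num_deferred, 0);
	init_waitqueue_head(&ctx->q_deferred);
}

/* Each async work item holds one count while it is queued or running. */
static void deferred_put(struct deferred_ctx *ctx)
{
	/* atomic_dec_and_test() is a full barrier, so the waiter observes
	 * the decrement before the wakeup and cannot miss the final drop. */
	if (atomic_dec_and_test(&ctx->num_deferred))
		wake_up(&ctx->q_deferred);
}

/* Destroy path: sleep until every outstanding item has dropped its count. */
static void ctx_drain(struct deferred_ctx *ctx)
{
	wait_event(ctx->q_deferred, !atomic_read(&ctx->num_deferred));
}

Unlike flushing system_unbound_wq, which blocks until every item anyone has queued on that workqueue has run, the wait_event() form waits only for this object's own outstanding work.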