Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--   drivers/infiniband/core/cm.c          3
-rw-r--r--   drivers/infiniband/core/cma.c        18
-rw-r--r--   drivers/infiniband/core/counters.c    4
-rw-r--r--   drivers/infiniband/core/mad.c         3
-rw-r--r--   drivers/infiniband/core/rdma_core.c  42
-rw-r--r--   drivers/infiniband/core/sa_query.c   38
6 files changed, 67 insertions, 41 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 9ce787e37e22..dc0558b23158 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -918,6 +918,7 @@ static void cm_free_work(struct cm_work *work)
 
 static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
 				 struct cm_work *work)
+	__releases(&cm_id_priv->lock)
 {
 	bool immediate;
 
@@ -3675,10 +3676,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
 		return ret;
 	}
 	cm_id_priv->id.state = IB_CM_IDLE;
+	spin_lock_irq(&cm.lock);
 	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
 		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
 	}
+	spin_unlock_irq(&cm.lock);
 
 	return 0;
 }
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 3d7cc9f0f3d4..c30cf5307ce3 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1624,6 +1624,8 @@ static struct rdma_id_private *cma_find_listener(
 {
 	struct rdma_id_private *id_priv, *id_priv_dev;
 
+	lockdep_assert_held(&lock);
+
 	if (!bind_list)
 		return ERR_PTR(-EINVAL);
 
@@ -1670,6 +1672,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
 		}
 	}
 
+	mutex_lock(&lock);
 	/*
 	 * Net namespace might be getting deleted while route lookup,
 	 * cm_id lookup is in progress. Therefore, perform netdevice
@@ -1711,6 +1714,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
 	id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
 err:
 	rcu_read_unlock();
+	mutex_unlock(&lock);
 	if (IS_ERR(id_priv) && *net_dev) {
 		dev_put(*net_dev);
 		*net_dev = NULL;
@@ -2492,6 +2496,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 	struct net *net = id_priv->id.route.addr.dev_addr.net;
 	int ret;
 
+	lockdep_assert_held(&lock);
+
 	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
 		return;
 
@@ -3342,6 +3348,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
 	u64 sid, mask;
 	__be16 port;
 
+	lockdep_assert_held(&lock);
+
 	addr = cma_src_addr(id_priv);
 	port = htons(bind_list->port);
 
@@ -3370,6 +3378,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
 	struct rdma_bind_list *bind_list;
 	int ret;
 
+	lockdep_assert_held(&lock);
+
 	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
 	if (!bind_list)
 		return -ENOMEM;
@@ -3396,6 +3406,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
 	struct sockaddr  *saddr = cma_src_addr(id_priv);
 	__be16 dport = cma_port(daddr);
 
+	lockdep_assert_held(&lock);
+
 	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
 		struct sockaddr  *cur_daddr = cma_dst_addr(cur_id);
 		struct sockaddr  *cur_saddr = cma_src_addr(cur_id);
@@ -3435,6 +3447,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
 	unsigned int rover;
 	struct net *net = id_priv->id.route.addr.dev_addr.net;
 
+	lockdep_assert_held(&lock);
+
 	inet_get_local_port_range(net, &low, &high);
 	remaining = (high - low) + 1;
 	rover = prandom_u32() % remaining + low;
@@ -3482,6 +3496,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 	struct rdma_id_private *cur_id;
 	struct sockaddr *addr, *cur_addr;
 
+	lockdep_assert_held(&lock);
+
 	addr = cma_src_addr(id_priv);
 	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
 		if (id_priv == cur_id)
@@ -3512,6 +3528,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
 	unsigned short snum;
 	int ret;
 
+	lockdep_assert_held(&lock);
+
 	snum = ntohs(cma_port(cma_src_addr(id_priv)));
 	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
 		return -EACCES;
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 2257d7f7810f..738d1faf4bba 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -202,7 +202,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp)
 	return ret;
 }
 
-static void counter_history_stat_update(const struct rdma_counter *counter)
+static void counter_history_stat_update(struct rdma_counter *counter)
 {
 	struct ib_device *dev = counter->device;
 	struct rdma_port_counter *port_counter;
@@ -212,6 +212,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter)
 	if (!port_counter->hstats)
 		return;
 
+	rdma_counter_query_stats(counter);
+
 	for (i = 0; i < counter->stats->num_counters; i++)
 		port_counter->hstats->value[i] += counter->stats->value[i];
 }
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 186e0d652e8b..a09f8e3c7f3f 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -509,10 +509,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 
 	flush_workqueue(port_priv->wq);
-	ib_cancel_rmpp_recvs(mad_agent_priv);
 
 	deref_mad_agent(mad_agent_priv);
 	wait_for_completion(&mad_agent_priv->comp);
+	ib_cancel_rmpp_recvs(mad_agent_priv);
 
 	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 
@@ -2718,6 +2718,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 						 DMA_FROM_DEVICE);
 		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
 						  sg_list.addr))) {
+			kfree(mad_priv);
 			ret = -ENOMEM;
 			break;
 		}
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 38de4942c682..6d3ed7c6e19e 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -470,40 +470,46 @@ static struct ib_uobject *
 alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
 		       struct uverbs_attr_bundle *attrs)
 {
-	const struct uverbs_obj_fd_type *fd_type =
-		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+	const struct uverbs_obj_fd_type *fd_type;
 	int new_fd;
-	struct ib_uobject *uobj;
+	struct ib_uobject *uobj, *ret;
 	struct file *filp;
 
+	uobj = alloc_uobj(attrs, obj);
+	if (IS_ERR(uobj))
+		return uobj;
+
+	fd_type =
+		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
 	if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
-		    fd_type->fops->release != &uverbs_async_event_release))
-		return ERR_PTR(-EINVAL);
+		    fd_type->fops->release != &uverbs_async_event_release)) {
+		ret = ERR_PTR(-EINVAL);
+		goto err_fd;
+	}
 
 	new_fd = get_unused_fd_flags(O_CLOEXEC);
-	if (new_fd < 0)
-		return ERR_PTR(new_fd);
-
-	uobj = alloc_uobj(attrs, obj);
-	if (IS_ERR(uobj))
+	if (new_fd < 0) {
+		ret = ERR_PTR(new_fd);
 		goto err_fd;
+	}
 
 	/* Note that uverbs_uobject_fd_release() is called during abort */
 	filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
 				  fd_type->flags);
 	if (IS_ERR(filp)) {
-		uverbs_uobject_put(uobj);
-		uobj = ERR_CAST(filp);
-		goto err_fd;
+		ret = ERR_CAST(filp);
+		goto err_getfile;
 	}
 
 	uobj->object = filp;
 	uobj->id = new_fd;
 	return uobj;
 
-err_fd:
+err_getfile:
 	put_unused_fd(new_fd);
-	return uobj;
+err_fd:
+	uverbs_uobject_put(uobj);
+	return ret;
 }
 
 struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
@@ -643,9 +649,6 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
 {
 	struct ib_uverbs_file *ufile = attrs->ufile;
 
-	/* alloc_commit consumes the uobj kref */
-	uobj->uapi_object->type_class->alloc_commit(uobj);
-
 	/* kref is held so long as the uobj is on the uobj list. */
 	uverbs_uobject_get(uobj);
 	spin_lock_irq(&ufile->uobjects_lock);
@@ -655,6 +658,9 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
 	/* matches atomic_set(-1) in alloc_uobj */
 	atomic_set(&uobj->usecnt, 0);
 
+	/* alloc_commit consumes the uobj kref */
+	uobj->uapi_object->type_class->alloc_commit(uobj);
+
 	/* Matches the down_read in rdma_alloc_begin_uobject */
 	up_read(&ufile->hw_destroy_rwsem);
 }
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a2ed09a3c714..8c930bf1df89 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
 	return len;
 }
 
-static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
 {
 	struct sk_buff *skb = NULL;
 	struct nlmsghdr *nlh;
 	void *data;
 	struct ib_sa_mad *mad;
 	int len;
+	unsigned long flags;
+	unsigned long delay;
+	gfp_t gfp_flag;
+	int ret;
+
+	INIT_LIST_HEAD(&query->list);
+	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
 
 	mad = query->mad_buf->mad;
 	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
@@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
 	/* Repair the nlmsg header length */
 	nlmsg_end(skb, nlh);
 
-	return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
-}
+	gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
+		GFP_NOWAIT;
 
-static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
-{
-	unsigned long flags;
-	unsigned long delay;
-	int ret;
+	spin_lock_irqsave(&ib_nl_request_lock, flags);
+	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
 
-	INIT_LIST_HEAD(&query->list);
-	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
+	if (ret)
+		goto out;
 
-	/* Put the request on the list first.*/
-	spin_lock_irqsave(&ib_nl_request_lock, flags);
+	/* Put the request on the list.*/
 	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
 	query->timeout = delay + jiffies;
 	list_add_tail(&query->list, &ib_nl_request_list);
 	/* Start the timeout if this is the only request */
 	if (ib_nl_request_list.next == &query->list)
 		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
-	ret = ib_nl_send_msg(query, gfp_mask);
-	if (ret) {
-		ret = -EIO;
-		/* Remove the request */
-		spin_lock_irqsave(&ib_nl_request_lock, flags);
-		list_del(&query->list);
-		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
-	}
+out:
+	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
 	return ret;
 }
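
Illustrative sketch (not part of the patch above): the cma.c hunks add lockdep_assert_held(&lock) to helpers that must be entered with the global CMA mutex already held, so lockdep can flag callers that forget to take it. A minimal example of that annotation pattern follows; example_lock, example_helper and example_entry are hypothetical names used only for illustration.

#include <linux/mutex.h>
#include <linux/lockdep.h>

static DEFINE_MUTEX(example_lock);

/* Must be called with example_lock held by the caller. */
static void example_helper(void)
{
	/* With lockdep enabled, this warns if the lock is not held. */
	lockdep_assert_held(&example_lock);
	/* ... touch state protected by example_lock ... */
}

static void example_entry(void)
{
	mutex_lock(&example_lock);
	example_helper();
	mutex_unlock(&example_lock);
}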