diff options
| author | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 | 
|---|---|---|
| committer | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 | 
| commit | 1ac731c529cd4d6adbce134754b51ff7d822b145 (patch) | |
| tree | 143ab3f35ca5f3b69f583c84e6964b17139c2ec1 /drivers/infiniband/core/cma.c | |
| parent | 07b4c950f27bef0362dc6ad7ee713aab61d58149 (diff) | |
| parent | 54116d442e001e1b6bd482122043b1870998a1f3 (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.6 merge window.
Diffstat (limited to 'drivers/infiniband/core/cma.c')
| -rw-r--r-- | drivers/infiniband/core/cma.c | 70 | 
1 file changed, 38 insertions, 32 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 308155937713..6b3f4384e46a 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -624,22 +624,11 @@ static inline unsigned short cma_family(struct rdma_id_private *id_priv)  	return id_priv->id.route.addr.src_addr.ss_family;  } -static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) +static int cma_set_default_qkey(struct rdma_id_private *id_priv)  {  	struct ib_sa_mcmember_rec rec;  	int ret = 0; -	if (id_priv->qkey) { -		if (qkey && id_priv->qkey != qkey) -			return -EINVAL; -		return 0; -	} - -	if (qkey) { -		id_priv->qkey = qkey; -		return 0; -	} -  	switch (id_priv->id.ps) {  	case RDMA_PS_UDP:  	case RDMA_PS_IB: @@ -659,6 +648,16 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)  	return ret;  } +static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) +{ +	if (!qkey || +	    (id_priv->qkey && (id_priv->qkey != qkey))) +		return -EINVAL; + +	id_priv->qkey = qkey; +	return 0; +} +  static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)  {  	dev_addr->dev_type = ARPHRD_INFINIBAND; @@ -710,8 +709,7 @@ cma_validate_port(struct ib_device *device, u32 port,  	}  	sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev); -	if (ndev) -		dev_put(ndev); +	dev_put(ndev);  	return sgid_attr;  } @@ -1229,7 +1227,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,  	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;  	if (id_priv->id.qp_type == IB_QPT_UD) { -		ret = cma_set_qkey(id_priv, 0); +		ret = cma_set_default_qkey(id_priv);  		if (ret)  			return ret; @@ -2430,8 +2428,7 @@ err_unlock:  	mutex_unlock(&listen_id->handler_mutex);  net_dev_put: -	if (net_dev) -		dev_put(net_dev); +	dev_put(net_dev);  	return ret;  } @@ -3298,7 +3295,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)  	route->path_rec->traffic_class = tos;  	
route->path_rec->mtu = iboe_get_mtu(ndev->mtu);  	route->path_rec->rate_selector = IB_SA_EQ; -	route->path_rec->rate = iboe_get_rate(ndev); +	route->path_rec->rate = IB_RATE_PORT_CURRENT;  	dev_put(ndev);  	route->path_rec->packet_life_time_selector = IB_SA_EQ;  	/* In case ACK timeout is set, use this value to calculate @@ -4569,7 +4566,10 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,  	memset(&rep, 0, sizeof rep);  	rep.status = status;  	if (status == IB_SIDR_SUCCESS) { -		ret = cma_set_qkey(id_priv, qkey); +		if (qkey) +			ret = cma_set_qkey(id_priv, qkey); +		else +			ret = cma_set_default_qkey(id_priv);  		if (ret)  			return ret;  		rep.qp_num = id_priv->qp_num; @@ -4774,9 +4774,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,  	enum ib_gid_type gid_type;  	struct net_device *ndev; -	if (!status) -		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); -	else +	if (status)  		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. 
status %d\n",  				     status); @@ -4804,7 +4802,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,  	}  	event->param.ud.qp_num = 0xFFFFFF; -	event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey); +	event->param.ud.qkey = id_priv->qkey;  out:  	if (ndev) @@ -4823,8 +4821,11 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)  	    READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)  		goto out; -	cma_make_mc_event(status, id_priv, multicast, &event, mc); -	ret = cma_cm_event_handler(id_priv, &event); +	ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); +	if (!ret) { +		cma_make_mc_event(status, id_priv, multicast, &event, mc); +		ret = cma_cm_event_handler(id_priv, &event); +	}  	rdma_destroy_ah_attr(&event.param.ud.ah_attr);  	WARN_ON(ret); @@ -4877,9 +4878,11 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,  	if (ret)  		return ret; -	ret = cma_set_qkey(id_priv, 0); -	if (ret) -		return ret; +	if (!id_priv->qkey) { +		ret = cma_set_default_qkey(id_priv); +		if (ret) +			return ret; +	}  	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);  	rec.qkey = cpu_to_be32(id_priv->qkey); @@ -4956,15 +4959,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,  	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);  	ib.rec.pkey = cpu_to_be16(0xffff); -	if (id_priv->id.ps == RDMA_PS_UDP) -		ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); -  	if (dev_addr->bound_dev_if)  		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);  	if (!ndev)  		return -ENODEV; -	ib.rec.rate = iboe_get_rate(ndev); +	ib.rec.rate = IB_RATE_PORT_CURRENT;  	ib.rec.hop_limit = 1;  	ib.rec.mtu = iboe_get_mtu(ndev->mtu); @@ -4984,6 +4984,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,  	if (err || !ib.rec.mtu)  		return err ?: -EINVAL; +	if (!id_priv->qkey) +		cma_set_default_qkey(id_priv); +  	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,  
		    &ib.rec.port_gid);  	INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler); @@ -5009,6 +5012,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,  			    READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))  		return -EINVAL; +	if (id_priv->id.qp_type != IB_QPT_UD) +		return -EINVAL; +  	mc = kzalloc(sizeof(*mc), GFP_KERNEL);  	if (!mc)  		return -ENOMEM;  |