Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cm.c')
 drivers/infiniband/hw/cxgb4/cm.c | 39 ++++++++++++++++++++++++++++-----------
 1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 185452abf32c..1f863a96a480 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep)
 		opt2 |= SACK_EN(1);
 	if (wscale && enable_tcp_window_scaling)
 		opt2 |= WND_SCALE_EN(1);
+	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		opt2 |= T5_OPT_2_VALID;
+		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+	}
 	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 
 	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	state_set(&ep->com, ABORTING);
+	__state_set(&ep->com, ABORTING);
 	set_bit(ABORT_CONN, &ep->com.history);
 	return send_abort(ep, skb, gfp);
 }
@@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	return credits;
 }
 
-static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
 	struct mpa_message *mpa;
 	struct mpa_v2_conn_params *mpa_v2_params;
@@ -1164,6 +1168,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	enum c4iw_qp_attr_mask mask;
 	int err;
+	int disconnect = 0;
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
@@ -1173,7 +1178,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * will abort the connection.
 	 */
 	if (stop_ep_timer(ep))
-		return;
+		return 0;
 
 	/*
 	 * If we get more than the supported amount of private data
@@ -1195,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * if we don't even have the mpa message, then bail.
 	 */
 	if (ep->mpa_pkt_len < sizeof(*mpa))
-		return;
+		return 0;
 	mpa = (struct mpa_message *) ep->mpa_pkt;
 
 	/* Validate MPA header. */
@@ -1235,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * We'll continue process when more data arrives.
 	 */
 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
-		return;
+		return 0;
 
 	if (mpa->flags & MPA_REJECT) {
 		err = -ECONNREFUSED;
@@ -1337,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
 		attrs.ecode = MPA_NOMATCH_RTR;
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
+		attrs.send_term = 1;
 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-				C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+				C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 		err = -ENOMEM;
+		disconnect = 1;
 		goto out;
 	}
 
@@ -1355,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
 		attrs.ecode = MPA_INSUFF_IRD;
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
+		attrs.send_term = 1;
 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-				C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+				C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 		err = -ENOMEM;
+		disconnect = 1;
 		goto out;
 	}
 	goto out;
@@ -1366,7 +1375,7 @@ err:
 	send_abort(ep, skb, GFP_KERNEL);
 out:
 	connect_reply_upcall(ep, err);
-	return;
+	return disconnect;
 }
 
 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned int tid = GET_TID(hdr);
 	struct tid_info *t = dev->rdev.lldi.tids;
 	__u8 status = hdr->status;
+	int disconnect = 0;
 
 	ep = lookup_tid(t, tid);
 	if (!ep)
@@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
 		ep->rcv_seq += dlen;
-		process_mpa_reply(ep, skb);
+		disconnect = process_mpa_reply(ep, skb);
 		break;
 	case MPA_REQ_WAIT:
 		ep->rcv_seq += dlen;
@@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 			       ep->com.state, ep->hwtid, status);
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+		disconnect = 1;
 		break;
 	}
 	default:
 		break;
 	}
 	mutex_unlock(&ep->com.mutex);
+	if (disconnect)
+		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	return 0;
 }
 
@@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		if (tcph->ece && tcph->cwr)
 			opt2 |= CCTRL_ECN(1);
 	}
+	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		opt2 |= T5_OPT_2_VALID;
+		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+	}
 
 	rpl = cplhdr(skb);
 	INIT_TP_WR(rpl, ep->hwtid);
@@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep)
 			__func__, ep, ep->hwtid, ep->com.state);
 		abort = 0;
 	}
-	mutex_unlock(&ep->com.mutex);
 	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
+	mutex_unlock(&ep->com.mutex);
 	c4iw_put_ep(&ep->com);
 }
 
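Note on the rx_data() hunk above: the disconnect decision is now only recorded while ep->com.mutex is held, and c4iw_ep_disconnect() runs after mutex_unlock(). Below is a minimal, self-contained sketch of that deferred-disconnect pattern using pthreads; the names (struct ep, handle_rx_data, do_disconnect, STATE_ERROR) are hypothetical stand-ins for illustration, not the driver's actual types or functions.

/*
 * Illustrative sketch only (not from the patch): record a "disconnect"
 * decision while the endpoint lock is held, then act on it only after
 * the lock is released, mirroring how rx_data() now calls
 * c4iw_ep_disconnect() after mutex_unlock().
 */
#include <pthread.h>
#include <stdio.h>

struct ep {
	pthread_mutex_t mutex;
	int state;			/* hypothetical connection state */
};

enum { STATE_OK, STATE_ERROR };

static void do_disconnect(struct ep *ep)
{
	/* Stand-in for the disconnect path: must not run under ep->mutex. */
	printf("disconnecting endpoint %p\n", (void *)ep);
}

static void handle_rx_data(struct ep *ep)
{
	int disconnect = 0;

	pthread_mutex_lock(&ep->mutex);
	if (ep->state == STATE_ERROR)
		disconnect = 1;		/* record the decision under the lock */
	pthread_mutex_unlock(&ep->mutex);

	if (disconnect)			/* act on it only after unlocking */
		do_disconnect(ep);
}

int main(void)
{
	struct ep ep = { .mutex = PTHREAD_MUTEX_INITIALIZER, .state = STATE_ERROR };

	handle_rx_data(&ep);
	return 0;
}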