diff options
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c | 27 ++++++++++++++++---------
1 file changed, 19 insertions(+), 8 deletions(-)
| diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 12fda8f27b08..518f04355fbf 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -261,7 +261,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)  		 * cwnd may be very low (even just 1 packet), so we should ACK  		 * immediately.  		 */ -		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; +		if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) +			inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;  	}  } @@ -3487,10 +3488,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)  	}  } -/* This routine deals with acks during a TLP episode. - * We mark the end of a TLP episode on receiving TLP dupack or when - * ack is after tlp_high_seq. - * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. +/* This routine deals with acks during a TLP episode and ends an episode by + * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack   */  static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)  { @@ -3499,7 +3498,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)  	if (before(ack, tp->tlp_high_seq))  		return; -	if (flag & FLAG_DSACKING_ACK) { +	if (!tp->tlp_retrans) { +		/* TLP of new data has been acknowledged */ +		tp->tlp_high_seq = 0; +	} else if (flag & FLAG_DSACKING_ACK) {  		/* This DSACK means original and TLP probe arrived; no loss */  		tp->tlp_high_seq = 0;  	} else if (after(ack, tp->tlp_high_seq)) { @@ -3665,6 +3667,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)  		tcp_in_ack_event(sk, ack_ev_flags);  	} +	/* This is a deviation from RFC3168 since it states that: +	 * "When the TCP data sender is ready to set the CWR bit after reducing +	 * the congestion window, it SHOULD set the CWR bit only on the first +	 * new data packet that it transmits." +	 * We accept CWR on pure ACKs to be more robust +	 * with widely-deployed TCP implementations that do this. 
+	 */ +	tcp_ecn_accept_cwr(sk, skb); +  	/* We passed data and got it acked, remove any soft error  	 * log. Something worked...  	 */ @@ -4572,6 +4583,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)  	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {  		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); +		sk->sk_data_ready(sk);  		tcp_drop(sk, skb);  		return;  	} @@ -4800,8 +4812,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)  	skb_dst_drop(skb);  	__skb_pull(skb, tcp_hdr(skb)->doff * 4); -	tcp_ecn_accept_cwr(sk, skb); -  	tp->rx_opt.dsack = 0;  	/*  Queue data for delivery to the user. @@ -4820,6 +4830,7 @@ queue_and_out:  			sk_forced_mem_schedule(sk, skb->truesize);  		else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {  			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); +			sk->sk_data_ready(sk);  			goto drop;  		} |