diff options
Diffstat (limited to 'net/ipv4/tcp_input.c')
| -rw-r--r-- | net/ipv4/tcp_input.c | 55 | 
1 file changed, 39 insertions, 16 deletions
| diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3a4d9b34bed4..c9ab964189a0 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1820,14 +1820,12 @@ advance_sp:  	for (j = 0; j < used_sacks; j++)  		tp->recv_sack_cache[i++] = sp[j]; -	tcp_mark_lost_retrans(sk); - -	tcp_verify_left_out(tp); -  	if ((state.reord < tp->fackets_out) &&  	    ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))  		tcp_update_reordering(sk, tp->fackets_out - state.reord, 0); +	tcp_mark_lost_retrans(sk); +	tcp_verify_left_out(tp);  out:  #if FASTRETRANS_DEBUG > 0 @@ -2700,16 +2698,21 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)  	struct tcp_sock *tp = tcp_sk(sk);  	bool recovered = !before(tp->snd_una, tp->high_seq); +	if ((flag & FLAG_SND_UNA_ADVANCED) && +	    tcp_try_undo_loss(sk, false)) +		return; +  	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */  		/* Step 3.b. A timeout is spurious if not all data are  		 * lost, i.e., never-retransmitted data are (s)acked.  		 */ -		if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED)) +		if ((flag & FLAG_ORIG_SACK_ACKED) && +		    tcp_try_undo_loss(sk, true))  			return; -		if (after(tp->snd_nxt, tp->high_seq) && -		    (flag & FLAG_DATA_SACKED || is_dupack)) { -			tp->frto = 0; /* Loss was real: 2nd part of step 3.a */ +		if (after(tp->snd_nxt, tp->high_seq)) { +			if (flag & FLAG_DATA_SACKED || is_dupack) +				tp->frto = 0; /* Step 3.a. 
loss was real */  		} else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {  			tp->high_seq = tp->snd_nxt;  			__tcp_push_pending_frames(sk, tcp_current_mss(sk), @@ -2734,8 +2737,6 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)  		else if (flag & FLAG_SND_UNA_ADVANCED)  			tcp_reset_reno_sack(tp);  	} -	if (tcp_try_undo_loss(sk, false)) -		return;  	tcp_xmit_retransmit_queue(sk);  } @@ -3280,6 +3281,28 @@ static inline bool tcp_may_update_window(const struct tcp_sock *tp,  		(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);  } +/* If we update tp->snd_una, also update tp->bytes_acked */ +static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) +{ +	u32 delta = ack - tp->snd_una; + +	u64_stats_update_begin(&tp->syncp); +	tp->bytes_acked += delta; +	u64_stats_update_end(&tp->syncp); +	tp->snd_una = ack; +} + +/* If we update tp->rcv_nxt, also update tp->bytes_received */ +static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) +{ +	u32 delta = seq - tp->rcv_nxt; + +	u64_stats_update_begin(&tp->syncp); +	tp->bytes_received += delta; +	u64_stats_update_end(&tp->syncp); +	tp->rcv_nxt = seq; +} +  /* Update our send window.   *   * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 @@ -3315,7 +3338,7 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32  		}  	} -	tp->snd_una = ack; +	tcp_snd_una_update(tp, ack);  	return flag;  } @@ -3497,7 +3520,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)  		 * Note, we use the fact that SND.UNA>=SND.WL2.  		 
*/  		tcp_update_wl(tp, ack_seq); -		tp->snd_una = ack; +		tcp_snd_una_update(tp, ack);  		flag |= FLAG_WIN_UPDATE;  		tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); @@ -4236,7 +4259,7 @@ static void tcp_ofo_queue(struct sock *sk)  		tail = skb_peek_tail(&sk->sk_receive_queue);  		eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); -		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; +		tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);  		if (!eaten)  			__skb_queue_tail(&sk->sk_receive_queue, skb);  		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) @@ -4404,7 +4427,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int  	__skb_pull(skb, hdrlen);  	eaten = (tail &&  		 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; -	tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; +	tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);  	if (!eaten) {  		__skb_queue_tail(&sk->sk_receive_queue, skb);  		skb_set_owner_r(skb, sk); @@ -4497,7 +4520,7 @@ queue_and_out:  			eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);  		} -		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; +		tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);  		if (skb->len)  			tcp_event_data_recv(sk, skb);  		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) @@ -5245,7 +5268,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,  					tcp_rcv_rtt_measure_ts(sk, skb);  					__skb_pull(skb, tcp_header_len); -					tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; +					tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);  					NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);  					eaten = 1;  				} |