Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	55
1 file changed, 27 insertions, 28 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8e08b409c71e..597dbd749f05 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -160,7 +160,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 }
 
 /* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+				      u32 rcv_nxt)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -171,6 +172,9 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
 		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
 			__sock_put(sk);
 	}
+
+	if (unlikely(rcv_nxt != tp->rcv_nxt))
+		return;  /* Special ACK sent by DCTCP to reflect ECN */
 	tcp_dec_quickack_mode(sk, pkts);
 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
@@ -973,17 +977,6 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-/* BBR congestion control needs pacing.
- * Same remark for SO_MAX_PACING_RATE.
- * sch_fq packet scheduler is efficiently handling pacing,
- * but is not always installed/used.
- * Return true if TCP stack should pace packets itself.
- */
-static bool tcp_needs_internal_pacing(const struct sock *sk)
-{
-	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
-}
-
 static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
 {
 	u64 len_ns;
@@ -995,9 +988,6 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
 	if (!rate || rate == ~0U)
 		return;
 
-	/* Should account for header sizes as sch_fq does,
-	 * but lets make things simple.
-	 */
 	len_ns = (u64)skb->len * NSEC_PER_SEC;
 	do_div(len_ns, rate);
 	hrtimer_start(&tcp_sk(sk)->pacing_timer,
@@ -1023,8 +1013,8 @@ static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
  * We are working here with either a clone of the original
  * SKB, or a fresh unique copy made by the retransmit engine.
  */
-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
-			    gfp_t gfp_mask)
+static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct inet_sock *inet;
@@ -1100,7 +1090,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	th->source		= inet->inet_sport;
 	th->dest		= inet->inet_dport;
 	th->seq			= htonl(tcb->seq);
-	th->ack_seq		= htonl(tp->rcv_nxt);
+	th->ack_seq		= htonl(rcv_nxt);
 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
 					tcb->tcp_flags);
 
@@ -1141,11 +1131,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	icsk->icsk_af_ops->send_check(sk, skb);
 
 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
-		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
+		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
 
 	if (skb->len != tcp_header_size) {
 		tcp_event_data_sent(tp, sk);
 		tp->data_segs_out += tcp_skb_pcount(skb);
+		tp->bytes_sent += skb->len - tcp_header_size;
 		tcp_internal_pacing(sk, skb);
 	}
 
@@ -1178,6 +1169,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	return err;
 }
 
+static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+			    gfp_t gfp_mask)
+{
+	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
+				  tcp_sk(sk)->rcv_nxt);
+}
+
 /* This routine just queues the buffer for sending.
  *
  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -2700,9 +2698,8 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *next_skb = skb_rb_next(skb);
-	int skb_size, next_skb_size;
+	int next_skb_size;
 
-	skb_size = skb->len;
 	next_skb_size = next_skb->len;
 
 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
@@ -2873,6 +2870,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
 	tp->total_retrans += segs;
+	tp->bytes_retrans += skb->len;
 
 	/* make sure skb->data is aligned on arches that require it
 	 * and check if ack-trimming & collapsing extended the headroom
@@ -3523,8 +3521,6 @@ void tcp_send_delayed_ack(struct sock *sk)
 	int ato = icsk->icsk_ack.ato;
 	unsigned long timeout;
 
-	tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
-
 	if (ato > TCP_DELACK_MIN) {
 		const struct tcp_sock *tp = tcp_sk(sk);
 		int max_ato = HZ / 2;
@@ -3573,7 +3569,7 @@ void tcp_send_delayed_ack(struct sock *sk)
 }
 
 /* This routine sends an ack and also updates the window. */
-void tcp_send_ack(struct sock *sk)
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
 {
 	struct sk_buff *buff;
 
@@ -3581,8 +3577,6 @@ void tcp_send_ack(struct sock *sk)
 	if (sk->sk_state == TCP_CLOSE)
 		return;
 
-	tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
-
 	/* We are not putting this on the write queue, so
 	 * tcp_transmit_skb() will set the ownership to this
 	 * sock.
@@ -3608,9 +3602,14 @@ void tcp_send_ack(struct sock *sk)
 	skb_set_tcp_pure_ack(buff);
 
 	/* Send it off, this clears delayed acks for us. */
-	tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
+	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
+}
+EXPORT_SYMBOL_GPL(__tcp_send_ack);
+
+void tcp_send_ack(struct sock *sk)
+{
+	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
 }
-EXPORT_SYMBOL_GPL(tcp_send_ack);
 
 /* This routine sends a packet with an out of date sequence
  * number. It assumes the other end will try to ack it.
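
Editor's note (not part of the diff): the exported __tcp_send_ack(sk, rcv_nxt) helper lets a caller emit a pure ACK whose ack_seq is an explicitly supplied, possibly older rcv_nxt. When that value differs from tp->rcv_nxt, tcp_event_ack_sent() now returns early, so quick-ACK accounting and the ICSK_TIME_DACK timer are left untouched. A congestion control module such as DCTCP could use the helper roughly as in the sketch below to flush an ACK for the previously seen CE state; everything here except __tcp_send_ack() and tcp_sk() is an illustrative assumption, not code from this change.

/* Illustrative sketch only: a hypothetical CA-module helper that keeps a
 * per-connection snapshot of rcv_nxt and sends a "special" ACK for it.
 */
static void example_flush_prior_ack(struct sock *sk, u32 *prior_rcv_nxt)
{
	/* Emit an immediate ACK acknowledging data only up to the recorded
	 * *prior_rcv_nxt.  Because this value differs from tp->rcv_nxt,
	 * tcp_event_ack_sent() skips tcp_dec_quickack_mode() and does not
	 * clear the delayed-ACK timer.
	 */
	__tcp_send_ack(sk, *prior_rcv_nxt);

	/* Remember the current left edge for the next special ACK. */
	*prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
}

The new tp->bytes_sent and tp->bytes_retrans counters in the diff simply accumulate payload bytes at transmit and retransmit time; how they are exported (presumably alongside the other per-socket TCP statistics) is outside this file.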