Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	60
1 file changed, 29 insertions, 31 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1c054431e358..4c376b6d8764 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -167,16 +167,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 	if (tcp_packets_in_flight(tp) == 0)
 		tcp_ca_event(sk, CA_EVENT_TX_START);
 
-	/* If this is the first data packet sent in response to the
-	 * previous received data,
-	 * and it is a reply for ato after last received packet,
-	 * increase pingpong count.
-	 */
-	if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
-	    (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
-		inet_csk_inc_pingpong_cnt(sk);
-
 	tp->lsndtime = now;
+
+	/* If it is a reply for ato after last received
+	 * packet, enter pingpong mode.
+	 */
+	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+		inet_csk_enter_pingpong_mode(sk);
 }
 
 /* Account for an ACK we sent. */
@@ -230,7 +227,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
 	 * which we interpret as a sign the remote TCP is not
 	 * misinterpreting the window field as a signed quantity.
 	 */
-	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
 	else
 		(*rcv_wnd) = min_t(u32, space, U16_MAX);
@@ -241,7 +238,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
 	*rcv_wscale = 0;
 	if (wscale_ok) {
 		/* Set window scaling on max possible window */
-		space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+		space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
 		space = max_t(u32, space, sysctl_rmem_max);
 		space = min_t(u32, space, *window_clamp);
 		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
@@ -285,7 +282,7 @@ static u16 tcp_select_window(struct sock *sk)
 	 * scaled window.
 	 */
 	if (!tp->rx_opt.rcv_wscale &&
-	    sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
 		new_win = min(new_win, MAX_TCP_WINDOW);
 	else
 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
@@ -324,7 +321,7 @@ static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
-	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
+	bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 ||
 		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
 
 	if (!use_ecn) {
@@ -346,7 +343,7 @@ static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
 
 static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
 {
-	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))
 		/* tp->ecn_flags are cleared at a later point in time when
 		 * SYN ACK is ultimatively being received.
 		 */
@@ -791,18 +788,18 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 	opts->mss = tcp_advertise_mss(sk);
 	remaining -= TCPOLEN_MSS_ALIGNED;
 
-	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
+	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps) && !*md5)) {
 		opts->options |= OPTION_TS;
 		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
 		opts->tsecr = tp->rx_opt.ts_recent;
 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 	}
-	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
+	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) {
 		opts->ws = tp->rx_opt.rcv_wscale;
 		opts->options |= OPTION_WSCALE;
 		remaining -= TCPOLEN_WSCALE_ALIGNED;
 	}
-	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
+	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) {
 		opts->options |= OPTION_SACK_ADVERTISE;
 		if (unlikely(!(OPTION_TS & opts->options)))
 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -1719,7 +1716,8 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
 	mss_now -= icsk->icsk_ext_hdr_len;
 
 	/* Then reserve room for full set of TCP options and 8 bytes of data */
-	mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
+	mss_now = max(mss_now,
+		      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss));
 	return mss_now;
 }
 
@@ -1762,10 +1760,10 @@ void tcp_mtup_init(struct sock *sk)
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct net *net = sock_net(sk);
 
-	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
+	icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1;
 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
 			       icsk->icsk_af_ops->net_header_len;
-	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
+	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss));
 	icsk->icsk_mtup.probe_size = 0;
 	if (icsk->icsk_mtup.enabled)
 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
@@ -1897,7 +1895,7 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
 		if (tp->packets_out > tp->snd_cwnd_used)
 			tp->snd_cwnd_used = tp->packets_out;
 
-		if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
+		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) &&
 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
 		    !ca_ops->cong_control)
 			tcp_cwnd_application_limited(sk);
@@ -1975,7 +1973,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
 
 	bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift);
 
-	r = tcp_min_rtt(tcp_sk(sk)) >> sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log;
+	r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
 	if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
 		bytes += sk->sk_gso_max_size >> r;
 
@@ -1994,7 +1992,7 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
 
 	min_tso = ca_ops->min_tso_segs ?
 			ca_ops->min_tso_segs(sk) :
-			sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
+			READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
 
 	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
 	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
@@ -2282,7 +2280,7 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
 	u32 interval;
 	s32 delta;
 
-	interval = net->ipv4.sysctl_tcp_probe_interval;
+	interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval);
 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
 	if (unlikely(delta >= interval * HZ)) {
 		int mss = tcp_current_mss(sk);
@@ -2366,7 +2364,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	 * probing process by not resetting search range to its orignal.
 	 */
 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
-		interval < net->ipv4.sysctl_tcp_probe_threshold) {
+	    interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) {
 		/* Check whether enough time has elaplased for
 		 * another round of probing.
 		 */
@@ -2506,7 +2504,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
 	if (sk->sk_pacing_status == SK_PACING_NONE)
 		limit = min_t(unsigned long, limit,
-			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
+			      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
 	limit <<= factor;
 
 	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
@@ -2740,7 +2738,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
 	if (rcu_access_pointer(tp->fastopen_rsk))
 		return false;
 
-	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
+	early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans);
 	/* Schedule a loss probe in 2*RTT for SACK capable connections
 	 * not in loss recovery, that are either limited by cwnd or application.
 	 */
@@ -3104,7 +3102,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 	struct sk_buff *skb = to, *tmp;
 	bool first = true;
 
-	if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
+	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse))
 		return;
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
 		return;
@@ -3646,7 +3644,7 @@ static void tcp_connect_init(struct sock *sk)
 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
 	 */
 	tp->tcp_header_len = sizeof(struct tcphdr);
-	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps))
 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -3682,7 +3680,7 @@ static void tcp_connect_init(struct sock *sk)
 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
 				  &tp->rcv_wnd,
 				  &tp->window_clamp,
-				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
+				  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling),
 				  &rcv_wscale,
 				  rcv_wnd);
 
@@ -4089,7 +4087,7 @@ void tcp_send_probe0(struct sock *sk)
 
 	icsk->icsk_probes_out++;
 	if (err <= 0) {
-		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
+		if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
			icsk->icsk_backoff++;
 		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
 	} else {