Diffstat (limited to 'include/net/tcp.h')
 -rw-r--r--  include/net/tcp.h | 62 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 54 insertions(+), 8 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cd3ecda9386a..770917d0caa7 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -36,6 +36,7 @@
 #include <net/inet_hashtables.h>
 #include <net/checksum.h>
 #include <net/request_sock.h>
+#include <net/sock_reuseport.h>
 #include <net/sock.h>
 #include <net/snmp.h>
 #include <net/ip.h>
@@ -473,19 +474,45 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
  */
 static inline void tcp_synq_overflow(const struct sock *sk)
 {
-	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
-	unsigned long now = jiffies;
+	unsigned int last_overflow;
+	unsigned int now = jiffies;
 
-	if (time_after(now, last_overflow + HZ))
+	if (sk->sk_reuseport) {
+		struct sock_reuseport *reuse;
+
+		reuse = rcu_dereference(sk->sk_reuseport_cb);
+		if (likely(reuse)) {
+			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+			if (time_after32(now, last_overflow + HZ))
+				WRITE_ONCE(reuse->synq_overflow_ts, now);
+			return;
+		}
+	}
+
+	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+	if (time_after32(now, last_overflow + HZ))
 		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
 }
 
 /* syncookies: no recent synqueue overflow on this listening socket? */
 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 {
-	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+	unsigned int last_overflow;
+	unsigned int now = jiffies;
 
-	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+	if (sk->sk_reuseport) {
+		struct sock_reuseport *reuse;
+
+		reuse = rcu_dereference(sk->sk_reuseport_cb);
+		if (likely(reuse)) {
+			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+			return time_after32(now, last_overflow +
+					    TCP_SYNCOOKIE_VALID);
+		}
+	}
+
+	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+	return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
 }
 
 static inline u32 tcp_cookie_time(void)
@@ -963,6 +990,8 @@ struct rate_sample {
 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
 	s32  delivered;		/* number of packets delivered over interval */
 	long interval_us;	/* time for tp->delivered to incr "delivered" */
+	u32 snd_interval_us;	/* snd interval for delivered packets */
+	u32 rcv_interval_us;	/* rcv interval for delivered packets */
 	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
 	int  losses;		/* number of packets marked lost upon ACK */
 	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
@@ -1194,6 +1223,17 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
 	return tp->is_cwnd_limited;
 }
 
+/* BBR congestion control needs pacing.
+ * Same remark for SO_MAX_PACING_RATE.
+ * sch_fq packet scheduler is efficiently handling pacing,
+ * but is not always installed/used.
+ * Return true if TCP stack should pace packets itself.
+ */
+static inline bool tcp_needs_internal_pacing(const struct sock *sk)
+{
+	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
+}
+
 /* Something is really bad, we could not queue an additional packet,
  * because qdisc is full or receiver sent a 0 window.
  * We do not want to add fuel to the fire, or abort too early,
@@ -1371,7 +1411,8 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
 {
 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
 		return true;
-	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
+	if (unlikely(!time_before32(ktime_get_seconds(),
+				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
 		return true;
 	/*
 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
@@ -1401,7 +1442,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
 
 	   However, we can relax time bounds for RST segments to MSL.
 	 */
-	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
+	if (rst && !time_before32(ktime_get_seconds(),
+				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
 		return false;
 	return true;
 }
@@ -1787,7 +1829,7 @@ void tcp_v4_destroy_sock(struct sock *sk);
 
 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 				netdev_features_t features);
-struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
 int tcp_gro_complete(struct sk_buff *skb);
 
 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
@@ -2023,6 +2065,10 @@ int tcp_set_ulp_id(struct sock *sk, const int ulp);
 void tcp_get_available_ulp(char *buf, size_t len);
 void tcp_cleanup_ulp(struct sock *sk);
 
+#define MODULE_ALIAS_TCP_ULP(name)				\
+	__MODULE_INFO(alias, alias_userspace, name);		\
+	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
+
 /* Call BPF_SOCK_OPS program that returns an int. If the return value
  * is < 0, then the BPF op failed (for example if the loaded BPF
  * program does not support the chosen operation or there is no BPF
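Note on the first two hunks: listeners sharing a port via SO_REUSEPORT have incoming SYNs load-balanced across the whole group, so a syncookie overflow timestamp kept per socket can miss overflows recorded on a sibling; the patch moves it into the shared struct sock_reuseport. For context, a minimal userspace sketch of such a listener group (illustrative only; error handling elided):

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Create one member of a SO_REUSEPORT listener group bound to "port". */
static int make_listener(unsigned short port)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = htons(port);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(fd, 128);
	return fd;
}

int main(void)
{
	/* SYNs to port 8080 are hashed across both listeners, so an
	 * overflow of either SYN queue must be visible to the sibling
	 * when it later validates a syncookie.
	 */
	int a = make_listener(8080);
	int b = make_listener(8080);

	pause();	/* accept() loops omitted in this sketch */
	close(a);
	close(b);
	return 0;
}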
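The same hunks, together with the PAWS changes further down, switch from unsigned long/get_seconds() arithmetic to 32-bit timestamps compared with time_after32()/time_before32(), which stay correct across 32-bit wraparound. A userspace sketch of that comparison trick; the helpers below only mirror the kernel macros and are not their actual definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* "a after b" iff the signed 32-bit difference a - b is positive;
 * the cast makes the test immune to the counter wrapping past 2^32,
 * as long as the two stamps are less than 2^31 ticks apart.
 */
static bool time_after32(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

static bool time_before32(uint32_t a, uint32_t b)
{
	return time_after32(b, a);
}

int main(void)
{
	uint32_t before_wrap = 0xfffffff0u;	/* 16 ticks before wrap */
	uint32_t after_wrap  = 0x00000010u;	/* 16 ticks after wrap */

	/* Both print 1, although after_wrap is numerically smaller. */
	printf("%d\n", time_after32(after_wrap, before_wrap));
	printf("%d\n", time_before32(before_wrap, after_wrap));
	return 0;
}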
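tcp_needs_internal_pacing() reads sk->sk_pacing_status with smp_load_acquire(), which pairs with a release store by whoever publishes the status (sch_fq, for instance, announcing it has taken over pacing). A userspace C11 sketch of that acquire/release pairing, with illustrative names; only the enum values mirror the header above:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum sk_pacing { SK_PACING_NONE, SK_PACING_NEEDED, SK_PACING_FQ };

/* Stand-in for sk->sk_pacing_status on a single socket. */
static _Atomic int pacing_status = SK_PACING_NEEDED;

/* Writer side: e.g. the fq qdisc announcing it now paces this flow. */
static void fq_takes_over_pacing(void)
{
	atomic_store_explicit(&pacing_status, SK_PACING_FQ,
			      memory_order_release);
}

/* Reader side: mirrors tcp_needs_internal_pacing(); the acquire load
 * pairs with the writer's release store.
 */
static bool needs_internal_pacing(void)
{
	return atomic_load_explicit(&pacing_status,
				    memory_order_acquire) == SK_PACING_NEEDED;
}

int main(void)
{
	printf("%d\n", needs_internal_pacing());	/* 1: TCP must pace */
	fq_takes_over_pacing();
	printf("%d\n", needs_internal_pacing());	/* 0: fq paces now */
	return 0;
}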
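Finally, MODULE_ALIAS_TCP_ULP() lets an upper-layer-protocol module publish a "tcp-ulp-<name>" alias so it can be autoloaded when userspace selects it by name through setsockopt(TCP_ULP). A sketch of how a module might use it; the "demo" ULP and its ops are hypothetical, while tcp_register_ulp()/tcp_unregister_ulp() and struct tcp_ulp_ops are the existing registration API:

#include <linux/module.h>
#include <net/tcp.h>

/* Hypothetical ULP: a real one would install its protocol hooks here. */
static int demo_ulp_init(struct sock *sk)
{
	return 0;
}

static void demo_ulp_release(struct sock *sk)
{
}

static struct tcp_ulp_ops demo_ulp_ops = {
	.name		= "demo",
	.owner		= THIS_MODULE,
	.init		= demo_ulp_init,
	.release	= demo_ulp_release,
};

static int __init demo_ulp_register(void)
{
	return tcp_register_ulp(&demo_ulp_ops);
}

static void __exit demo_ulp_unregister(void)
{
	tcp_unregister_ulp(&demo_ulp_ops);
}

module_init(demo_ulp_register);
module_exit(demo_ulp_unregister);
MODULE_LICENSE("GPL");
/* Expands to a "tcp-ulp-demo" module alias, so the TCP_ULP setsockopt
 * path can request_module("tcp-ulp-demo") when "demo" is not yet loaded.
 */
MODULE_ALIAS_TCP_ULP("demo");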