diff options
Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
| -rw-r--r-- | net/ipv4/tcp_minisocks.c | 61 | 
1 file changed, 25 insertions, 36 deletions
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index dd11ac7798c6..63d6311b5365 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -34,18 +34,7 @@ int sysctl_tcp_abort_on_overflow __read_mostly;  struct inet_timewait_death_row tcp_death_row = {  	.sysctl_max_tw_buckets = NR_FILE * 2, -	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS, -	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),  	.hashinfo	= &tcp_hashinfo, -	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0, -					    (unsigned long)&tcp_death_row), -	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work, -					     inet_twdr_twkill_work), -/* Short-time timewait calendar */ - -	.twcal_hand	= -1, -	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0, -					    (unsigned long)&tcp_death_row),  };  EXPORT_SYMBOL_GPL(tcp_death_row); @@ -158,7 +147,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,  		if (!th->fin ||  		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {  kill_with_rst: -			inet_twsk_deschedule(tw, &tcp_death_row); +			inet_twsk_deschedule(tw);  			inet_twsk_put(tw);  			return TCP_TW_RST;  		} @@ -174,11 +163,9 @@ kill_with_rst:  		if (tcp_death_row.sysctl_tw_recycle &&  		    tcptw->tw_ts_recent_stamp &&  		    tcp_tw_remember_stamp(tw)) -			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout, -					   TCP_TIMEWAIT_LEN); +			inet_twsk_schedule(tw, tw->tw_timeout);  		else -			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN, -					   TCP_TIMEWAIT_LEN); +			inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);  		return TCP_TW_ACK;  	} @@ -211,13 +198,12 @@ kill_with_rst:  			 */  			if (sysctl_tcp_rfc1337 == 0) {  kill: -				inet_twsk_deschedule(tw, &tcp_death_row); +				inet_twsk_deschedule(tw);  				inet_twsk_put(tw);  				return TCP_TW_SUCCESS;  			}  		} -		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN, -				   TCP_TIMEWAIT_LEN); +		inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);  		
if (tmp_opt.saw_tstamp) {  			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval; @@ -267,8 +253,7 @@ kill:  		 * Do not reschedule in the last case.  		 */  		if (paws_reject || th->ack) -			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN, -					   TCP_TIMEWAIT_LEN); +			inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);  		return tcp_timewait_check_oow_rate_limit(  			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT); @@ -283,18 +268,17 @@ EXPORT_SYMBOL(tcp_timewait_state_process);   */  void tcp_time_wait(struct sock *sk, int state, int timeo)  { -	struct inet_timewait_sock *tw = NULL;  	const struct inet_connection_sock *icsk = inet_csk(sk);  	const struct tcp_sock *tp = tcp_sk(sk); +	struct inet_timewait_sock *tw;  	bool recycle_ok = false;  	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)  		recycle_ok = tcp_remember_stamp(sk); -	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets) -		tw = inet_twsk_alloc(sk, state); +	tw = inet_twsk_alloc(sk, &tcp_death_row, state); -	if (tw != NULL) { +	if (tw) {  		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);  		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);  		struct inet_sock *inet = inet_sk(sk); @@ -332,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)  			struct tcp_md5sig_key *key;  			tcptw->tw_md5_key = NULL;  			key = tp->af_specific->md5_lookup(sk, sk); -			if (key != NULL) { +			if (key) {  				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);  				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())  					BUG(); @@ -355,8 +339,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)  				timeo = TCP_TIMEWAIT_LEN;  		} -		inet_twsk_schedule(tw, &tcp_death_row, timeo, -				   TCP_TIMEWAIT_LEN); +		inet_twsk_schedule(tw, timeo);  		inet_twsk_put(tw);  	} else {  		/* Sorry, if we're out of memory, just CLOSE this @@ -454,7 +437,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,  {  	struct sock 
*newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); -	if (newsk != NULL) { +	if (newsk) {  		const struct inet_request_sock *ireq = inet_rsk(req);  		struct tcp_request_sock *treq = tcp_rsk(req);  		struct inet_connection_sock *newicsk = inet_csk(newsk); @@ -572,7 +555,6 @@ EXPORT_SYMBOL(tcp_create_openreq_child);  struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,  			   struct request_sock *req, -			   struct request_sock **prev,  			   bool fastopen)  {  	struct tcp_options_received tmp_opt; @@ -629,9 +611,16 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,  					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,  					  &tcp_rsk(req)->last_oow_ack_time) && -		    !inet_rtx_syn_ack(sk, req)) -			req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout, -					   TCP_RTO_MAX) + jiffies; +		    !inet_rtx_syn_ack(sk, req)) { +			unsigned long expires = jiffies; + +			expires += min(TCP_TIMEOUT_INIT << req->num_timeout, +				       TCP_RTO_MAX); +			if (!fastopen) +				mod_timer_pending(&req->rsk_timer, expires); +			else +				req->rsk_timer.expires = expires; +		}  		return NULL;  	} @@ -763,10 +752,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,  	 * socket is created, wait for troubles.  	 */  	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); -	if (child == NULL) +	if (!child)  		goto listen_overflow; -	inet_csk_reqsk_queue_unlink(sk, req, prev); +	inet_csk_reqsk_queue_unlink(sk, req);  	inet_csk_reqsk_queue_removed(sk, req);  	inet_csk_reqsk_queue_add(sk, req, child); @@ -791,7 +780,7 @@ embryonic_reset:  		tcp_reset(sk);  	}  	if (!fastopen) { -		inet_csk_reqsk_queue_drop(sk, req, prev); +		inet_csk_reqsk_queue_drop(sk, req);  		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);  	}  	return NULL;  |