Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--   net/ipv4/tcp.c   50
1 files changed, 46 insertions, 4 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0db0ec6b0a96..6c3eab485249 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -951,6 +951,40 @@ static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }
 
+static int tcp_wmem_schedule(struct sock *sk, int copy)
+{
+	int left;
+
+	if (likely(sk_wmem_schedule(sk, copy)))
+		return copy;
+
+	/* We could be in trouble if we have nothing queued.
+	 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
+	 * to guarantee some progress.
+	 */
+	left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
+	if (left > 0)
+		sk_forced_mem_schedule(sk, min(left, copy));
+	return min(copy, sk->sk_forward_alloc);
+}
+
+static int tcp_wmem_schedule(struct sock *sk, int copy)
+{
+	int left;
+
+	if (likely(sk_wmem_schedule(sk, copy)))
+		return copy;
+
+	/* We could be in trouble if we have nothing queued.
+	 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
+	 * to guarantee some progress.
+	 */
+	left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
+	if (left > 0)
+		sk_forced_mem_schedule(sk, min(left, copy));
+	return min(copy, sk->sk_forward_alloc);
+}
+
 static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
 				      struct page *page, int offset, size_t *size)
 {
@@ -986,7 +1020,11 @@ new_segment:
 		tcp_mark_push(tp, skb);
 		goto new_segment;
 	}
-	if (tcp_downgrade_zcopy_pure(sk, skb) || !sk_wmem_schedule(sk, copy))
+	if (tcp_downgrade_zcopy_pure(sk, skb))
+		return NULL;
+
+	copy = tcp_wmem_schedule(sk, copy);
+	if (!copy)
 		return NULL;
 
 	if (can_coalesce) {
@@ -1334,8 +1372,11 @@ new_segment:
 
 			copy = min_t(int, copy, pfrag->size - pfrag->offset);
 
-			if (tcp_downgrade_zcopy_pure(sk, skb) ||
-			    !sk_wmem_schedule(sk, copy))
+			if (tcp_downgrade_zcopy_pure(sk, skb))
+				goto wait_for_space;
+
+			copy = tcp_wmem_schedule(sk, copy);
+			if (!copy)
 				goto wait_for_space;
 
 			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
@@ -1362,7 +1403,8 @@ new_segment:
 				skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY;
 
 			if (!skb_zcopy_pure(skb)) {
-				if (!sk_wmem_schedule(sk, copy))
+				copy = tcp_wmem_schedule(sk, copy);
+				if (!copy)
 					goto wait_for_space;
 			}
 
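As the in-diff comment explains, the new helper charges as much of the requested copy as the memory accounting allows and, when sk_wmem_schedule() fails, falls back to whatever headroom is left between tcp_wmem[0] and sk->sk_wmem_queued, so a sender hitting global TCP memory pressure can still make some progress instead of blocking in wait_for_space. The following is a standalone userspace sketch of that fallback arithmetic only; the kernel helpers are mocked out, and every mock_/sketch_ name, the sample numbers, and the plain-int stand-in for sysctl_tcp_wmem[0] are invented for illustration, not kernel code.

/* sketch.c - illustrative model of the tcp_wmem_schedule() fallback.
 * Names prefixed mock_/sketch_ are hypothetical; only the arithmetic
 * mirrors the patch above.
 */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct mock_sock {
	int sk_wmem_queued;   /* bytes already queued on the write queue */
	int sk_forward_alloc; /* bytes already charged against TCP memory */
};

static int tcp_wmem_min = 4096; /* stands in for sysctl tcp_wmem[0] */
static int under_pressure = 1;  /* pretend the global TCP limit is hit */

/* Mock of sk_wmem_schedule(): succeeds only when not under pressure. */
static int mock_wmem_schedule(struct mock_sock *sk, int bytes)
{
	if (under_pressure)
		return 0;
	sk->sk_forward_alloc += bytes;
	return 1;
}

/* Mock of sk_forced_mem_schedule(): charges unconditionally. */
static void mock_forced_mem_schedule(struct mock_sock *sk, int bytes)
{
	sk->sk_forward_alloc += bytes;
}

/* Same shape as the new tcp_wmem_schedule(): returns how many of the
 * requested bytes may actually be copied, possibly fewer than asked. */
static int sketch_wmem_schedule(struct mock_sock *sk, int copy)
{
	int left;

	if (mock_wmem_schedule(sk, copy))
		return copy;

	/* Nothing chargeable globally: use the headroom left between
	 * tcp_wmem[0] and what this socket has already queued. */
	left = tcp_wmem_min - sk->sk_wmem_queued;
	if (left > 0)
		mock_forced_mem_schedule(sk, MIN(left, copy));
	return MIN(copy, sk->sk_forward_alloc);
}

int main(void)
{
	struct mock_sock sk = { .sk_wmem_queued = 1024, .sk_forward_alloc = 0 };
	int copy = 16384;

	/* Under pressure the socket is still granted
	 * tcp_wmem_min - sk_wmem_queued = 3072 bytes, so the caller can
	 * copy a smaller chunk instead of waiting for space forever. */
	copy = sketch_wmem_schedule(&sk, copy);
	printf("granted %d of 16384 bytes\n", copy);
	return 0;
}

The call sites in the diff follow the same pattern: tcp_build_frag() and tcp_sendmsg_locked() copy the possibly reduced amount returned by tcp_wmem_schedule() and only return NULL or jump to wait_for_space when zero bytes were granted.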