Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h  15
1 file changed, 10 insertions, 5 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index f69b58bff7e5..8f9adcfac41b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -954,8 +954,8 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
 {
 	int cpu = raw_smp_processor_id();
 
-	if (unlikely(sk->sk_incoming_cpu != cpu))
-		sk->sk_incoming_cpu = cpu;
+	if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
+		WRITE_ONCE(sk->sk_incoming_cpu, cpu);
 }
 
 static inline void sock_rps_record_flow_hash(__u32 hash)
@@ -2242,12 +2242,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
  * sk_page_frag - return an appropriate page_frag
  * @sk: socket
  *
- * If socket allocation mode allows current thread to sleep, it means its
- * safe to use the per task page_frag instead of the per socket one.
+ * Use the per task page_frag instead of the per socket one for
+ * optimization when we know that we're in the normal context and owns
+ * everything that's associated with %current.
+ *
+ * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
+ * inside other socket operations and end up recursing into sk_page_frag()
+ * while it's already in use.
  */
 static inline struct page_frag *sk_page_frag(struct sock *sk)
 {
-	if (gfpflags_allow_blocking(sk->sk_allocation))
+	if (gfpflags_normal_context(sk->sk_allocation))
 		return &current->task_frag;
 	return &sk->sk_frag;
 }
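For context on the first hunk: sk->sk_incoming_cpu can be read locklessly by socket-lookup code on other CPUs, so both sides of the access need READ_ONCE()/WRITE_ONCE() annotations to keep the compiler from tearing or fusing the plain load and store. A minimal sketch of such a reader follows; sk_served_on_this_cpu() is a hypothetical helper invented for illustration, not a function in the kernel tree, and it only shows how a reader would pair with the WRITE_ONCE() added above.

static inline bool sk_served_on_this_cpu(const struct sock *sk)
{
	/* Paired with the WRITE_ONCE() added in sk_incoming_cpu_update(). */
	return READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id();
}

The second hunk switches sk_page_frag() from gfpflags_allow_blocking() to gfpflags_normal_context(). The sketch below shows a plausible definition of that helper, matching the one added to include/linux/gfp.h alongside this change; treat gfp.h as authoritative. The point is that an allocation only "owns" %current when it is allowed to block and is not a memory-reclaim (__GFP_MEMALLOC) allocation, so only then is current->task_frag safe to hand out.

static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
{
	/*
	 * Being allowed to block is not enough: an allocation issued from
	 * memory reclaim can block yet still be nested inside whatever
	 * %current was doing, so it must not touch current->task_frag.
	 */
	return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
		__GFP_DIRECT_RECLAIM;
}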