Diffstat (limited to 'kernel/rcu/tree.c')
| -rw-r--r-- | kernel/rcu/tree.c | 27 | 
1 file changed, 19 insertions, 8 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8e880c09ab59..7b95ee98a1a5 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3024,6 +3024,18 @@ need_offload_krc(struct kfree_rcu_cpu *krcp)
 	return !!READ_ONCE(krcp->head);
 }
 
+static bool
+need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
+{
+	int i;
+
+	for (i = 0; i < FREE_N_CHANNELS; i++)
+		if (!list_empty(&krwp->bulk_head_free[i]))
+			return true;
+
+	return !!krwp->head_free;
+}
+
 static int krc_count(struct kfree_rcu_cpu *krcp)
 {
 	int sum = atomic_read(&krcp->head_count);
@@ -3107,15 +3119,14 @@ static void kfree_rcu_monitor(struct work_struct *work)
 	for (i = 0; i < KFREE_N_BATCHES; i++) {
 		struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
 
-		// Try to detach bulk_head or head and attach it over any
-		// available corresponding free channel. It can be that
-		// a previous RCU batch is in progress, it means that
-		// immediately to queue another one is not possible so
-		// in that case the monitor work is rearmed.
-		if ((!list_empty(&krcp->bulk_head[0]) && list_empty(&krwp->bulk_head_free[0])) ||
-			(!list_empty(&krcp->bulk_head[1]) && list_empty(&krwp->bulk_head_free[1])) ||
-				(READ_ONCE(krcp->head) && !krwp->head_free)) {
+		// Try to detach bulk_head or head and attach it, only when
+		// all channels are free.  Any channel is not free means at krwp
+		// there is on-going rcu work to handle krwp's free business.
+		if (need_wait_for_krwp_work(krwp))
+			continue;
+		// kvfree_rcu_drain_ready() might handle this krcp, if so give up.
+		if (need_offload_krc(krcp)) {
 			// Channel 1 corresponds to the SLAB-pointer bulk path.
 			// Channel 2 corresponds to vmalloc-pointer bulk path.
 			for (j = 0; j < FREE_N_CHANNELS; j++) {
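
For readers skimming the hunk above: the new need_wait_for_krwp_work() helper reports whether a krwp batch slot still holds objects from an earlier grace period in any of its free channels, and kfree_rcu_monitor() now skips such a slot outright instead of trying to attach pending objects channel by channel. The stand-alone sketch below models that "skip busy batch slots" pattern in plain user-space C; the types and names (struct batch_slot, struct pending, slot_busy(), monitor(), N_CHANNELS, N_BATCHES) are simplified stand-ins invented for illustration, not the kernel's kfree_rcu() machinery.

/*
 * Minimal user-space sketch of the "skip busy batch slots" idea in the
 * patch above.  All names here are simplified stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

#define N_CHANNELS 2   /* stand-in for FREE_N_CHANNELS */
#define N_BATCHES  2   /* stand-in for KFREE_N_BATCHES */

struct batch_slot {
	int in_flight[N_CHANNELS]; /* objects handed to a still-running batch */
	int head_free;             /* headless objects handed to that batch */
};

struct pending {
	int queued[N_CHANNELS];    /* objects waiting to be detached */
	int head;                  /* headless objects waiting */
	struct batch_slot slot[N_BATCHES];
};

/* Mirrors need_wait_for_krwp_work(): anything still in flight means the
 * slot belongs to an earlier batch and must not be reused yet. */
static bool slot_busy(const struct batch_slot *slot)
{
	for (int i = 0; i < N_CHANNELS; i++)
		if (slot->in_flight[i])
			return true;

	return slot->head_free != 0;
}

static bool have_pending(const struct pending *p)
{
	for (int i = 0; i < N_CHANNELS; i++)
		if (p->queued[i])
			return true;

	return p->head != 0;
}

/* Mirrors the reworked loop in kfree_rcu_monitor(): skip busy slots
 * entirely instead of attaching to whichever channels happen to be free. */
static void monitor(struct pending *p)
{
	for (int i = 0; i < N_BATCHES; i++) {
		struct batch_slot *slot = &p->slot[i];

		if (slot_busy(slot))
			continue;

		if (!have_pending(p))
			break;

		/* Detach everything pending and attach it to this idle slot. */
		for (int j = 0; j < N_CHANNELS; j++) {
			slot->in_flight[j] = p->queued[j];
			p->queued[j] = 0;
		}
		slot->head_free = p->head;
		p->head = 0;
		printf("attached pending work to slot %d\n", i);
	}
}

int main(void)
{
	struct pending p = { .queued = { 3, 0 }, .head = 1 };

	/* Slot 0 still runs an earlier batch; monitor() must pick slot 1. */
	p.slot[0].in_flight[0] = 5;
	monitor(&p);
	return 0;
}

The property the patch is after shows up in monitor(): a slot with anything in flight is never touched, so newly queued objects can only be attached to a batch whose grace period starts after they were queued, rather than being mixed into one that is already under way.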