Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--	kernel/rcu/tree_plugin.h | 26 +++++++++++++++-----------
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 438ecae6bd7e..e3142ee35fc6 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -641,7 +641,8 @@ static void rcu_read_unlock_special(struct task_struct *t)
 
 		expboost = (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
 			   (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
-			   IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ||
+			   (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
+			    ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) ||
 			   (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled &&
 			    t->rcu_blocked_node);
 		// Need to defer quiescent state until everything is enabled.
@@ -718,9 +719,6 @@ static void rcu_flavor_sched_clock_irq(int user)
 	struct task_struct *t = current;
 
 	lockdep_assert_irqs_disabled();
-	if (user || rcu_is_cpu_rrupt_from_idle()) {
-		rcu_note_voluntary_context_switch(current);
-	}
 	if (rcu_preempt_depth() > 0 ||
 	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
 		/* No QS, force context switch if deferred. */
@@ -824,6 +822,7 @@ void rcu_read_unlock_strict(void)
 	if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
 		return;
 	rdp = this_cpu_ptr(&rcu_data);
+	rdp->cpu_no_qs.b.norm = false;
 	rcu_report_qs_rdp(rdp);
 	udelay(rcu_unlock_delay);
 }
@@ -869,7 +868,7 @@ void rcu_all_qs(void)
 
 	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
 		return;
-	preempt_disable();
+	preempt_disable();  // For CONFIG_PREEMPT_COUNT=y kernels
 	/* Load rcu_urgent_qs before other flags. */
 	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
 		preempt_enable();
@@ -931,10 +930,13 @@ static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 	return false;
 }
 
-// Except that we do need to respond to a request by an expedited grace
-// period for a quiescent state from this CPU.  Note that requests from
-// tasks are handled when removing the task from the blocked-tasks list
-// below.
+// Except that we do need to respond to a request by an expedited
+// grace period for a quiescent state from this CPU.  Note that in
+// non-preemptible kernels, there can be no context switches within RCU
+// read-side critical sections, which in turn means that the leaf rcu_node
+// structure's blocked-tasks list is always empty.  There is therefore no
+// need to actually check it.  Instead, a quiescent state from this CPU
+// suffices, and this function is only called from such a quiescent state.
 notrace void rcu_preempt_deferred_qs(struct task_struct *t)
 {
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -972,7 +974,6 @@ static void rcu_flavor_sched_clock_irq(int user)
 		 * neither access nor modify, at least not while the
 		 * corresponding CPU is online.
 		 */
-
 		rcu_qs();
 	}
 }
@@ -1238,8 +1239,11 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 		    cpu != outgoingcpu)
 			cpumask_set_cpu(cpu, cm);
 	cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
-	if (cpumask_empty(cm))
+	if (cpumask_empty(cm)) {
 		cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
+		if (outgoingcpu >= 0)
+			cpumask_clear_cpu(outgoingcpu, cm);
+	}
 	set_cpus_allowed_ptr(t, cm);
 	mutex_unlock(&rnp->boost_kthread_mutex);
 	free_cpumask_var(cm);
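
Below is a minimal user-space sketch of the reworked expboost condition from
rcu_read_unlock_special().  The struct, field names, and values are
hypothetical stand-ins for the kernel's rcu_data/rcu_node state; only the
boolean logic mirrors the patch.  The point of the change is that a
CONFIG_RCU_STRICT_GRACE_PERIOD=y kernel now requests expedited handling only
when this CPU still owes a quiescent state (its qsmask bit is set) or the
task is queued on a blocked-tasks list, rather than unconditionally.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical snapshot of the state rcu_read_unlock_special() tests. */
	struct unlock_state {
		bool exp_tasks_pending;  /* t->rcu_blocked_node && ...->exp_tasks */
		bool cpu_in_expmask;     /* rdp->grpmask & READ_ONCE(rnp->expmask) */
		bool strict_gp;          /* IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) */
		bool cpu_in_qsmask;      /* rdp->grpmask & READ_ONCE(rnp->qsmask) */
		bool task_blocked;       /* t->rcu_blocked_node != NULL */
		bool boost;              /* IS_ENABLED(CONFIG_RCU_BOOST) */
		bool irqs_were_disabled;
	};

	/* The patched condition: strict-GP kernels no longer boost unconditionally. */
	static bool expboost(const struct unlock_state *s)
	{
		return s->exp_tasks_pending ||
		       s->cpu_in_expmask ||
		       (s->strict_gp && (s->cpu_in_qsmask || s->task_blocked)) ||
		       (s->boost && s->irqs_were_disabled && s->task_blocked);
	}

	int main(void)
	{
		/* Strict-GP kernel whose CPU already reported its quiescent
		 * state and whose task never blocked: the patched test is false,
		 * where the old unconditional term would have been true. */
		struct unlock_state s = { .strict_gp = true };

		printf("expboost = %d\n", expboost(&s));	/* prints 0 */
		return 0;
	}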
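And a toy bitmask model of the rcu_boost_kthread_setaffinity() fallback fix,
with CPUs as bits in an unsigned long rather than a struct cpumask and an
invented helper name.  Before the patch, an empty candidate mask was replaced
by the housekeeping mask verbatim, which could leave the boost kthread
eligible to run on the outgoing CPU; the patched fallback clears that CPU
explicitly.

	#include <stdio.h>

	/*
	 * pick_affinity() is an invented name for this sketch.  The
	 * "if (outgoingcpu >= 0)" branch is the new behavior from the patch.
	 */
	static unsigned long pick_affinity(unsigned long candidates,
					   unsigned long housekeeping,
					   int outgoingcpu)
	{
		unsigned long cm = candidates & housekeeping;

		if (!cm) {
			cm = housekeeping;			/* old fallback */
			if (outgoingcpu >= 0)
				cm &= ~(1UL << outgoingcpu);	/* the fix */
		}
		return cm;
	}

	int main(void)
	{
		/* Housekeeping CPUs 0-1, no candidate overlaps them, and CPU 0
		 * is going offline, so the fallback must exclude CPU 0. */
		unsigned long cm = pick_affinity(0x4UL, 0x3UL, 0);

		printf("affinity mask = %#lx\n", cm);	/* prints 0x2 */
		return 0;
	}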