Diffstat (limited to 'kernel/rcu/tree_exp.h')

 kernel/rcu/tree_exp.h | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index af7e7b9c86af..d632cd019597 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -781,7 +781,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * other hand, if the CPU is not in an RCU read-side critical section,
  * the IPI handler reports the quiescent state immediately.
  *
- * Although this is a greate improvement over previous expedited
+ * Although this is a great improvement over previous expedited
  * implementations, it is still unfriendly to real-time workloads, so is
  * thus not recommended for any sort of common-case code.  In fact, if
  * you are using synchronize_rcu_expedited() in a loop, please restructure
@@ -792,6 +792,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  */
 void synchronize_rcu_expedited(void)
 {
+	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
 	struct rcu_exp_work rew;
 	struct rcu_node *rnp;
 	unsigned long s;
@@ -817,7 +818,7 @@ void synchronize_rcu_expedited(void)
 		return;  /* Someone else did our work for us. */
 
 	/* Ensure that load happens before action based on it. */
-	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
+	if (unlikely(boottime)) {
 		/* Direct call during scheduler init and early_initcalls(). */
 		rcu_exp_sel_wait_wake(s);
 	} else {
@@ -835,5 +836,8 @@ void synchronize_rcu_expedited(void)
 
 	/* Let the next expedited grace period start. */
 	mutex_unlock(&rcu_state.exp_mutex);
+
+	if (likely(!boottime))
+		destroy_work_on_stack(&rew.rew_work);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
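For context on the pattern this patch completes (this is not part of the patch itself): INIT_WORK_ONSTACK() registers an on-stack work_struct with the debug-objects machinery, so it must be paired with destroy_work_on_stack() before the stack frame is reclaimed, and only on paths that actually performed the initialization. The sketch below shows that pairing in isolation; my_exp_work, my_exp_handler, and my_sync_expedited are hypothetical names, not kernel symbols, while the workqueue calls are the real API.

/*
 * Minimal sketch of the INIT_WORK_ONSTACK()/destroy_work_on_stack()
 * pairing. All "my_*" identifiers are made up for illustration.
 */
#include <linux/workqueue.h>

struct my_exp_work {
	struct work_struct work;
};

static void my_exp_handler(struct work_struct *wp)
{
	/* Grace-period machinery would run here. */
}

static void my_sync_expedited(bool boottime)
{
	struct my_exp_work rew;

	if (boottime)
		return;	/* Direct-call path: no work item was set up. */

	/* Registers the on-stack item with debug objects. */
	INIT_WORK_ONSTACK(&rew.work, my_exp_handler);
	queue_work(system_unbound_wq, &rew.work);
	flush_work(&rew.work);

	/*
	 * Must pair with INIT_WORK_ONSTACK() before rew goes out of
	 * scope; this matches what the final hunk above adds.
	 */
	destroy_work_on_stack(&rew.work);
}

In the actual patch the same idea is expressed by snapshotting rcu_scheduler_active into the local boottime once at entry, so the decision taken at initialization time and the decision taken at teardown time cannot disagree.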