Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--  kernel/rcu/tree_plugin.h | 144
1 file changed, 41 insertions(+), 103 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 962d1d589929..cbc2c45265e2 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -116,7 +116,7 @@ static void __init rcu_bootup_announce_oddness(void)
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
-static struct rcu_state *rcu_state = &rcu_preempt_state;
+static struct rcu_state *rcu_state_p = &rcu_preempt_state;
 
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
@@ -149,15 +149,6 @@ long rcu_batches_completed(void)
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
 /*
- * Force a quiescent state for preemptible RCU.
- */
-void rcu_force_quiescent_state(void)
-{
-	force_quiescent_state(&rcu_preempt_state);
-}
-EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
-
-/*
  * Record a preemptible-RCU quiescent state for the specified CPU.  Note
  * that this just means that the task currently running on the CPU is
  * not in a quiescent state.  There might be any number of tasks blocked
@@ -688,20 +679,6 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
-/*
- * Queue an RCU callback for lazy invocation after a grace period.
- * This will likely be later named something like "call_rcu_lazy()",
- * but this change will require some way of tagging the lazy RCU
- * callbacks in the list of pending callbacks.  Until then, this
- * function may only be called from __kfree_rcu().
- */
-void kfree_call_rcu(struct rcu_head *head,
-		    void (*func)(struct rcu_head *rcu))
-{
-	__call_rcu(head, func, &rcu_preempt_state, -1, 1);
-}
-EXPORT_SYMBOL_GPL(kfree_call_rcu);
-
 /**
  * synchronize_rcu - wait until a grace period has elapsed.
  *
@@ -970,7 +947,7 @@ void exit_rcu(void)
 
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
-static struct rcu_state *rcu_state = &rcu_sched_state;
+static struct rcu_state *rcu_state_p = &rcu_sched_state;
 
 /*
  * Tell them what RCU they are running.
@@ -991,16 +968,6 @@ long rcu_batches_completed(void)
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
 /*
- * Force a quiescent state for RCU, which, because there is no preemptible
- * RCU, becomes the same as rcu-sched.
- */
-void rcu_force_quiescent_state(void)
-{
-	rcu_sched_force_quiescent_state();
-}
-EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
-
-/*
  * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
  */
@@ -1080,22 +1047,6 @@ static void rcu_preempt_check_callbacks(int cpu)
 }
 
 /*
- * Queue an RCU callback for lazy invocation after a grace period.
- * This will likely be later named something like "call_rcu_lazy()",
- * but this change will require some way of tagging the lazy RCU
- * callbacks in the list of pending callbacks.  Until then, this
- * function may only be called from __kfree_rcu().
- *
- * Because there is no preemptible RCU, we use RCU-sched instead.
- */
-void kfree_call_rcu(struct rcu_head *head,
-		    void (*func)(struct rcu_head *rcu))
-{
-	__call_rcu(head, func, &rcu_sched_state, -1, 1);
-}
-EXPORT_SYMBOL_GPL(kfree_call_rcu);
-
-/*
  * Wait for an rcu-preempt grace period, but make it happen quickly.
  * But because preemptible RCU does not exist, map to rcu-sched.
  */
@@ -1517,11 +1468,11 @@ static int __init rcu_spawn_kthreads(void)
 	for_each_possible_cpu(cpu)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
-	rnp = rcu_get_root(rcu_state);
-	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
+	rnp = rcu_get_root(rcu_state_p);
+	(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
 	if (NUM_RCU_NODES > 1) {
-		rcu_for_each_leaf_node(rcu_state, rnp)
-			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
+		rcu_for_each_leaf_node(rcu_state_p, rnp)
+			(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
 	}
 	return 0;
 }
@@ -1529,12 +1480,12 @@ early_initcall(rcu_spawn_kthreads);
 
 static void rcu_prepare_kthreads(int cpu)
 {
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
 	if (rcu_scheduler_fully_active)
-		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
+		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1744,6 +1695,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
 static void rcu_prepare_for_idle(int cpu)
 {
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
+	bool needwake;
 	struct rcu_data *rdp;
 	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 	struct rcu_node *rnp;
@@ -1792,8 +1744,10 @@ static void rcu_prepare_for_idle(int cpu)
 		rnp = rdp->mynode;
 		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
 		smp_mb__after_unlock_lock();
-		rcu_accelerate_cbs(rsp, rnp, rdp);
+		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		if (needwake)
+			rcu_gp_kthread_wake(rsp);
 	}
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 }
@@ -1855,7 +1809,7 @@ static void rcu_oom_notify_cpu(void *unused)
 	struct rcu_data *rdp;
 
 	for_each_rcu_flavor(rsp) {
-		rdp = __this_cpu_ptr(rsp->rda);
+		rdp = raw_cpu_ptr(rsp->rda);
 		if (rdp->qlen_lazy != 0) {
 			atomic_inc(&oom_callback_count);
 			rsp->call(&rdp->oom_head, rcu_oom_callback);
@@ -1997,7 +1951,7 @@ static void increment_cpu_stall_ticks(void)
 	struct rcu_state *rsp;
 
 	for_each_rcu_flavor(rsp)
-		__this_cpu_ptr(rsp->rda)->ticks_this_gp++;
+		raw_cpu_inc(rsp->rda->ticks_this_gp);
 }
 
 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
@@ -2068,19 +2022,6 @@ static int __init parse_rcu_nocb_poll(char *arg)
 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
 
 /*
- * Do any no-CBs CPUs need another grace period?
- *
- * Interrupts must be disabled.  If the caller does not hold the root
- * rnp_node structure's ->lock, the results are advisory only.
- */
-static int rcu_nocb_needs_gp(struct rcu_state *rsp)
-{
-	struct rcu_node *rnp = rcu_get_root(rsp);
-
-	return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
-}
-
-/*
  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
  * grace period.
  */
@@ -2109,7 +2050,7 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
 }
 
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-/* Is the specified CPU a no-CPUs CPU? */
+/* Is the specified CPU a no-CBs CPU? */
 bool rcu_is_nocb_cpu(int cpu)
 {
 	if (have_rcu_nocb_mask)
@@ -2243,12 +2184,15 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	unsigned long c;
 	bool d;
 	unsigned long flags;
+	bool needwake;
 	struct rcu_node *rnp = rdp->mynode;
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	smp_mb__after_unlock_lock();
-	c = rcu_start_future_gp(rnp, rdp);
+	needwake = rcu_start_future_gp(rnp, rdp, &c);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	if (needwake)
+		rcu_gp_kthread_wake(rdp->rsp);
 
 	/*
 	 * Wait for the grace period.  Do so interruptibly to avoid messing
@@ -2402,11 +2346,6 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-static int rcu_nocb_needs_gp(struct rcu_state *rsp)
-{
-	return 0;
-}
-
 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 }
@@ -2523,9 +2462,9 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
 	/* Record start of fully idle period. */
 	j = jiffies;
 	ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_inc(&rdtp->dynticks_idle);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
 }
 
@@ -2590,9 +2529,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
 	}
 
 	/* Record end of idle period. */
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_inc(&rdtp->dynticks_idle);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
 
 	/*
@@ -2657,20 +2596,6 @@ static bool is_sysidle_rcu_state(struct rcu_state *rsp)
 }
 
 /*
- * Bind the grace-period kthread for the sysidle flavor of RCU to the
- * timekeeping CPU.
- */
-static void rcu_bind_gp_kthread(void)
-{
-	int cpu = ACCESS_ONCE(tick_do_timer_cpu);
-
-	if (cpu < 0 || cpu >= nr_cpu_ids)
-		return;
-	if (raw_smp_processor_id() != cpu)
-		set_cpus_allowed_ptr(current, cpumask_of(cpu));
-}
-
-/*
  * Return a delay in jiffies based on the number of CPUs, rcu_node
  * leaf fanout, and jiffies tick rate.  The idea is to allow larger
  * systems more time to transition to full-idle state in order to
@@ -2734,7 +2659,8 @@ static void rcu_sysidle(unsigned long j)
 static void rcu_sysidle_cancel(void)
 {
 	smp_mb();
-	ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
+	if (full_sysidle_state > RCU_SYSIDLE_SHORT)
+		ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
}
 
 /*
@@ -2880,10 +2806,6 @@ static bool is_sysidle_rcu_state(struct rcu_state *rsp)
 	return false;
 }
 
-static void rcu_bind_gp_kthread(void)
-{
-}
-
 static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
 				  unsigned long maxj)
 {
@@ -2914,3 +2836,19 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
 #endif /* #ifdef CONFIG_NO_HZ_FULL */
 	return 0;
 }
+
+/*
+ * Bind the grace-period kthread for the sysidle flavor of RCU to the
+ * timekeeping CPU.
+ */
+static void rcu_bind_gp_kthread(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	int cpu = ACCESS_ONCE(tick_do_timer_cpu);
+
+	if (cpu < 0 || cpu >= nr_cpu_ids)
+		return;
+	if (raw_smp_processor_id() != cpu)
+		set_cpus_allowed_ptr(current, cpumask_of(cpu));
+#endif /* #ifdef CONFIG_NO_HZ_FULL */
+}
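
Note on the recurring "needwake" pattern above: rcu_accelerate_cbs() and
rcu_start_future_gp() now report, via a boolean, whether the grace-period
kthread needs waking, and the caller invokes rcu_gp_kthread_wake() only
after releasing the rcu_node ->lock, so the wakeup never runs with that
lock held. A minimal sketch of the calling convention follows; the
wrapper function accelerate_then_wake() is hypothetical, while the helper
names and lock discipline are taken from the diff:

	/* Hypothetical wrapper illustrating the deferred-wakeup pattern. */
	static void accelerate_then_wake(struct rcu_state *rsp,
					 struct rcu_node *rnp,
					 struct rcu_data *rdp)
	{
		bool needwake;

		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
		smp_mb__after_unlock_lock();	/* Order against prior unlock. */
		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
		raw_spin_unlock(&rnp->lock);	/* Drop the lock first... */
		if (needwake)
			rcu_gp_kthread_wake(rsp); /* ...then wake safely. */
	}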
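Also visible throughout are conversions to the then-new generic APIs:
__this_cpu_ptr() becomes raw_cpu_ptr() (with raw_cpu_inc() for the
->ticks_this_gp increment), and smp_mb__{before,after}_atomic_inc()
becomes the operation-agnostic smp_mb__{before,after}_atomic().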
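Design note on the final hunks: the two rcu_bind_gp_kthread() definitions,
a real one in the CONFIG_NO_HZ_FULL_SYSIDLE section and an empty stub in
its #else branch, are consolidated into a single definition at the end of
the file whose body is guarded by #ifdef CONFIG_NO_HZ_FULL, so the
grace-period kthread is bound to the timekeeping CPU whenever NO_HZ_FULL
is configured, not only when sysidle support is built in.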