Diffstat (limited to 'kernel/rcu/tree.h')
-rw-r--r--	kernel/rcu/tree.h	31
1 file changed, 15 insertions, 16 deletions
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 305cf6aeb408..486fc901bd08 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -157,7 +157,6 @@ struct rcu_data {
 	bool		core_needs_qs;	/* Core waits for quiescent state. */
 	bool		beenonline;	/* CPU online at least once. */
 	bool		gpwrap;		/* Possible ->gp_seq wrap. */
-	bool		exp_deferred_qs; /* This CPU awaiting a deferred QS? */
 	bool		cpu_started;	/* RCU watching this onlining CPU. */
 	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
 	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
@@ -189,11 +188,6 @@ struct rcu_data {
 	bool rcu_urgent_qs;		/* GP old need light quiescent state. */
 	bool rcu_forced_tick;		/* Forced tick to provide QS. */
 	bool rcu_forced_tick_exp;	/*   ... provide QS to expedited GP. */
-#ifdef CONFIG_RCU_FAST_NO_HZ
-	unsigned long last_accelerate;	/* Last jiffy CBs were accelerated. */
-	unsigned long last_advance_all;	/* Last jiffy CBs were all advanced. */
-	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
-#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 
 	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
 	struct rcu_head barrier_head;
@@ -227,8 +221,11 @@ struct rcu_data {
 	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
 	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
 	struct task_struct *nocb_cb_kthread;
-	struct rcu_data *nocb_next_cb_rdp;
-					/* Next rcu_data in wakeup chain. */
+	struct list_head nocb_head_rdp; /*
+					 * Head of rcu_data list in wakeup chain,
+					 * if rdp_gp.
+					 */
+	struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */
 
 	/* The following fields are used by CB kthread, hence new cacheline. */
 	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
@@ -419,8 +416,6 @@ static bool rcu_is_callbacks_kthread(void);
 static void rcu_cpu_kthread_setup(unsigned int cpu);
 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
 static void __init rcu_spawn_boost_kthreads(void);
-static void rcu_cleanup_after_idle(void);
-static void rcu_prepare_for_idle(void);
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
 static void rcu_preempt_deferred_qs(struct task_struct *t);
@@ -447,12 +442,16 @@ static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
 #ifdef CONFIG_RCU_NOCB_CPU
 static void __init rcu_organize_nocb_kthreads(void);
-#define rcu_nocb_lock_irqsave(rdp, flags)				\
-do {									\
-	if (!rcu_segcblist_is_offloaded(&(rdp)->cblist))		\
-		local_irq_save(flags);					\
-	else								\
-		raw_spin_lock_irqsave(&(rdp)->nocb_lock, (flags));	\
+
+/*
+ * Disable IRQs before checking offloaded state so that local
+ * locking is safe against concurrent de-offloading.
+ */
+#define rcu_nocb_lock_irqsave(rdp, flags)			\
+do {								\
+	local_irq_save(flags);					\
+	if (rcu_segcblist_is_offloaded(&(rdp)->cblist))	\
+		raw_spin_lock(&(rdp)->nocb_lock);		\
 } while (0)
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 #define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
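
The conversion from the single nocb_next_cb_rdp pointer to list_head members
turns the rdp_gp's wakeup chain into a standard kernel linked list:
nocb_head_rdp anchors the chain on the rdp_gp, and each member rcu_data hangs
off it via nocb_entry_rdp. A minimal sketch of what a traversal looks like
with the stock iterators (a hypothetical helper, not code from this patch):

	#include <linux/list.h>

	/* Walk every rcu_data hanging off this rdp_gp's wakeup chain. */
	static void nocb_walk_rdp_chain(struct rcu_data *rdp_gp)
	{
		struct rcu_data *rdp;

		/* nocb_head_rdp is only meaningful on the rdp_gp itself. */
		list_for_each_entry(rdp, &rdp_gp->nocb_head_rdp, nocb_entry_rdp) {
			/* ... process callbacks queued on this rdp ... */
		}
	}

Beyond the ready-made iterators, list_head also allows O(1) unlinking of an
arbitrary rdp via list_del(), which a singly linked next-pointer chain cannot
do without walking from the head; given the de-offloading comment elsewhere
in this diff, that is a plausible motivation for the conversion.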
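The rcu_nocb_lock_irqsave() rework reorders the macro so that interrupts are
disabled before rcu_segcblist_is_offloaded() is sampled. Per the patch's own
comment, that is what makes the local locking safe against concurrent
de-offloading: with IRQs off, the offloaded state the check observed cannot
change out from under the caller before the lock is taken. Both the old and
new versions end up with IRQs disabled either way (raw_spin_lock_irqsave()
disables them too), so the change is purely one of ordering. A usage sketch
with a hypothetical caller; rcu_nocb_unlock_irqrestore() is the real
counterpart declared in the context above:

	/*
	 * Hypothetical caller: bracket any ->cblist access with the
	 * macro pair. After this patch, IRQs are always disabled and
	 * nocb_lock is additionally taken only when the rdp is
	 * offloaded, with the offloaded check made stable by
	 * disabling IRQs first.
	 */
	static void example_cblist_update(struct rcu_data *rdp)
	{
		unsigned long flags;

		rcu_nocb_lock_irqsave(rdp, flags);
		/* ... manipulate rdp->cblist here ... */
		rcu_nocb_unlock_irqrestore(rdp, flags);
	}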