| field | value | date |
|---|---|---|
| author | Paul E. McKenney <[email protected]> | 2018-05-08 14:18:57 -0700 |
| committer | Paul E. McKenney <[email protected]> | 2018-07-12 15:39:09 -0700 |
| commit | 577389423187d8b51dfe6199297e579a3419b72b | |
| tree | 8cb47b71dc71d38c6f36f698281bf8a05b929393 /kernel/rcu/tree.c | |
| parent | ff3cee39088b1931a432587059d66cd505f785dc | |
rcu: Add CPU online/offline state to dump_blkd_tasks()
Interactions between CPU-hotplug operations and grace-period
initialization can result in dump_blkd_tasks() being invoked.  One of the first
debugging actions in this case is to search back in dmesg to work
out which of the affected rcu_node structure's CPUs are online and to
determine the last CPU-hotplug operation affecting any of those CPUs.
This can be laborious and error-prone, especially when console output
is lost.
This commit therefore causes dump_blkd_tasks() to dump the online/offline
state of the affected rcu_node structure's CPUs, along with the grace
period during which each of those CPUs last went offline and last came
online.
Signed-off-by: Paul E. McKenney <[email protected]>
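The mechanism is small: each CPU's rcu_data structure gains four fields (rcu_onl_gp_seq, rcu_onl_gp_flags, rcu_ofl_gp_seq, rcu_ofl_gp_flags) that snapshot the grace-period sequence number and flags at the most recent online and offline events, so dump_blkd_tasks() can print the hotplug history directly instead of forcing a dmesg search. Below is a minimal user-space sketch of that snapshot-and-dump pattern; only the four snapshot field names come from the diff, while the struct and helper names are illustrative stand-ins, not the kernel's API:

```c
/*
 * Standalone user-space sketch (not kernel code): at each online and
 * offline event, record the current grace-period sequence number and
 * flags so a later dump can reconstruct the hotplug history without
 * searching the console log.
 */
#include <stdio.h>

struct gp_state {               /* stand-in for the rcu_state fields used */
	unsigned long gp_seq;   /* grace-period sequence number */
	unsigned long gp_flags; /* grace-period flags */
};

struct cpu_hotplug_snap {       /* stand-in for the new rcu_data fields */
	unsigned long rcu_onl_gp_seq;   /* ->gp_seq at last online */
	unsigned long rcu_onl_gp_flags; /* ->gp_flags at last online */
	unsigned long rcu_ofl_gp_seq;   /* ->gp_seq at last offline */
	unsigned long rcu_ofl_gp_flags; /* ->gp_flags at last offline */
};

static void record_online(struct cpu_hotplug_snap *snap,
			  const struct gp_state *gs)
{
	snap->rcu_onl_gp_seq = gs->gp_seq; /* READ_ONCE() in the kernel */
	snap->rcu_onl_gp_flags = gs->gp_flags;
}

static void record_offline(struct cpu_hotplug_snap *snap,
			   const struct gp_state *gs)
{
	snap->rcu_ofl_gp_seq = gs->gp_seq;
	snap->rcu_ofl_gp_flags = gs->gp_flags;
}

static void dump_snap(int cpu, const struct cpu_hotplug_snap *snap)
{
	printf("CPU %d: last online gp_seq=%lu (flags=%#lx), "
	       "last offline gp_seq=%lu (flags=%#lx)\n",
	       cpu, snap->rcu_onl_gp_seq, snap->rcu_onl_gp_flags,
	       snap->rcu_ofl_gp_seq, snap->rcu_ofl_gp_flags);
}

int main(void)
{
	struct gp_state gs = { .gp_seq = 100, .gp_flags = 0 };
	struct cpu_hotplug_snap snap = { 0 };

	record_offline(&snap, &gs); /* CPU goes offline during GP 100 */
	gs.gp_seq = 108;            /* several grace periods elapse */
	record_online(&snap, &gs);  /* CPU comes back online */
	dump_snap(1, &snap);
	return 0;
}
```

The point of keeping both snapshots is that a dumped rcu_node splat can then show, per CPU, how the last hotplug transitions relate to the current grace period, which is exactly the information the commit message says is otherwise reconstructed by hand from dmesg.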
Diffstat (limited to 'kernel/rcu/tree.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/rcu/tree.c | 12 |

1 file changed, 10 insertions, 2 deletions
```diff
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5f1a11f1f7bc..a2503ef1bbe2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1954,7 +1954,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		rcu_gp_slow(rsp, gp_init_delay);
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rdp = this_cpu_ptr(rsp->rda);
-		rcu_preempt_check_blocked_tasks(rnp);
+		rcu_preempt_check_blocked_tasks(rsp, rnp);
 		rnp->qsmask = rnp->qsmaskinit;
 		WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
 		if (rnp == rdp->mynode)
@@ -2063,7 +2063,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
-			dump_blkd_tasks(rnp, 10);
+			dump_blkd_tasks(rsp, rnp, 10);
 		WARN_ON_ONCE(rnp->qsmask);
 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
 		rdp = this_cpu_ptr(rsp->rda);
@@ -3516,6 +3516,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
+	rdp->rcu_ofl_gp_seq = rsp->gp_seq;
+	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
+	rdp->rcu_onl_gp_seq = rsp->gp_seq;
+	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
 	rcu_boot_init_nocb_percpu_data(rdp);
@@ -3711,6 +3715,8 @@ void rcu_cpu_starting(unsigned int cpu)
 		/* Allow lockless access for expedited grace periods. */
 		smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
 		rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
+		rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
+		rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
 		if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
 			/* Report QS -after- changing ->qsmaskinitnext! */
 			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
@@ -3738,6 +3744,8 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 	mask = rdp->grpmask;
 	spin_lock(&rsp->ofl_lock);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+	rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
+	rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
 		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
```
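One design note visible in the hunks above: rcu_boot_init_percpu_data() seeds all four fields from the boot-time ->gp_seq with RCU_GP_CLEANED flags, so a CPU that has never gone through a hotplug transition still reports recognizable values rather than garbage. The consumer of these fields is dump_blkd_tasks() itself, which lives in kernel/rcu/tree_plugin.h and therefore falls outside this diffstat-limited view. A rough, illustrative fragment of the kind of per-CPU reporting loop it can now perform is below; the field names follow the diff above, but the loop body and print format are assumptions, not the commit's actual code:

```c
/*
 * Illustrative fragment, not taken verbatim from the commit: the real
 * reporting code is in kernel/rcu/tree_plugin.h, which this view omits.
 * Assumes rnp (the affected rcu_node) and rsp (the rcu_state) are in
 * scope, as they are in dump_blkd_tasks() after this change.
 */
int cpu;
struct rcu_data *rdp;

for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
	rdp = per_cpu_ptr(rsp->rda, cpu);
	pr_info("\t%d: online: %ld(%#lx) offline: %ld(%#lx)\n", cpu,
		(long)rdp->rcu_onl_gp_seq,
		(unsigned long)rdp->rcu_onl_gp_flags,
		(long)rdp->rcu_ofl_gp_seq,
		(unsigned long)rdp->rcu_ofl_gp_flags);
}
```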