Diffstat (limited to 'kernel/rcu/tree_plugin.h')
 kernel/rcu/tree_plugin.h | 34 ++++++++++++++++++++++------------
 1 file changed, 22 insertions(+), 12 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index fb88a028deec..84fbee4686d3 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -180,7 +180,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
 	struct task_struct *t = current;
 
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 	WARN_ON_ONCE(rdp->mynode != rnp);
 	WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
 
@@ -560,8 +560,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 	}
 	t = list_entry(rnp->gp_tasks->prev,
 		       struct task_struct, rcu_node_entry);
-	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
+	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+		/*
+		 * We could be printing a lot while holding a spinlock.
+		 * Avoid triggering hard lockup.
+		 */
+		touch_nmi_watchdog();
 		sched_show_task(t);
+	}
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
@@ -957,14 +963,10 @@ static int rcu_boost(struct rcu_node *rnp)
 	 * expedited grace period must boost all blocked tasks, including
 	 * those blocking the pre-existing normal grace period.
 	 */
-	if (rnp->exp_tasks != NULL) {
+	if (rnp->exp_tasks != NULL)
 		tb = rnp->exp_tasks;
-		rnp->n_exp_boosts++;
-	} else {
+	else
 		tb = rnp->boost_tasks;
-		rnp->n_normal_boosts++;
-	}
-	rnp->n_tasks_boosted++;
 
 	/*
 	 * We boost task t by manufacturing an rt_mutex that appears to
@@ -1042,7 +1044,7 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
 	struct task_struct *t;
 
-	lockdep_assert_held(&rnp->lock);
+	raw_lockdep_assert_held_rcu_node(rnp);
 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
@@ -1677,6 +1679,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 	char *ticks_title;
 	unsigned long ticks_value;
 
+	/*
+	 * We could be printing a lot while holding a spinlock.  Avoid
+	 * triggering hard lockup.
+	 */
+	touch_nmi_watchdog();
+
 	if (rsp->gpnum == rdp->gpnum) {
 		ticks_title = "ticks this GP";
 		ticks_value = rdp->ticks_this_gp;
@@ -2235,7 +2243,6 @@ static int rcu_nocb_kthread(void *arg)
 		smp_mb__before_atomic();  /* _add after CB invocation. */
 		atomic_long_add(-c, &rdp->nocb_q_count);
 		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
-		rdp->n_nocbs_invoked += c;
 	}
 	return 0;
 }
@@ -2312,8 +2319,11 @@ void __init rcu_init_nohz(void)
 		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
 			    rcu_nocb_mask);
 	}
-	pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
-		cpumask_pr_args(rcu_nocb_mask));
+	if (cpumask_empty(rcu_nocb_mask))
+		pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
+	else
+		pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
+			cpumask_pr_args(rcu_nocb_mask));
 	if (rcu_nocb_poll)
 		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
 
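The two touch_nmi_watchdog() hunks above apply the same rule of thumb: a loop that may print many lines while holding a raw spinlock runs with interrupts disabled, so it should periodically reset the hard-lockup detector before the NMI watchdog fires. A minimal sketch of that pattern follows; the names my_item, my_lock, my_items, and my_print_all are hypothetical stand-ins for the RCU-specific structures in the patch, not code from it.

#include <linux/list.h>
#include <linux/nmi.h>
#include <linux/printk.h>
#include <linux/spinlock.h>

/* Hypothetical example structures; not part of the patch. */
struct my_item {
	struct list_head entry;
	int id;
};

static DEFINE_RAW_SPINLOCK(my_lock);
static LIST_HEAD(my_items);

static void my_print_all(void)
{
	struct my_item *it;
	unsigned long flags;

	raw_spin_lock_irqsave(&my_lock, flags);
	list_for_each_entry(it, &my_items, entry) {
		/*
		 * Interrupts are off while my_lock is held; a long
		 * stream of printk output could otherwise trip the
		 * hard-lockup (NMI) watchdog, so pet it each pass.
		 */
		touch_nmi_watchdog();
		pr_info("item %d\n", it->id);
	}
	raw_spin_unlock_irqrestore(&my_lock, flags);
}

The stall-warning hunks place the touch_nmi_watchdog() call at the top of each iteration (or once per per-CPU report), which keeps the watchdog happy no matter how much sched_show_task() or the stall printout emits for a single entry.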