diff options
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/rcuperf.c |  2 +-
-rw-r--r--  kernel/rcu/tree.c    | 32 ++++++++++++++++++++--------
2 files changed, 26 insertions, 8 deletions
| diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index 16dd1e6b7c09..9eb39c20082c 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -723,7 +723,7 @@ kfree_perf_init(void)  		schedule_timeout_uninterruptible(1);  	} -	pr_alert("kfree object size=%lu\n", kfree_mult * sizeof(struct kfree_obj)); +	pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));  	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),  			       GFP_KERNEL); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index c716eadc7617..6c6569e0586c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -250,7 +250,7 @@ static noinstr void rcu_dynticks_eqs_enter(void)  	 * next idle sojourn.  	 */  	rcu_dynticks_task_trace_enter();  // Before ->dynticks update! -	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); +	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);  	// RCU is no longer watching.  Better be in extended quiescent state!  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&  		     (seq & RCU_DYNTICK_CTRL_CTR)); @@ -274,13 +274,13 @@ static noinstr void rcu_dynticks_eqs_exit(void)  	 * and we also must force ordering with the next RCU read-side  	 * critical section.  	 */ -	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); +	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);  	// RCU is now watching.  Better not be in an extended quiescent state!  	rcu_dynticks_task_trace_exit();  // After ->dynticks update!  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&  		     !(seq & RCU_DYNTICK_CTRL_CTR));  	if (seq & RCU_DYNTICK_CTRL_MASK) { -		atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks); +		arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);  		smp_mb__after_atomic(); /* _exit after clearing mask. 
*/  	}  } @@ -313,7 +313,7 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)  {  	struct rcu_data *rdp = this_cpu_ptr(&rcu_data); -	return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR); +	return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);  }  /* @@ -633,6 +633,10 @@ static noinstr void rcu_eqs_enter(bool user)  	do_nocb_deferred_wakeup(rdp);  	rcu_prepare_for_idle();  	rcu_preempt_deferred_qs(current); + +	// instrumentation for the noinstr rcu_dynticks_eqs_enter() +	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); +  	instrumentation_end();  	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */  	// RCU is watching here ... @@ -692,6 +696,7 @@ noinstr void rcu_nmi_exit(void)  {  	struct rcu_data *rdp = this_cpu_ptr(&rcu_data); +	instrumentation_begin();  	/*  	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.  	 * (We are exiting an NMI handler, so RCU better be paying attention @@ -705,7 +710,6 @@ noinstr void rcu_nmi_exit(void)  	 * leave it in non-RCU-idle state.  	 */  	if (rdp->dynticks_nmi_nesting != 1) { -		instrumentation_begin();  		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,  				  atomic_read(&rdp->dynticks));  		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */ @@ -714,13 +718,15 @@ noinstr void rcu_nmi_exit(void)  		return;  	} -	instrumentation_begin();  	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */  	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));  	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */  	if (!in_nmi())  		rcu_prepare_for_idle(); + +	// instrumentation for the noinstr rcu_dynticks_eqs_enter() +	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));  	instrumentation_end();  	// RCU is watching here ... @@ -838,6 +844,10 @@ static void noinstr rcu_eqs_exit(bool user)  	rcu_dynticks_eqs_exit();  	// ... 
but is watching here.  	instrumentation_begin(); + +	// instrumentation for the noinstr rcu_dynticks_eqs_exit() +	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); +  	rcu_cleanup_after_idle();  	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); @@ -983,13 +993,21 @@ noinstr void rcu_nmi_enter(void)  		if (!in_nmi())  			rcu_cleanup_after_idle(); +		instrumentation_begin(); +		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs() +		instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks)); +		// instrumentation for the noinstr rcu_dynticks_eqs_exit() +		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); +  		incby = 1;  	} else if (!in_nmi()) {  		instrumentation_begin();  		rcu_irq_enter_check_tick();  		instrumentation_end(); +	} else  { +		instrumentation_begin();  	} -	instrumentation_begin(); +  	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),  			  rdp->dynticks_nmi_nesting,  			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks)); |