Diffstat (limited to 'kernel/rcu/srcutree.c')
-rw-r--r--	kernel/rcu/srcutree.c	100
1 file changed, 84 insertions(+), 16 deletions(-)
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 1c304fec89c0..ca4b5dcec675 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -417,7 +417,7 @@ static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
 	for_each_possible_cpu(cpu) {
 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 
-		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
+		sum += atomic_long_read(&cpuc->srcu_lock_count[idx]);
 	}
 	return sum;
 }
@@ -429,13 +429,18 @@ static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
 {
 	int cpu;
+	unsigned long mask = 0;
 	unsigned long sum = 0;
 
 	for_each_possible_cpu(cpu) {
 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 
-		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
+		sum += atomic_long_read(&cpuc->srcu_unlock_count[idx]);
+		if (IS_ENABLED(CONFIG_PROVE_RCU))
+			mask = mask | READ_ONCE(cpuc->srcu_nmi_safety);
 	}
+	WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask >> 1)),
+		  "Mixed NMI-safe readers for srcu_struct at %ps.\n", ssp);
 	return sum;
 }
 
@@ -503,10 +508,10 @@ static bool srcu_readers_active(struct srcu_struct *ssp)
 	for_each_possible_cpu(cpu) {
 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 
-		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
-		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
-		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
-		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
+		sum += atomic_long_read(&cpuc->srcu_lock_count[0]);
+		sum += atomic_long_read(&cpuc->srcu_lock_count[1]);
+		sum -= atomic_long_read(&cpuc->srcu_unlock_count[0]);
+		sum -= atomic_long_read(&cpuc->srcu_unlock_count[1]);
 	}
 	return sum;
 }
@@ -626,6 +631,29 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
 }
 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
+#ifdef CONFIG_PROVE_RCU
+/*
+ * Check for consistent NMI safety.
+ */
+void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
+{
+	int nmi_safe_mask = 1 << nmi_safe;
+	int old_nmi_safe_mask;
+	struct srcu_data *sdp;
+
+	/* NMI-unsafe use in NMI is a bad sign */
+	WARN_ON_ONCE(!nmi_safe && in_nmi());
+	sdp = raw_cpu_ptr(ssp->sda);
+	old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
+	if (!old_nmi_safe_mask) {
+		WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
+		return;
+	}
+	WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
+}
+EXPORT_SYMBOL_GPL(srcu_check_nmi_safety);
+#endif /* CONFIG_PROVE_RCU */
+
 /*
  * Counts the new reader in the appropriate per-CPU element of the
  * srcu_struct.
@@ -636,7 +664,7 @@ int __srcu_read_lock(struct srcu_struct *ssp)
 	int idx;
 
 	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
-	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
+	this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
 	return idx;
 }
@@ -650,10 +678,45 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
 void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
 	smp_mb(); /* C */  /* Avoid leaking the critical section. */
-	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
+	this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
+#ifdef CONFIG_NEED_SRCU_NMI_SAFE
+
+/*
+ * Counts the new reader in the appropriate per-CPU element of the
+ * srcu_struct, but in an NMI-safe manner using RMW atomics.
+ * Returns an index that must be passed to the matching srcu_read_unlock().
+ */
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+{
+	int idx;
+	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
+
+	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
+	atomic_long_inc(&sdp->srcu_lock_count[idx]);
+	smp_mb__after_atomic(); /* B */  /* Avoid leaking the critical section. */
+	return idx;
+}
+EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
+
+/*
+ * Removes the count for the old reader from the appropriate per-CPU
+ * element of the srcu_struct.  Note that this may well be a different
+ * CPU than that which was incremented by the corresponding srcu_read_lock().
+ */
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+{
+	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
+
+	smp_mb__before_atomic(); /* C */  /* Avoid leaking the critical section. */
+	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
+}
+EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
+
+#endif // CONFIG_NEED_SRCU_NMI_SAFE
+
 /*
  * Start an SRCU grace period.
  */
@@ -1090,7 +1153,12 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 	int ss_state;
 
 	check_init_srcu_struct(ssp);
-	idx = srcu_read_lock(ssp);
+	/*
+	 * While starting a new grace period, make sure we are in an
+	 * SRCU read-side critical section so that the grace-period
+	 * sequence number cannot wrap around in the meantime.
+	 */
+	idx = __srcu_read_lock_nmisafe(ssp);
 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
 	if (ss_state < SRCU_SIZE_WAIT_CALL)
 		sdp = per_cpu_ptr(ssp->sda, 0);
@@ -1123,7 +1191,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
 	else if (needexp)
 		srcu_funnel_exp_start(ssp, sdp_mynode, s);
-	srcu_read_unlock(ssp, idx);
+	__srcu_read_unlock_nmisafe(ssp, idx);
 	return s;
 }
 
@@ -1427,13 +1495,13 @@ void srcu_barrier(struct srcu_struct *ssp)
 	/* Initial count prevents reaching zero until all CBs are posted. */
 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
 
-	idx = srcu_read_lock(ssp);
+	idx = __srcu_read_lock_nmisafe(ssp);
 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
 		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
 	else
 		for_each_possible_cpu(cpu)
 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
-	srcu_read_unlock(ssp, idx);
+	__srcu_read_unlock_nmisafe(ssp, idx);
 
 	/* Remove the initial count, at which point reaching zero can happen. */
 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
@@ -1687,8 +1755,8 @@ void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
 			struct srcu_data *sdp;
 
 			sdp = per_cpu_ptr(ssp->sda, cpu);
-			u0 = data_race(sdp->srcu_unlock_count[!idx]);
-			u1 = data_race(sdp->srcu_unlock_count[idx]);
+			u0 = data_race(atomic_long_read(&sdp->srcu_unlock_count[!idx]));
+			u1 = data_race(atomic_long_read(&sdp->srcu_unlock_count[idx]));
 
 			/*
 			 * Make sure that a lock is always counted if the corresponding
@@ -1696,8 +1764,8 @@ void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
 			 */
 			smp_rmb();
 
-			l0 = data_race(sdp->srcu_lock_count[!idx]);
-			l1 = data_race(sdp->srcu_lock_count[idx]);
+			l0 = data_race(atomic_long_read(&sdp->srcu_lock_count[!idx]));
+			l1 = data_race(atomic_long_read(&sdp->srcu_lock_count[idx]));
 
 			c0 = l0 - u0;
 			c1 = l1 - u1;
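The new __srcu_read_lock_nmisafe() and __srcu_read_unlock_nmisafe() primitives above replace this_cpu_inc() with atomic_long_inc() on the per-CPU counters, so a reader can be entered and exited from NMI context without corrupting the counts. Below is a minimal usage sketch, not part of the patch: it assumes the srcu_read_lock_nmisafe()/srcu_read_unlock_nmisafe() wrapper API that accompanies these internals, and the names my_srcu, my_cfg, and my_nmi_reader() are made up for illustration.

/* Hypothetical NMI-context reader of an SRCU-protected pointer. */
#include <linux/srcu.h>

struct my_cfg {
	int threshold;
};

DEFINE_STATIC_SRCU(my_srcu);		/* made-up srcu_struct for this sketch */
static struct my_cfg __rcu *my_cfg_ptr;	/* updaters publish with rcu_assign_pointer() and
					 * free old versions only after synchronize_srcu() */
static int my_last_threshold;

/* Imagined handler running in NMI context (e.g. a hardlockup watchdog callback). */
static void my_nmi_reader(void)
{
	struct my_cfg *cfg;
	int idx;

	/* NMI-safe flavor: counters are bumped with atomics rather than this_cpu_inc(). */
	idx = srcu_read_lock_nmisafe(&my_srcu);
	cfg = srcu_dereference(my_cfg_ptr, &my_srcu);
	if (cfg)
		my_last_threshold = READ_ONCE(cfg->threshold);
	/* The index returned by the lock must be passed back to the matching unlock. */
	srcu_read_unlock_nmisafe(&my_srcu, idx);
}

Note that the srcu_check_nmi_safety() logic added under CONFIG_PROVE_RCU warns when NMI-safe and NMI-unsafe readers are mixed on the same srcu_struct, so a given srcu_struct should stick to one flavor of reader.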