| author | Greg Kroah-Hartman <[email protected]> | 2022-01-30 15:00:39 +0100 | 
|---|---|---|
| committer | Greg Kroah-Hartman <[email protected]> | 2022-01-30 15:00:39 +0100 | 
| commit | 7ab004dbcbee38b8a70798835d3ffcd97a985a5e (patch) | |
| tree | 0caa6cb97801736046823ca785a5ba36bf684ac6 /arch/powerpc/perf/core-book3s.c | |
| parent | 710f8af199ee9d72dd87083edd55c5ee250ee6f4 (diff) | |
| parent | 26291c54e111ff6ba87a164d85d4a4e134b7315c (diff) | |
Merge tag 'v5.17-rc2' into char-misc-next
We need the char/misc fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Diffstat (limited to 'arch/powerpc/perf/core-book3s.c')
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 75
1 file changed, 42 insertions, 33 deletions
```diff
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index a684901b6965..b5b42cf0a703 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -776,6 +776,34 @@ static void pmao_restore_workaround(bool ebb)
 	mtspr(SPRN_PMC6, pmcs[5]);
 }
 
+/*
+ * If the perf subsystem wants performance monitor interrupts as soon as
+ * possible (e.g., to sample the instruction address and stack chain),
+ * this should return true. The IRQ masking code can then enable MSR[EE]
+ * in some places (e.g., interrupt handlers) that allows PMI interrupts
+ * through to improve accuracy of profiles, at the cost of some performance.
+ *
+ * The PMU counters can be enabled by other means (e.g., sysfs raw SPR
+ * access), but in that case there is no need for prompt PMI handling.
+ *
+ * This currently returns true if any perf counter is being used. It
+ * could possibly return false if only events are being counted rather than
+ * samples being taken, but for now this is good enough.
+ */
+bool power_pmu_wants_prompt_pmi(void)
+{
+	struct cpu_hw_events *cpuhw;
+
+	/*
+	 * This could simply test local_paca->pmcregs_in_use if that were not
+	 * under ifdef KVM.
+	 */
+	if (!ppmu)
+		return false;
+
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
+	return cpuhw->n_events;
+}
 #endif /* CONFIG_PPC64 */
 
 static void perf_event_interrupt(struct pt_regs *regs);
@@ -1327,9 +1355,20 @@ static void power_pmu_disable(struct pmu *pmu)
 		 * Otherwise provide a warning if there is PMI pending, but
 		 * no counter is found overflown.
 		 */
-		if (any_pmc_overflown(cpuhw))
-			clear_pmi_irq_pending();
-		else
+		if (any_pmc_overflown(cpuhw)) {
+			/*
+			 * Since power_pmu_disable runs under local_irq_save, it
+			 * could happen that code hits a PMC overflow without PMI
+			 * pending in paca. Hence only clear PMI pending if it was
+			 * set.
+			 *
+			 * If a PMI is pending, then MSR[EE] must be disabled (because
+			 * the masked PMI handler disabling EE). So it is safe to
+			 * call clear_pmi_irq_pending().
+			 */
+			if (pmi_irq_pending())
+				clear_pmi_irq_pending();
+		} else
 			WARN_ON(pmi_irq_pending());
 
 		val = mmcra = cpuhw->mmcr.mmcra;
@@ -2438,36 +2477,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
 	perf_sample_event_took(sched_clock() - start_clock);
 }
 
-/*
- * If the perf subsystem wants performance monitor interrupts as soon as
- * possible (e.g., to sample the instruction address and stack chain),
- * this should return true. The IRQ masking code can then enable MSR[EE]
- * in some places (e.g., interrupt handlers) that allows PMI interrupts
- * though to improve accuracy of profiles, at the cost of some performance.
- *
- * The PMU counters can be enabled by other means (e.g., sysfs raw SPR
- * access), but in that case there is no need for prompt PMI handling.
- *
- * This currently returns true if any perf counter is being used. It
- * could possibly return false if only events are being counted rather than
- * samples being taken, but for now this is good enough.
- */
-bool power_pmu_wants_prompt_pmi(void)
-{
-	struct cpu_hw_events *cpuhw;
-
-	/*
-	 * This could simply test local_paca->pmcregs_in_use if that were not
-	 * under ifdef KVM.
-	 */
-
-	if (!ppmu)
-		return false;
-
-	cpuhw = this_cpu_ptr(&cpu_hw_events);
-	return cpuhw->n_events;
-}
-
 static int power_pmu_prepare_cpu(unsigned int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
```
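The comment on `power_pmu_wants_prompt_pmi()` says the IRQ masking code uses it to decide when to enable MSR[EE] so that PMIs get through promptly. As a rough illustration of what such a call site could look like, here is a minimal sketch; the `should_hard_irq_enable()` helper and its body are assumptions for illustration, not the verbatim powerpc hw_irq implementation:

```c
/*
 * Illustrative sketch only: how soft-interrupt-masking code might
 * consult power_pmu_wants_prompt_pmi() before hard-enabling MSR[EE]
 * inside an interrupt handler. The helper name and exact guards are
 * assumptions, not quoted from the kernel tree.
 */
#ifdef CONFIG_PERF_EVENTS
bool power_pmu_wants_prompt_pmi(void);

static inline bool should_hard_irq_enable(void)
{
	/* Let PMIs in early only while perf has counters scheduled. */
	return power_pmu_wants_prompt_pmi();
}
#else
static inline bool should_hard_irq_enable(void)
{
	return false;
}
#endif
```

The trade-off mirrors the comment in the patch: enabling EE inside handlers improves profile accuracy at some performance cost, so it is only worth doing while perf counters are actually in use.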