author     Peter Zijlstra <peterz@infradead.org>   2022-05-10 21:28:25 +0200
committer  Peter Zijlstra <peterz@infradead.org>   2022-09-07 21:54:02 +0200
commit     28f0f3c44b5c35be657a4f922dcdfb48285f4373 (patch)
tree       0be9accbe824b6ff579eb094cff959a09c90ebd8 /arch
parent     e577bb17a1eaa35b86ee873a786e603be768d668 (diff)
perf/x86: Change x86_pmu::limit_period signature
In preparation for making it a static_call, change the signature.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220829101321.573713839@infradead.org
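Why the new shape suits a static_call conversion (the conversion itself lands in a later patch): with a void return and an in/out parameter, the static call can be given a NULL default and invoked through static_call_cond(), which compiles to a NOP when no handler is installed; a value-returning callback would instead need an explicit identity function as its default. A minimal sketch of the usual arch/x86/events/core.c static-call pattern (the x86_pmu_limit_period naming here is illustrative, not part of this commit):

    DECLARE_STATIC_CALL(x86_pmu_limit_period, *x86_pmu.limit_period);
    DEFINE_STATIC_CALL_NULL(x86_pmu_limit_period, *x86_pmu.limit_period);

    static void x86_pmu_static_call_update(void)
    {
            static_call_update(x86_pmu_limit_period, x86_pmu.limit_period);
    }

    /* Call site: safe even when a PMU leaves limit_period unset. */
    static_call_cond(x86_pmu_limit_period)(event, &left);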
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/events/amd/core.c    |  8
-rw-r--r--  arch/x86/events/core.c        | 13
-rw-r--r--  arch/x86/events/intel/core.c  | 19
-rw-r--r--  arch/x86/events/perf_event.h  |  2
4 files changed, 20 insertions(+), 22 deletions(-)
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index bd99d2ae14c3..8b70237c33f7 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -1224,16 +1224,14 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
 	return x86_event_sysfs_show(page, config, event);
 }
 
-static u64 amd_pmu_limit_period(struct perf_event *event, u64 left)
+static void amd_pmu_limit_period(struct perf_event *event, s64 *left)
 {
 	/*
 	 * Decrease period by the depth of the BRS feature to get the last N
 	 * taken branches and approximate the desired period
 	 */
-	if (has_branch_stack(event) && left > x86_pmu.lbr_nr)
-		left -= x86_pmu.lbr_nr;
-
-	return left;
+	if (has_branch_stack(event) && *left > x86_pmu.lbr_nr)
+		*left -= x86_pmu.lbr_nr;
 }
 
 static __initconst const struct x86_pmu amd_pmu = {
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index b074e71bab21..1e90bc7ca36f 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -621,8 +621,9 @@ int x86_pmu_hw_config(struct perf_event *event)
 	event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 
 	if (event->attr.sample_period && x86_pmu.limit_period) {
-		if (x86_pmu.limit_period(event, event->attr.sample_period) >
-				event->attr.sample_period)
+		s64 left = event->attr.sample_period;
+		x86_pmu.limit_period(event, &left);
+		if (left > event->attr.sample_period)
 			return -EINVAL;
 	}
 
@@ -1396,9 +1397,9 @@ int x86_perf_event_set_period(struct perf_event *event)
 		left = x86_pmu.max_period;
 
 	if (x86_pmu.limit_period)
-		left = x86_pmu.limit_period(event, left);
+		x86_pmu.limit_period(event, &left);
 
-	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
+	this_cpu_write(pmc_prev_left[idx], left);
 
 	/*
 	 * The hw event starts counting from this event offset,
@@ -2677,7 +2678,9 @@ static int x86_pmu_check_period(struct perf_event *event, u64 value)
 		return -EINVAL;
 
 	if (value && x86_pmu.limit_period) {
-		if (x86_pmu.limit_period(event, value) > value)
+		s64 left = value;
+		x86_pmu.limit_period(event, &left);
+		if (left > value)
 			return -EINVAL;
 	}
 
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index feed732fdf57..92cc390590d1 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4344,28 +4344,25 @@ static u8 adl_get_hybrid_cpu_type(void)
  * Therefore the effective (average) period matches the requested period,
  * despite coarser hardware granularity.
  */
-static u64 bdw_limit_period(struct perf_event *event, u64 left)
+static void bdw_limit_period(struct perf_event *event, s64 *left)
 {
 	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
 			X86_CONFIG(.event=0xc0, .umask=0x01)) {
-		if (left < 128)
-			left = 128;
-		left &= ~0x3fULL;
+		if (*left < 128)
+			*left = 128;
+		*left &= ~0x3fULL;
 	}
-	return left;
 }
 
-static u64 nhm_limit_period(struct perf_event *event, u64 left)
+static void nhm_limit_period(struct perf_event *event, s64 *left)
 {
-	return max(left, 32ULL);
+	*left = max(*left, 32LL);
 }
 
-static u64 spr_limit_period(struct perf_event *event, u64 left)
+static void spr_limit_period(struct perf_event *event, s64 *left)
 {
 	if (event->attr.precise_ip == 3)
-		return max(left, 128ULL);
-
-	return left;
+		*left = max(*left, 128LL);
 }
 
 PMU_FORMAT_ATTR(event,	"config:0-7"	);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 7ae1a6c5368c..e82d2d212534 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -781,7 +781,7 @@ struct x86_pmu {
 	struct event_constraint *event_constraints;
 	struct x86_pmu_quirk *quirks;
 	int		perfctr_second_write;
-	u64		(*limit_period)(struct perf_event *event, u64 l);
+	void		(*limit_period)(struct perf_event *event, s64 *l);
 
 	/* PMI handler bits */
 	unsigned int	late_ack		:1,
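For reference, a toy user-space model (not kernel code) of the Broadwell rule above: the requested period is clamped to at least 128 and its low 6 bits are cleared, and because the remaining count is re-derived on every overflow, the truncated remainder carries into the next period, so the average converges to the requested value. The carry mechanism below is a simplification of what x86_perf_event_set_period() does via the event's period_left bookkeeping:

    #include <stdio.h>

    /* Same clamping as bdw_limit_period(): at least 128, low 6 bits clear. */
    static void bdw_clamp(long long *left)
    {
            if (*left < 128)
                    *left = 128;
            *left &= ~0x3fLL;
    }

    int main(void)
    {
            long long period = 1000;        /* requested sample period */
            long long owed = 0;             /* remainder truncated so far */
            long long total = 0;

            for (int i = 1; i <= 8; i++) {
                    long long left = period + owed; /* re-derived each overflow */
                    bdw_clamp(&left);
                    owed = period + owed - left;    /* remainder carries over */
                    total += left;
                    printf("sample %d: programmed %lld, running avg %.1f\n",
                           i, left, (double)total / i);
            }
            return 0;
    }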