Diffstat (limited to 'drivers/perf/riscv_pmu_sbi.c')
-rw-r--r--  drivers/perf/riscv_pmu_sbi.c | 132
1 file changed, 115 insertions(+), 17 deletions(-)
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index dca3537a8dcc..6f6681bbfd36 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -17,22 +17,28 @@
 #include <linux/irqdomain.h>
 #include <linux/of_irq.h>
 #include <linux/of.h>
+#include <linux/cpu_pm.h>
 
 #include <asm/sbi.h>
 #include <asm/hwcap.h>
 
-union sbi_pmu_ctr_info {
-	unsigned long value;
-	struct {
-		unsigned long csr:12;
-		unsigned long width:6;
-#if __riscv_xlen == 32
-		unsigned long reserved:13;
-#else
-		unsigned long reserved:45;
-#endif
-		unsigned long type:1;
-	};
+PMU_FORMAT_ATTR(event, "config:0-47");
+PMU_FORMAT_ATTR(firmware, "config:63");
+
+static struct attribute *riscv_arch_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_firmware.attr,
+	NULL,
+};
+
+static struct attribute_group riscv_pmu_format_group = {
+	.name = "format",
+	.attrs = riscv_arch_formats_attr,
+};
+
+static const struct attribute_group *riscv_pmu_attr_groups[] = {
+	&riscv_pmu_format_group,
+	NULL,
 };
 
 /*
@@ -274,8 +280,13 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
 		cflags |= SBI_PMU_CFG_FLAG_SET_UINH;
 
 	/* retrieve the available counter index */
+#if defined(CONFIG_32BIT)
+	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
+			cflags, hwc->event_base, hwc->config, hwc->config >> 32);
+#else
 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
 			cflags, hwc->event_base, hwc->config, 0);
+#endif
 	if (ret.error) {
 		pr_debug("Not able to find a counter for event %lx config %llx\n",
 			hwc->event_base, hwc->config);
@@ -417,8 +428,13 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
 
+#if defined(CONFIG_32BIT)
 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
 			1, flag, ival, ival >> 32, 0);
+#else
+	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
+			1, flag, ival, 0, 0);
+#endif
 	if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
 		pr_err("Starting counter idx %d failed with error %d\n",
 			hwc->idx, sbi_err_map_linux_errno(ret.error));
@@ -525,8 +541,14 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
 			hwc = &event->hw;
 			max_period = riscv_pmu_ctr_get_width_mask(event);
 			init_val = local64_read(&hwc->prev_count) & max_period;
+#if defined(CONFIG_32BIT)
+			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
+				  flag, init_val, init_val >> 32, 0);
+#else
 			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
 				  flag, init_val, 0, 0);
+#endif
+			perf_event_update_userpage(event);
 		}
 		ctr_ovf_mask = ctr_ovf_mask >> 1;
 		idx++;
@@ -666,12 +688,15 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
 		child = of_get_compatible_child(cpu, "riscv,cpu-intc");
 		if (!child) {
 			pr_err("Failed to find INTC node\n");
+			of_node_put(cpu);
 			return -ENODEV;
 		}
 		domain = irq_find_host(child);
 		of_node_put(child);
-		if (domain)
+		if (domain) {
+			of_node_put(cpu);
 			break;
+		}
 	}
 	if (!domain) {
 		pr_err("Failed to find INTC IRQ root domain\n");
@@ -693,6 +718,73 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
 	return 0;
 }
 
+#ifdef CONFIG_CPU_PM
+static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+			       void *v)
+{
+	struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
+	int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);
+	struct perf_event *event;
+	int idx;
+
+	if (!enabled)
+		return NOTIFY_OK;
+
+	for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) {
+		event = cpuc->events[idx];
+		if (!event)
+			continue;
+
+		switch (cmd) {
+		case CPU_PM_ENTER:
+			/*
+			 * Stop and update the counter
+			 */
+			riscv_pmu_stop(event, PERF_EF_UPDATE);
+			break;
+		case CPU_PM_EXIT:
+		case CPU_PM_ENTER_FAILED:
+			/*
+			 * Restore and enable the counter.
+			 *
+			 * Requires RCU read locking to be functional,
+			 * wrap the call within RCU_NONIDLE to make the
+			 * RCU subsystem aware this cpu is not idle from
+			 * an RCU perspective for the riscv_pmu_start() call
+			 * duration.
+			 */
+			RCU_NONIDLE(riscv_pmu_start(event, PERF_EF_RELOAD));
+			break;
+		default:
+			break;
+		}
+	}
+
+	return NOTIFY_OK;
+}
+
+static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
+{
+	pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
+	return cpu_pm_register_notifier(&pmu->riscv_pm_nb);
+}
+
+static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
+{
+	cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);
+}
+#else
+static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
+static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
+#endif
+
+static void riscv_pmu_destroy(struct riscv_pmu *pmu)
+{
+	riscv_pm_pmu_unregister(pmu);
+	cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
+}
+
 static int pmu_sbi_device_probe(struct platform_device *pdev)
 {
 	struct riscv_pmu *pmu = NULL;
@@ -720,6 +812,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
 	}
+	pmu->pmu.attr_groups = riscv_pmu_attr_groups;
 	pmu->num_counters = num_counters;
 	pmu->ctr_start = pmu_sbi_ctr_start;
 	pmu->ctr_stop = pmu_sbi_ctr_stop;
@@ -733,14 +826,19 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	ret = riscv_pm_pmu_register(pmu);
+	if (ret)
+		goto out_unregister;
+
 	ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
-	if (ret) {
-		cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
-		return ret;
-	}
+	if (ret)
+		goto out_unregister;
 
 	return 0;
 
+out_unregister:
+	riscv_pmu_destroy(pmu);
+
 out_free:
 	kfree(pmu);
 	return ret;
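Note on the repeated CONFIG_32BIT hunks above: SBI call arguments are XLEN-sized, so on a 32-bit kernel a 64-bit quantity (the counter's initial value, or the upper half of the raw event config) has to be split across two arguments, low word first, while on a 64-bit kernel the whole value fits in one argument and the extra argument stays 0. The sketch below is only a minimal user-space illustration of that split; fake_sbi_counter_start() is a hypothetical stand-in for the real sbi_ecall() and is not part of the patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the SBI counter-start call: it only prints
 * which values would land in the two XLEN-sized "initial value" arguments.
 */
static void fake_sbi_counter_start(unsigned long value_lo, unsigned long value_hi)
{
	printf("low word argument  = %#lx\n", value_lo);
	printf("high word argument = %#lx\n", value_hi);
}

int main(void)
{
	uint64_t ival = 0xdeadbeefcafef00dULL;	/* 64-bit initial counter value */

	if (sizeof(unsigned long) == 4) {
		/* 32-bit build: the value does not fit in one register-sized
		 * argument, so pass the low and high halves separately. */
		fake_sbi_counter_start((unsigned long)ival,
				       (unsigned long)(ival >> 32));
	} else {
		/* 64-bit build: the whole value fits in one argument,
		 * so the high argument is simply 0. */
		fake_sbi_counter_start((unsigned long)ival, 0);
	}
	return 0;
}

The same reasoning explains the CFG_MATCH hunk: on RV64 hwc->config already travels whole in a single unsigned long argument, so passing hwc->config >> 32 as well would be redundant, which is why the #else branches keep 0 there.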