Diffstat (limited to 'drivers/perf/arm_pmu.c')
 drivers/perf/arm_pmu.c | 49 +++++++++----------------------------------
 1 file changed, 9 insertions(+), 40 deletions(-)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 3f07df5a7e95..15bd1e34a88e 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -514,9 +514,6 @@ static int armpmu_event_init(struct perf_event *event)
 	/* does not support taken branch sampling */
 	if (has_branch_stack(event))
 		return -EOPNOTSUPP;
 
-	if (armpmu->map_event(event) == -ENOENT)
-		return -ENOENT;
-
 	return __hw_perf_event_init(event);
 }
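
The deleted check was redundant: __hw_perf_event_init() performs the same
event mapping itself and propagates any failure to the caller. A sketch of
that path (paraphrased from the same file, not the verbatim code):

static int __hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int mapping = armpmu->map_event(event);	/* same call as the deleted check */

	if (mapping < 0)
		return mapping;		/* -ENOENT now reaches armpmu_event_init() */

	/* ... remainder of the event setup elided ... */
	return 0;
}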
@@ -550,17 +547,10 @@ static void armpmu_disable(struct pmu *pmu)
  * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
  * the same microarchitecture.
  */
-static int armpmu_filter_match(struct perf_event *event)
+static bool armpmu_filter(struct pmu *pmu, int cpu)
 {
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	unsigned int cpu = smp_processor_id();
-	int ret;
-
-	ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
-	if (ret && armpmu->filter_match)
-		return armpmu->filter_match(event);
-
-	return ret;
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
 }
 
 static ssize_t cpus_show(struct device *dev,
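
Note the inverted polarity: the old per-event filter_match() returned
nonzero to mean "this event may run here", while the new per-PMU hook
returns true to mean "filter this PMU out on this CPU". The core now also
passes the CPU explicitly, which removes the smp_processor_id() lookup and
the optional per-driver filter_match indirection. An illustrative
caller-side check showing the new contract (a sketch, not the verbatim
perf core code):

/* Sketch: how a scheduler-side caller can consume pmu::filter. */
static bool pmu_excluded_on(struct pmu *pmu, int cpu)	/* invented helper name */
{
	return pmu->filter && pmu->filter(pmu, cpu);	/* true: skip this PMU */
}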
@@ -762,17 +752,8 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
 	case CPU_PM_ENTER_FAILED:
 		/*
 		 * Restore and enable the counter.
-		 * armpmu_start() indirectly calls
-		 *
-		 * perf_event_update_userpage()
-		 *
-		 * that requires RCU read locking to be functional,
-		 * wrap the call within RCU_NONIDLE to make the
-		 * RCU subsystem aware this cpu is not idle from
-		 * an RCU perspective for the armpmu_start() call
-		 * duration.
 		 */
-		RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
+		armpmu_start(event, PERF_EF_RELOAD);
 		break;
 	default:
 		break;
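
RCU_NONIDLE() existed to keep RCU watching across armpmu_start(), whose
perf_event_update_userpage() call takes an RCU read lock; that lock is only
meaningful while RCU is watching the CPU. The wrapper can be dropped here
presumably because CPU PM notifiers now run while RCU is still watching, so
the plain call is safe. For reference, the wrapper was roughly defined as
follows (older kernels; the exact helper names varied across versions):

#define RCU_NONIDLE(a) \
	do { \
		rcu_irq_enter_irqson();	/* tell RCU this CPU is not idle */ \
		do { a; } while (0); \
		rcu_irq_exit_irqson(); \
	} while (0)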
@@ -861,16 +842,16 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 					    &cpu_pmu->node);
 }
 
-static struct arm_pmu *__armpmu_alloc(gfp_t flags)
+struct arm_pmu *armpmu_alloc(void)
 {
 	struct arm_pmu *pmu;
 	int cpu;
 
-	pmu = kzalloc(sizeof(*pmu), flags);
+	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
 	if (!pmu)
 		goto out;
 
-	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
+	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, GFP_KERNEL);
 	if (!pmu->hw_events) {
 		pr_info("failed to allocate per-cpu PMU data.\n");
 		goto out_free_pmu;
@@ -885,14 +866,13 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
 		.start		= armpmu_start,
 		.stop		= armpmu_stop,
 		.read		= armpmu_read,
-		.filter_match	= armpmu_filter_match,
+		.filter		= armpmu_filter,
 		.attr_groups	= pmu->attr_groups,
 		/*
 		 * This is a CPU PMU potentially in a heterogeneous
 		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
 		 * and we have taken ctx sharing into account (e.g. with our
-		 * pmu::filter_match callback and pmu::event_init group
-		 * validation).
+		 * pmu::filter callback and pmu::event_init group validation).
 		 */
 		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS | PERF_PMU_CAP_EXTENDED_REGS,
 	};
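
The rename mirrors the corresponding hook change in struct pmu in the perf
core. A sketch of the two declarations side by side (paraphrased from
include/linux/perf_event.h, old and new; they never coexisted):

struct pmu {
	/* ... */
	/* old: per-event hook; nonzero meant the event may be scheduled */
	int  (*filter_match)(struct perf_event *event);
	/* new: per-PMU, per-CPU hook; true means "filter this PMU out" */
	bool (*filter)(struct pmu *pmu, int cpu);
	/* ... */
};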
@@ -916,17 +896,6 @@ out:
 	return NULL;
 }
 
-struct arm_pmu *armpmu_alloc(void)
-{
-	return __armpmu_alloc(GFP_KERNEL);
-}
-
-struct arm_pmu *armpmu_alloc_atomic(void)
-{
-	return __armpmu_alloc(GFP_ATOMIC);
-}
-
-
 void armpmu_free(struct arm_pmu *pmu)
 {
 	free_percpu(pmu->hw_events);
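
With the GFP_ATOMIC variant gone, armpmu_alloc() is the single entry point
and always allocates with GFP_KERNEL, so callers must be able to sleep. A
hypothetical caller sketch (my_pmu_init() is an invented name, not a
function in this file):

static int my_pmu_init(void)		/* hypothetical caller */
{
	struct arm_pmu *pmu = armpmu_alloc();	/* may sleep: GFP_KERNEL */

	if (!pmu)
		return -ENOMEM;

	/* ... fill in pmu->map_event, pmu->supported_cpus, etc. ... */

	armpmu_free(pmu);		/* teardown / error path */
	return 0;
}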