From 08280e6c4c2e8049ac61d9e8e3536ec1df629c0d Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Sun, 14 Oct 2012 17:59:40 -0700
Subject: sparc64: Like x86 we should check current->mm during perf backtrace generation.

If the MM is not active, only report the top-level PC.  Do not try to
access the address space.

Signed-off-by: David S. Miller
---
 arch/sparc/kernel/perf_event.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

(limited to 'arch/sparc/kernel/perf_event.c')

diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e48651dace1b..9e96f849a744 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1738,8 +1738,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 {
 	unsigned long ufp;
 
-	perf_callchain_store(entry, regs->tpc);
-
 	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
 	do {
 		struct sparc_stackf *usf, sf;
@@ -1760,8 +1758,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 {
 	unsigned long ufp;
 
-	perf_callchain_store(entry, regs->tpc);
-
 	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
 	do {
 		struct sparc_stackf32 *usf, sf;
@@ -1780,6 +1776,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 void
 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
+	perf_callchain_store(entry, regs->tpc);
+
+	if (!current->mm)
+		return;
+
 	flushw_user();
 	if (test_thread_flag(TIF_32BIT))
 		perf_callchain_user_32(entry, regs);
-- cgit

From e793d8c6740f8fe704fa216e95685f4d92c4c4b9 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 16 Oct 2012 13:05:25 -0700
Subject: sparc64: Fix bit twiddling in sparc_pmu_enable_event().

There was a serious disconnect in the logic happening in
sparc_pmu_disable_event() vs. sparc_pmu_enable_event().

Event disable is implemented by programming a NOP event into the PCR.
However, event enable was not reversing this operation.  Instead, it
was setting the User/Priv/Hypervisor trace enable bits.  That's not
sparc_pmu_enable_event()'s job; that's what sparc_pmu_enable() and
sparc_pmu_disable() do.

The intent of sparc_pmu_enable_event() is clear, since it first clears
out the event type encoding field.  So fix this by OR'ing in the event
encoding rather than the trace enable bits.

Signed-off-by: David S. Miller
---
 arch/sparc/kernel/perf_event.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'arch/sparc/kernel/perf_event.c')

diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 9e96f849a744..885a8af74064 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -817,15 +817,17 @@ static u64 nop_for_index(int idx)
 static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc,
 					  struct hw_perf_event *hwc, int idx)
 {
-	u64 val, mask = mask_for_index(idx);
+	u64 enc, val, mask = mask_for_index(idx);
 	int pcr_index = 0;
 
 	if (sparc_pmu->num_pcrs > 1)
 		pcr_index = idx;
 
+	enc = perf_event_get_enc(cpuc->events[idx]);
+
 	val = cpuc->pcr[pcr_index];
 	val &= ~mask;
-	val |= hwc->config;
+	val |= event_encoding(enc, idx);
 	cpuc->pcr[pcr_index] = val;
 
 	pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
-- cgit
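
For context on the second commit: the counterpart it refers to, sparc_pmu_disable_event(), is
what programs the NOP encoding into the counter's PCR field.  The following is a minimal
sketch of that disable path, reconstructed from the commit message's description and the
helpers visible in the diff (mask_for_index(), nop_for_index(), pcr_ops->write_pcr()); it is
not part of this patch and the exact body in the tree may differ.

	/* Hypothetical sketch of the disable side: clear the event type
	 * encoding field for this counter and park it on a NOP event.
	 * The fixed sparc_pmu_enable_event() above reverses exactly this
	 * by OR'ing the real event encoding back into the same field.
	 */
	static void sparc_pmu_disable_event(struct cpu_hw_events *cpuc,
					    struct hw_perf_event *hwc, int idx)
	{
		u64 mask = mask_for_index(idx);
		u64 nop = nop_for_index(idx);
		int pcr_index = 0;
		u64 val;

		if (sparc_pmu->num_pcrs > 1)
			pcr_index = idx;

		val = cpuc->pcr[pcr_index];
		val &= ~mask;		/* clear the event type encoding field */
		val |= nop;		/* program the NOP event in its place */
		cpuc->pcr[pcr_index] = val;

		pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
	}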