author     Heiko Carstens <hca@linux.ibm.com>          2024-04-29 14:28:48 +0200
committer  Alexander Gordeev <agordeev@linux.ibm.com>  2024-05-14 13:37:07 +0200
commit     62b672c4ba90e726cc39b5c3d6dffd1ca817e143
tree       897868b63b1b6ae04a48a9f002bf25e830fb1056 /arch/s390/kernel/stacktrace.c
parent     be72ea09c1a5273abf8c6c52ef53e36c701cbf6a
s390/stackstrace: Detect vdso stack frames
Clear the backchain of the extra stack frame added by the vdso user wrapper
code. This allows the user stack walker to detect and skip the non-standard
stack frame. Without this, an incorrect instruction pointer would be added
to stack traces, and stack frame walking would continue with a more or
less random back chain.
Fixes: aa44433ac4ee ("s390: add USER_STACKTRACE support")
Reviewed-by: Jens Remus <jremus@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
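
For orientation, the wrapper frame the patch teaches the walker to recognize can be pictured roughly as below. This is an editorial sketch derived from the identifiers used in the diff (struct stack_frame_vdso_wrapper, STACK_FRAME_VDSO_OVERHEAD); the authoritative definitions live in arch/s390/include/asm/stacktrace.h and the vdso user wrapper code, and may contain additional fields:

/* Simplified sketch, not the kernel's verbatim definitions. */
struct stack_frame_user {
	unsigned long back_chain;	/* 0 in the vdso wrapper frame */
	unsigned long empty1[5];	/* not used by the walker */
	unsigned long gprs[10];		/* gprs[8]: saved return address */
};

struct stack_frame_vdso_wrapper {
	struct stack_frame_user sf;
	unsigned long return_address;	/* caller's ip, saved by the wrapper code */
};

/* Assumed here to be the size of the wrapper frame. */
#define STACK_FRAME_VDSO_OVERHEAD sizeof(struct stack_frame_vdso_wrapper)

A frame whose back chain is zero while the sampled instruction pointer lies inside the vdso mapping is taken to be this wrapper frame; the walker then reads return_address and steps over the fixed-size frame instead of following the (cleared) back chain.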
Diffstat (limited to 'arch/s390/kernel/stacktrace.c')
-rw-r--r--  arch/s390/kernel/stacktrace.c  28
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index b4485b0c7f06..640363b2a105 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -92,10 +92,16 @@ static inline bool ip_invalid(unsigned long ip)
 	return false;
 }
 
+static inline bool ip_within_vdso(unsigned long ip)
+{
+	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
+}
+
 void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
 				 struct perf_callchain_entry_ctx *entry,
 				 const struct pt_regs *regs, bool perf)
 {
+	struct stack_frame_vdso_wrapper __user *sf_vdso;
 	struct stack_frame_user __user *sf;
 	unsigned long ip, sp;
 	bool first = true;
@@ -112,11 +118,25 @@ void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *coo
 	while (1) {
 		if (__get_user(sp, &sf->back_chain))
 			break;
+		/*
+		 * VDSO entry code has a non-standard stack frame layout.
+		 * See VDSO user wrapper code for details.
+		 */
+		if (!sp && ip_within_vdso(ip)) {
+			sf_vdso = (void __user *)sf;
+			if (__get_user(ip, &sf_vdso->return_address))
+				break;
+			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
+			sf = (void __user *)sp;
+			if (__get_user(sp, &sf->back_chain))
+				break;
+		} else {
+			sf = (void __user *)sp;
+			if (__get_user(ip, &sf->gprs[8]))
+				break;
+		}
 		/* Sanity check: ABI requires SP to be 8 byte aligned. */
-		if (!sp || sp & 0x7)
-			break;
-		sf = (void __user *)sp;
-		if (__get_user(ip, &sf->gprs[8]))
+		if (sp & 0x7)
 			break;
 		if (ip_invalid(ip)) {
 			/*
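
To make the new control flow easier to follow outside the kernel, here is a stand-alone, user-space restatement of the walking logic, runnable as an ordinary C program. The frame layouts reuse the simplified sketch above, the vdso range and all addresses are fabricated, and the kernel's __get_user() error handling is replaced by plain loads, so this is only an illustration of the skip logic, not the kernel implementation:

#include <stdbool.h>
#include <stdio.h>

/* Same simplified layouts as in the sketch above. */
struct stack_frame_user {
	unsigned long back_chain;
	unsigned long empty1[5];
	unsigned long gprs[10];
};

struct stack_frame_vdso_wrapper {
	struct stack_frame_user sf;
	unsigned long return_address;
};

#define STACK_FRAME_VDSO_OVERHEAD sizeof(struct stack_frame_vdso_wrapper)

/* Pretend the vdso text is mapped at [0x7000, 0x8000). */
static bool ip_within_vdso(unsigned long ip)
{
	return ip >= 0x7000 && ip < 0x8000;
}

static void walk(struct stack_frame_user *sf, unsigned long ip)
{
	unsigned long sp;

	printf("  %#lx\n", ip);			/* sampled instruction pointer */
	for (;;) {
		sp = sf->back_chain;
		if (!sp && ip_within_vdso(ip)) {
			struct stack_frame_vdso_wrapper *w = (void *)sf;

			/* Wrapper frame: take the saved return address and
			 * step over the fixed-size vdso frame. */
			ip = w->return_address;
			sf = (struct stack_frame_user *)((unsigned long)sf +
							 STACK_FRAME_VDSO_OVERHEAD);
			sp = sf->back_chain;
		} else if (sp) {
			/* Regular frame: follow the back chain. */
			sf = (struct stack_frame_user *)sp;
			ip = sf->gprs[8];
		} else {
			break;		/* end of chain */
		}
		if (sp & 0x7)
			break;		/* ABI requires 8 byte aligned stack pointers */
		printf("  %#lx\n", ip);
		if (!sp)
			break;		/* frame above the wrapper was the outermost one */
	}
}

int main(void)
{
	/*
	 * Fabricated stack: the vdso wrapper frame sits directly below the
	 * frame of the function that called the vdso, which in turn was
	 * called from an outermost frame.
	 */
	static unsigned long buf[64];
	static struct stack_frame_user outer;
	struct stack_frame_vdso_wrapper *wrapper = (void *)buf;
	struct stack_frame_user *caller =
		(void *)((unsigned long)buf + STACK_FRAME_VDSO_OVERHEAD);

	wrapper->sf.back_chain = 0;		/* cleared by the wrapper code */
	wrapper->return_address = 0x2048;	/* ip in the vdso's caller */
	caller->back_chain = (unsigned long)&outer;
	outer.gprs[8] = 0x2010;			/* return address into the outermost function */
	outer.back_chain = 0;

	walk(&wrapper->sf, 0x7100);		/* sample hit inside the fake vdso range */
	return 0;
}

With the fabricated data above this prints 0x7100, 0x2048 and 0x2010, i.e. the sampled ip, the caller recovered from the wrapper's return_address, and the frame reached by following the regular back chain afterwards, which is exactly the trace the patch makes possible instead of stopping (or wandering off) at the zero back chain of the wrapper frame.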