author	Andy Lutomirski <[email protected]>	2017-08-14 22:36:19 -0700
committer	Ingo Molnar <[email protected]>	2017-08-15 10:10:58 +0200
commit	fa2016a8e7d846b306e431646d250500e1da0c33 (patch)
tree	c3fb53e1173b7e2dacfd7b9f4b04d29fd5845b33
parent	bf4d1a83758368c842c94cab9661a75ca98bc848 (diff)
x86/xen/64: Fix the reported SS and CS in SYSCALL
When I cleaned up the Xen SYSCALL entries, I inadvertently changed
the reported segment registers.  Before my patch, regs->ss was
__USER(32)_DS and regs->cs was __USER(32)_CS.  After the patch, they
are FLAT_USER_CS/DS(32).

This had a couple unfortunate effects.  It confused the opportunistic
fast return logic.  It also significantly increased the risk of
triggering a nasty glibc bug:

  https://sourceware.org/bugzilla/show_bug.cgi?id=21269

Update the Xen entry code to change it back.

Reported-by: Brian Gerst <[email protected]>
Signed-off-by: Andy Lutomirski <[email protected]>
Cc: Andrew Cooper <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Fixes: 8a9949bc71a7 ("x86/xen/64: Rearrange the SYSCALL entries")
Link: http://lkml.kernel.org/r/daba8351ea2764bb30272296ab9ce08a81bd8264.1502775273.git.luto@kernel.org
Signed-off-by: Ingo Molnar <[email protected]>
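For illustration only, here is a minimal C sketch of the kind of check the opportunistic fast-return (SYSRET) path makes against the saved registers. The real logic is assembly in arch/x86/entry/entry_64.S; the struct, function, and SKETCH_* names below are invented for this sketch. It shows why a reported CS other than __USER_CS pushes every syscall return onto the slower IRET path:

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the kernel's segment selectors (arch/x86/include/asm/segment.h). */
#define SKETCH_USER_CS	0x33	/* __USER_CS on x86-64 Linux */
#define SKETCH_USER_DS	0x2b	/* __USER_DS on x86-64 Linux */

/* Stand-in for the relevant pt_regs fields. */
struct sketch_regs {
	uint64_t ip, cx, flags, r11, cs, ss;
};

/* Illustrative sketch of the opportunistic-SYSRET test, not the actual entry code. */
static bool can_use_sysret(const struct sketch_regs *regs)
{
	if (regs->cs != SKETCH_USER_CS || regs->ss != SKETCH_USER_DS)
		return false;	/* saved segments must match what SYSRET restores */
	if (regs->ip != regs->cx)
		return false;	/* SYSRET reloads RIP from RCX */
	if (regs->flags != regs->r11)
		return false;	/* ...and RFLAGS from R11 */
	return true;
}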
-rw-r--r--	arch/x86/xen/xen-asm_64.S	18
1 files changed, 18 insertions, 0 deletions
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index a8a4f4c460a6..c5fee2680abc 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -88,6 +88,15 @@ RELOC(xen_sysret64, 1b+1)
 ENTRY(xen_syscall_target)
 	popq %rcx
 	popq %r11
+
+	/*
+	 * Neither Xen nor the kernel really knows what the old SS and
+	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
+	 * report those values even though Xen will guess its own values.
+	 */
+	movq $__USER_DS, 4*8(%rsp)
+	movq $__USER_CS, 1*8(%rsp)
+
 	jmp entry_SYSCALL_64_after_hwframe
 ENDPROC(xen_syscall_target)
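To make the two stores above concrete: after the popq instructions, %rsp points at the IRET-style frame Xen constructed, so 1*8(%rsp) is the CS slot and 4*8(%rsp) is the SS slot. A hedged C sketch of that layout (the struct and field names are illustrative, not a kernel definition):

#include <stdint.h>

/* Frame as seen at the movq instructions above; offsets are from %rsp. */
struct sketch_xen_syscall_frame {
	uint64_t rip;		/* 0*8(%rsp) */
	uint64_t cs;		/* 1*8(%rsp): overwritten with $__USER_CS */
	uint64_t rflags;	/* 2*8(%rsp) */
	uint64_t rsp;		/* 3*8(%rsp) */
	uint64_t ss;		/* 4*8(%rsp): overwritten with $__USER_DS */
};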
@@ -97,6 +106,15 @@ ENDPROC(xen_syscall_target)
 ENTRY(xen_syscall32_target)
 	popq %rcx
 	popq %r11
+
+	/*
+	 * Neither Xen nor the kernel really knows what the old SS and
+	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
+	 * report those values even though Xen will guess its own values.
+	 */
+	movq $__USER32_DS, 4*8(%rsp)
+	movq $__USER32_CS, 1*8(%rsp)
+
 	jmp entry_SYSCALL_compat_after_hwframe
 ENDPROC(xen_syscall32_target)
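For reference, the selector values stored by the two hunks, assuming the standard x86-64 Linux GDT layout from arch/x86/include/asm/segment.h (the SKETCH_* names are invented for this note):

/* Numeric values of the selectors used above. */
enum sketch_selectors {
	SKETCH_USER32_CS = 0x23,	/* __USER32_CS: 32-bit user code, RPL 3 */
	SKETCH_USER_DS   = 0x2b,	/* __USER_DS; __USER32_DS is defined to the same value */
	SKETCH_USER_CS   = 0x33,	/* __USER_CS: 64-bit user code, RPL 3 */
};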