author    Joerg Roedel <[email protected]>     2018-07-20 18:22:23 +0200
committer Thomas Gleixner <[email protected]>  2018-07-20 22:33:41 +0200
commit    d5e84c21dbf5ea458897f88346dc979909eed913 (patch)
tree      646bce78d13bda9742d8befcf11aa7a2b4f62572
parent    77754cfa09a6c528c38cbca9ee4cc4f7cf6ad6f2 (diff)
x86/entry/32: Check for VM86 mode in slow-path check
The SWITCH_TO_KERNEL_STACK macro only checks for CPL == 0 to go down the
slow and paranoid entry path. The problem is that this check also returns
true when coming from VM86 mode. This is not a problem by itself, as the
paranoid path handles VM86 stack-frames just fine, but it is not necessary
as the normal code path handles VM86 mode as well (and faster).

Extend the check to include VM86 mode. This also makes an optimization of
the paranoid path possible.

Signed-off-by: Joerg Roedel <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: "H . Peter Anvin" <[email protected]>
Cc: [email protected]
Cc: Linus Torvalds <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Jiri Kosina <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: David Laight <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: Eduardo Valentin <[email protected]>
Cc: Greg KH <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: Andrea Arcangeli <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Pavel Machek <[email protected]>
Cc: "David H . Gutteridge" <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
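For illustration only, not part of the patch: a minimal C sketch of the logic
the new check implements, assuming the usual x86 constants X86_EFLAGS_VM ==
0x00020000, SEGMENT_RPL_MASK == 3 and USER_RPL == 3. The helper name
entry_from_kernel is hypothetical.

#include <stdbool.h>
#include <stdint.h>

#define X86_EFLAGS_VM     0x00020000u  /* EFLAGS.VM, bit 17: virtual-8086 mode */
#define SEGMENT_RPL_MASK  0x00000003u  /* low two selector bits: the RPL */
#define USER_RPL          0x00000003u  /* ring 3 */

/* Mirrors the new SWITCH_TO_KERNEL_STACK check: take the slow, paranoid
 * entry path only for a genuine kernel-mode entry, i.e. when neither
 * EFLAGS.VM nor a user RPL in CS is set. */
static bool entry_from_kernel(uint32_t eflags, uint16_t cs)
{
	/* "movl PT_EFLAGS(%esp), %ecx; movb PT_CS(%esp), %cl": keep the
	 * upper bytes of EFLAGS and replace the low byte with the low
	 * byte of CS. */
	uint32_t mixed = (eflags & ~0xffu) | (cs & 0xffu);

	mixed &= X86_EFLAGS_VM | SEGMENT_RPL_MASK;

	/* "cmpl $USER_RPL, %ecx; jb": an unsigned value below USER_RPL
	 * means VM == 0 and RPL below 3 (0 for the kernel CS), so this
	 * really is an entry from kernel mode. A set VM bit (0x20000)
	 * or RPL == 3 sorts above USER_RPL. */
	return mixed < USER_RPL;
}

With this check, ordinary user entries (RPL == 3) and VM86 entries (VM set)
both take the fast path, and only a real kernel-mode CS falls through to the
paranoid one.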
-rw-r--r--  arch/x86/entry/entry_32.S  12
1 file changed, 10 insertions, 2 deletions
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 010cdb41e3c7..2767c625a52c 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -414,8 +414,16 @@
 	andl	$(0x0000ffff), PT_CS(%esp)
 
 	/* Special case - entry from kernel mode via entry stack */
-	testl	$SEGMENT_RPL_MASK, PT_CS(%esp)
-	jz	.Lentry_from_kernel_\@
+#ifdef CONFIG_VM86
+	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS
+	movb	PT_CS(%esp), %cl
+	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
+#else
+	movl	PT_CS(%esp), %ecx
+	andl	$SEGMENT_RPL_MASK, %ecx
+#endif
+	cmpl	$USER_RPL, %ecx
+	jb	.Lentry_from_kernel_\@
 
 	/* Bytes to copy */
 	movl	$PTREGS_SIZE, %ecx
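As an aside (again an illustration, not part of the patch), the movb trick in
the hunk above is safe because the two mask bits live in disjoint byte ranges:
EFLAGS.VM is bit 17 and survives the overwrite of the low byte of %ecx, while
the RPL bits of CS sit entirely inside that low byte. A compile-time sketch
with the same assumed constants:

#include <assert.h>

#define X86_EFLAGS_VM     0x00020000u  /* bit 17 */
#define SEGMENT_RPL_MASK  0x00000003u  /* bits 0-1 */

/* The VM flag lives above the byte that "movb PT_CS(%esp), %cl" overwrites... */
static_assert((X86_EFLAGS_VM & 0xffu) == 0,
	      "EFLAGS.VM is outside the low byte replaced by the CS byte");
/* ...and the RPL bits live entirely inside that byte, so mixing the two
 * values in one register loses no information the check needs. */
static_assert((SEGMENT_RPL_MASK & ~0xffu) == 0,
	      "the RPL bits come from the CS byte itself");

int main(void) { return 0; }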