path: root/arch/x86/entry/entry_32.S
author		Joerg Roedel <jroedel@suse.de>	2018-07-20 18:22:23 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2018-07-20 22:33:41 +0200
commit		d5e84c21dbf5ea458897f88346dc979909eed913 (patch)
tree		646bce78d13bda9742d8befcf11aa7a2b4f62572 /arch/x86/entry/entry_32.S
parent		perf/core: Make sure the ring-buffer is mapped in all page-tables (diff)
x86/entry/32: Check for VM86 mode in slow-path check
The SWITCH_TO_KERNEL_STACK macro only checks for CPL == 0 to go down the
slow and paranoid entry path. The problem is that this check also returns
true when coming from VM86 mode. This is not a problem by itself, as the
paranoid path handles VM86 stack-frames just fine, but it is not necessary
as the normal code path handles VM86 mode as well (and faster).

Extend the check to include VM86 mode. This also makes an optimization of
the paranoid path possible.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: linux-mm@kvack.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Waiman Long <llong@redhat.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: "David H . Gutteridge" <dhgutteridge@sympatico.ca>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: joro@8bytes.org
Link: https://lkml.kernel.org/r/1532103744-31902-3-git-send-email-joro@8bytes.org
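For readers who want the extended check in higher-level form, below is a
minimal C sketch of the predicate the new CONFIG_VM86 assembly computes.
It is not part of the patch; the function name and argument layout are
hypothetical, and the constant values are assumed to match their kernel
definitions (X86_EFLAGS_VM is bit 17 of EFLAGS; SEGMENT_RPL_MASK and
USER_RPL are both 3 on 32-bit x86).

	#include <stdbool.h>
	#include <stdint.h>

	#define X86_EFLAGS_VM		0x00020000u	/* virtual-8086 mode flag */
	#define SEGMENT_RPL_MASK	0x00000003u	/* requested privilege level bits in CS */
	#define USER_RPL		0x00000003u	/* ring 3 == user mode */

	/* true: take the slow .Lentry_from_kernel path; false: normal copy path */
	static bool entry_from_kernel(uint32_t pt_eflags, uint32_t pt_cs)
	{
		/* movl PT_EFLAGS(%esp), %ecx; movb PT_CS(%esp), %cl:
		 * low byte of ECX comes from CS, the rest from EFLAGS */
		uint32_t ecx = (pt_eflags & ~0xffu) | (pt_cs & 0xffu);

		/* andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx:
		 * keep only the VM flag and the RPL bits */
		ecx &= X86_EFLAGS_VM | SEGMENT_RPL_MASK;

		/* cmpl $USER_RPL, %ecx; jb: unsigned "below" compare.
		 * A VM86 frame has the VM bit set, so ecx >= 0x20000
		 * and the kernel-mode branch is never taken. */
		return ecx < USER_RPL;
	}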
Diffstat (limited to 'arch/x86/entry/entry_32.S')
-rw-r--r--	arch/x86/entry/entry_32.S	12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 010cdb41e3c7..2767c625a52c 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -414,8 +414,16 @@
 	andl	$(0x0000ffff), PT_CS(%esp)
 
 	/* Special case - entry from kernel mode via entry stack */
-	testl	$SEGMENT_RPL_MASK, PT_CS(%esp)
-	jz	.Lentry_from_kernel_\@
+#ifdef CONFIG_VM86
+	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS
+	movb	PT_CS(%esp), %cl
+	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
+#else
+	movl	PT_CS(%esp), %ecx
+	andl	$SEGMENT_RPL_MASK, %ecx
+#endif
+	cmpl	$USER_RPL, %ecx
+	jb	.Lentry_from_kernel_\@
 
 	/* Bytes to copy */
 	movl	$PTREGS_SIZE, %ecx
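A note on the branch condition, assuming the constant values given in the
sketch above: jb is an unsigned "below" comparison, so the slow
.Lentry_from_kernel path is taken only when the masked value is 0, 1 or 2,
i.e. the VM bit is clear and the CS RPL is below USER_RPL. A VM86 frame
always has the VM bit set, leaving the value at or above 0x20000, so after
this patch it falls through to the faster normal copy path. Without
CONFIG_VM86 the VM bit can never be set, which is why the #else branch only
needs to mask out the RPL bits of CS.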