path: root/arch/x86/entry
author		Andy Lutomirski <luto@kernel.org>	2019-11-20 09:49:33 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2019-11-21 19:37:43 +0100
commit		4c4fd55d3d59a41ddfa6ecba7e76928921759f43 (patch)
tree		6ba323bbb4d8e772ecb6679ce73b0d0260f8fcfe /arch/x86/entry
parent		x86/entry/32: Fix IRET exception (diff)
x86/entry/32: Use %ss segment where required
When re-building the IRET frame we use %eax as a destination %esp; make sure
to then also match the segment for when there is a nonzero SS base (ESPFIX).

[peterz: Changelog and minor edits]

Fixes: 3c88c692c287 ("x86/stackframe/32: Provide consistent pt_regs")
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: stable@kernel.org
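A note for orientation (not part of the commit itself): on 32-bit x86, memory
operands that use a general-purpose base register such as %eax are addressed
relative to %ds by default, whose base is 0, while pushes, pops, and
%esp-relative operands implicitly go through %ss. Under ESPFIX the kernel runs
on a stack segment with a nonzero base, so stack words reached via %eax need an
explicit %ss: override. A minimal illustrative sketch in AT&T syntax, with
hypothetical register contents:

	# %eax holds a stack address (e.g. regs->sp), %ecx a value to store
	movl	%ecx, -4(%eax)		# defaults to %ds (base 0); hits the wrong
					# linear address when the SS base is nonzero
	movl	%ecx, %ss:-4(%eax)	# explicit override; uses the SS base (ESPFIX-safe)
	pushl	%ecx			# pushes/pops and (%esp)-relative accesses
	popl	%ecx			# always use %ss, so they need no override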
Diffstat (limited to 'arch/x86/entry')
-rw-r--r--	arch/x86/entry/entry_32.S	19
1 file changed, 14 insertions, 5 deletions
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index f4335ac9692f..341597ecdcb5 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -210,6 +210,8 @@
/*
* The high bits of the CS dword (__csh) are used for CS_FROM_*.
* Clear them in case hardware didn't do this for us.
+ *
+ * Be careful: we may have nonzero SS base due to ESPFIX.
*/
andl $0x0000ffff, 3*4(%esp)
@@ -263,6 +265,13 @@
.endm
.macro IRET_FRAME
+ /*
+ * We're called with %ds, %es, %fs, and %gs from the interrupted
+ * frame, so we shouldn't use them. Also, we may be in ESPFIX
+ * mode and therefore have a nonzero SS base and an offset ESP,
+ * so any attempt to access the stack needs to use SS. (except for
+ * accesses through %esp, which automatically use SS.)
+ */
testl $CS_FROM_KERNEL, 1*4(%esp)
jz .Lfinished_frame_\@
@@ -276,20 +285,20 @@
movl 5*4(%esp), %eax # (modified) regs->sp
movl 4*4(%esp), %ecx # flags
- movl %ecx, -4(%eax)
+ movl %ecx, %ss:-1*4(%eax)
movl 3*4(%esp), %ecx # cs
andl $0x0000ffff, %ecx
- movl %ecx, -8(%eax)
+ movl %ecx, %ss:-2*4(%eax)
movl 2*4(%esp), %ecx # ip
- movl %ecx, -12(%eax)
+ movl %ecx, %ss:-3*4(%eax)
movl 1*4(%esp), %ecx # eax
- movl %ecx, -16(%eax)
+ movl %ecx, %ss:-4*4(%eax)
popl %ecx
- lea -16(%eax), %esp
+ lea -4*4(%eax), %esp
popl %eax
.Lfinished_frame_\@:
.endm
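
For reference, a sketch (derived from the hunk above, not text from the patch)
of what IRET_FRAME builds just below the original regs->sp before switching
%esp there:

	# %ss-relative, with %eax == (modified) regs->sp
	# -1*4(%eax)	flags
	# -2*4(%eax)	cs	(high CS_FROM_* bits already cleared)
	# -3*4(%eax)	ip
	# -4*4(%eax)	eax	<- %esp after "lea -4*4(%eax), %esp"
	#
	# the final "popl %eax" then leaves %esp pointing at ip/cs/flags,
	# i.e. a regular 3-word IRET frame for the iret that follows.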