author		Peter Zijlstra <peterz@infradead.org>	2020-05-12 18:17:12 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2020-06-11 15:14:40 +0200
commit		1c3e5d3f60e26415d4227aa1193cf9e2db4df834 (patch)
tree		243f10f5e8d725c4417529c8d5f514f31fb06adf /arch/x86/entry/entry_64_compat.S
parent		x86/speculation/mds: Mark mds_user_clear_cpu_buffers() __always_inline (diff)
download	linux-dev-1c3e5d3f60e26415d4227aa1193cf9e2db4df834.tar.xz
		linux-dev-1c3e5d3f60e26415d4227aa1193cf9e2db4df834.zip
x86/entry: Make entry_64_compat.S objtool clean
Currently entry_64_compat is exempt from objtool, but with vmlinux
mode there is no hiding it.

Make the following changes to make it pass:

 - change entry_SYSENTER_compat to STT_NOTYPE; it's not a function
   and doesn't have function type stack setup.

 - mark all STT_NOTYPE symbols with UNWIND_HINT_EMPTY; so we do
   validate them and don't treat them as unreachable.

 - don't abuse RSP as a temp register, this confuses objtool mightily
   as it (rightfully) thinks we're doing unspeakable things to the
   stack.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lkml.kernel.org/r/20200505134341.272248024@linutronix.de
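For context on the first two changes: SYM_FUNC_START() emits an STT_FUNC symbol that objtool checks against normal C calling conventions, while SYM_CODE_START() emits STT_NOTYPE for hand-written entry code. A minimal sketch of the annotation pattern the patch adopts, using a hypothetical symbol name (the real macros come from <linux/linkage.h> and <asm/unwind_hints.h>):

	#include <linux/linkage.h>
	#include <asm/unwind_hints.h>

	/*
	 * STT_NOTYPE code: not callable from C and without function-type
	 * stack setup, so objtool must not expect a normal stack frame.
	 */
	SYM_CODE_START(example_entry)		/* hypothetical entry point */
		UNWIND_HINT_EMPTY		/* no unwind state known here; validate, don't mark unreachable */
		/* ... hand-written entry code ... */
	SYM_CODE_END(example_entry)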
Diffstat (limited to 'arch/x86/entry/entry_64_compat.S')
-rw-r--r--	arch/x86/entry/entry_64_compat.S	25
1 file changed, 20 insertions, 5 deletions
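The RSP abuse called out in the changelog is visible in the first hunk below: SWITCH_TO_KERNEL_CR3 needs a scratch register, and the old code handed it %rsp because the stack pointer was about to be reloaded anyway. objtool (rightfully) treats writes to %rsp as stack manipulation, so the patch saves and restores a general-purpose register around the CR3 switch instead. The shape of the fix in isolation:

	pushq	%rax				/* free %rax for use as scratch */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax	/* clobbers %rax instead of %rsp */
	popq	%rax				/* restore user %rax */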
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 7c29ed898033..0f974ae01e62 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -46,12 +46,14 @@
* ebp user stack
* 0(%ebp) arg6
*/
-SYM_FUNC_START(entry_SYSENTER_compat)
+SYM_CODE_START(entry_SYSENTER_compat)
+ UNWIND_HINT_EMPTY
/* Interrupts are off on entry. */
SWAPGS
- /* We are about to clobber %rsp anyway, clobbering here is OK */
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+ pushq %rax
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+ popq %rax
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -104,6 +106,9 @@ SYM_FUNC_START(entry_SYSENTER_compat)
xorl %r14d, %r14d /* nospec r14 */
pushq $0 /* pt_regs->r15 = 0 */
xorl %r15d, %r15d /* nospec r15 */
+
+ UNWIND_HINT_REGS
+
cld
/*
@@ -141,7 +146,7 @@ SYM_FUNC_START(entry_SYSENTER_compat)
popfq
jmp .Lsysenter_flags_fixed
SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL)
-SYM_FUNC_END(entry_SYSENTER_compat)
+SYM_CODE_END(entry_SYSENTER_compat)
/*
* 32-bit SYSCALL entry.
@@ -191,6 +196,7 @@ SYM_FUNC_END(entry_SYSENTER_compat)
* 0(%esp) arg6
*/
SYM_CODE_START(entry_SYSCALL_compat)
+ UNWIND_HINT_EMPTY
/* Interrupts are off on entry. */
swapgs
@@ -241,6 +247,8 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
pushq $0 /* pt_regs->r15 = 0 */
xorl %r15d, %r15d /* nospec r15 */
+ UNWIND_HINT_REGS
+
movq %rsp, %rdi
call do_fast_syscall_32
/* XEN PV guests always use IRET path */
@@ -328,6 +336,7 @@ SYM_CODE_END(entry_SYSCALL_compat)
* ebp arg6
*/
SYM_CODE_START(entry_INT80_compat)
+ UNWIND_HINT_EMPTY
/*
* Interrupts are off on entry.
*/
@@ -349,8 +358,11 @@ SYM_CODE_START(entry_INT80_compat)
/* Need to switch before accessing the thread stack. */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+
/* In the Xen PV case we already run on the thread stack. */
- ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+ ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+
+ movq %rsp, %rdi
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
pushq 6*8(%rdi) /* regs->ss */
@@ -389,6 +401,9 @@ SYM_CODE_START(entry_INT80_compat)
xorl %r14d, %r14d /* nospec r14 */
pushq %r15 /* pt_regs->r15 */
xorl %r15d, %r15d /* nospec r15 */
+
+ UNWIND_HINT_REGS
+
cld
movq %rsp, %rdi
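Each entry path above also gains an UNWIND_HINT_REGS once the last pt_regs slot has been pushed. A short sketch of what that annotation asserts, shown against the register-clearing sequence from the diff:

	pushq	$0			/* pt_regs->r15 = 0 */
	xorl	%r15d, %r15d		/* nospec r15 */

	/*
	 * A complete struct pt_regs now sits on top of the stack, so
	 * UNWIND_HINT_REGS re-establishes unwind state for objtool after
	 * the UNWIND_HINT_EMPTY placed at the entry point.
	 */
	UNWIND_HINT_REGS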