author    Ard Biesheuvel <ardb@kernel.org>    2020-01-03 12:39:42 +0100
committer Ingo Molnar <mingo@kernel.org>      2020-01-10 18:55:02 +0100
commit    e5f930fe8dafd2055220c95958926af16ee20713 (patch)
tree      9f6c51bdecd6b109e2a004ef9213343975018a72 /arch/x86/platform/efi/efi_stub_64.S
parent    efi/x86: Simplify i386 efi_call_phys() firmware call wrapper (diff)
efi/x86: Simplify 64-bit EFI firmware call wrapper
The efi_call() wrapper used to invoke EFI runtime services serves a
number of purposes:
 - realign the stack to 16 bytes
 - preserve FP and CR0 register state
 - translate from SysV to MS calling convention.

Preserving CR0.TS is no longer necessary in Linux, and preserving the FP
register state is also redundant in most cases, since efi_call() is almost
always used from within the scope of a pair of kernel_fpu_begin()/
kernel_fpu_end() calls, with the exception of the early call to
SetVirtualAddressMap() and the SGI UV support code. So let's add a pair of
kernel_fpu_begin()/_end() calls there as well, remove the unnecessary code
from the assembly implementation of efi_call(), and keep only the pieces
that deal with the stack alignment and the ABI translation.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Arvind Sankar <nivedita@alum.mit.edu>
Cc: Matthew Garrett <mjg59@google.com>
Cc: linux-efi@vger.kernel.org
Link: https://lkml.kernel.org/r/20200103113953.9571-10-ardb@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
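For context, the kernel_fpu_begin()/kernel_fpu_end() bracketing described above would look roughly like the sketch below at the early SetVirtualAddressMap() call site. This is a minimal illustration, not the patch's code: the call sites actually changed live outside the file covered by this diffstat, the function and parameter names are invented for the example, and efi_call() is assumed to be reachable as the variadic C thunk it is declared as on x86-64.

    #include <linux/efi.h>          /* efi_status_t, efi_memory_desc_t, u32 */
    #include <asm/efi.h>            /* efi_call() thunk declaration (assumed) */
    #include <asm/fpu/api.h>        /* kernel_fpu_begin(), kernel_fpu_end() */

    /*
     * Illustrative sketch only: bracket the early SetVirtualAddressMap()
     * firmware call with an FPU-save region, so the trimmed efi_call()
     * stub no longer has to preserve XMM/CR0 state itself. The function
     * name and parameters below are hypothetical stand-ins for the real
     * call site.
     */
    static efi_status_t example_set_virtual_address_map(void *svam, /* firmware entry point */
                                                        unsigned long memory_map_size,
                                                        unsigned long descriptor_size,
                                                        u32 descriptor_version,
                                                        efi_memory_desc_t *virtual_map)
    {
            efi_status_t status;

            kernel_fpu_begin();     /* save FP/SIMD state before entering firmware */
            status = efi_call(svam, memory_map_size, descriptor_size,
                              descriptor_version, virtual_map);
            kernel_fpu_end();       /* restore FP/SIMD state */

            return status;
    }

With every firmware call wrapped this way, the only state the assembly stub still has to manage is the stack alignment and the calling-convention translation shown in the diff below.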
Diffstat (limited to 'arch/x86/platform/efi/efi_stub_64.S')
-rw-r--r--  arch/x86/platform/efi/efi_stub_64.S  39
1 file changed, 4 insertions, 35 deletions
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index b1d2313fe3bf..e7e1020f4ccb 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -8,41 +8,12 @@
*/
#include <linux/linkage.h>
-#include <asm/segment.h>
-#include <asm/msr.h>
-#include <asm/processor-flags.h>
-#include <asm/page_types.h>
-
-#define SAVE_XMM \
- mov %rsp, %rax; \
- subq $0x70, %rsp; \
- and $~0xf, %rsp; \
- mov %rax, (%rsp); \
- mov %cr0, %rax; \
- clts; \
- mov %rax, 0x8(%rsp); \
- movaps %xmm0, 0x60(%rsp); \
- movaps %xmm1, 0x50(%rsp); \
- movaps %xmm2, 0x40(%rsp); \
- movaps %xmm3, 0x30(%rsp); \
- movaps %xmm4, 0x20(%rsp); \
- movaps %xmm5, 0x10(%rsp)
-
-#define RESTORE_XMM \
- movaps 0x60(%rsp), %xmm0; \
- movaps 0x50(%rsp), %xmm1; \
- movaps 0x40(%rsp), %xmm2; \
- movaps 0x30(%rsp), %xmm3; \
- movaps 0x20(%rsp), %xmm4; \
- movaps 0x10(%rsp), %xmm5; \
- mov 0x8(%rsp), %rsi; \
- mov %rsi, %cr0; \
- mov (%rsp), %rsp
+#include <asm/nospec-branch.h>
SYM_FUNC_START(efi_call)
pushq %rbp
movq %rsp, %rbp
- SAVE_XMM
+ and $~0xf, %rsp
mov 16(%rbp), %rax
subq $48, %rsp
mov %r9, 32(%rsp)
@@ -50,9 +21,7 @@ SYM_FUNC_START(efi_call)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
- call *%rdi
- addq $48, %rsp
- RESTORE_XMM
- popq %rbp
+ CALL_NOSPEC %rdi
+ leave
ret
SYM_FUNC_END(efi_call)
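Note on the remaining assembly: after this change the stub only realigns the stack and translates the System V AMD64 calling convention used by the kernel into the Microsoft x64 convention expected by the firmware. The annotation below is an illustrative summary of that register shuffle as read from the hunks above; the C declaration at the end is an assumption about how the thunk is exposed, not part of this patch.

    #include <linux/types.h>        /* u64 */

    /*
     * SysV -> MS x64 argument mapping performed by the trimmed efi_call():
     *
     *   %rdi (function pointer)  -> indirect branch target (CALL_NOSPEC %rdi)
     *   %rsi (EFI arg 1)         -> %rcx
     *   %rdx (EFI arg 2)         -> %rdx (already in place)
     *   %rcx (EFI arg 3)         -> %r8
     *   %r8  (EFI arg 4)         -> %r9
     *   %r9  (EFI arg 5)         -> 32(%rsp), the first outgoing slot past
     *                               the 32-byte MS shadow space
     *   16(%rbp) (EFI arg 6)     -> the following outgoing stack slot,
     *                               staged through %rax (that store is
     *                               context outside the hunks shown above)
     */
    extern u64 efi_call(void *fp, ...);   /* assumed x86-64 declaration of the thunk */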