author     2018-07-03 23:21:15 +0000
committer  2018-07-03 23:21:15 +0000
commit     db0a8dc5361509a095803fba56caec4ceadefc83
tree       3778902df8cf9a90b54835039638b4dee6ad817c
parent     Add retguard macros for libc.
Add retguard macros for kernel asm.
ok deraadt, ok mlarkin (vmm_support)
-rw-r--r--  sys/arch/amd64/amd64/acpi_wakecode.S  |  13
-rw-r--r--  sys/arch/amd64/amd64/aes_intel.S      |  58
-rw-r--r--  sys/arch/amd64/amd64/copy.S           |  23
-rw-r--r--  sys/arch/amd64/amd64/locore.S         |  20
-rw-r--r--  sys/arch/amd64/amd64/spl.S            |  10
-rw-r--r--  sys/arch/amd64/amd64/vmm_support.S    |  39

6 files changed, 151 insertions, 12 deletions
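
A note on the macros this diff relies on: RETGUARD_SETUP, RETGUARD_CHECK, RETGUARD_PUSH/RETGUARD_POP and RETGUARD_SETUP_OFF are defined in the amd64 asm headers, not in this commit. The sketch below is only a simplified illustration of the scheme, under the assumption of a per-function random cookie symbol (here called __retguard_example_fn; the function name example_fn and the local label 99 are made up for the example): at entry the cookie is XORed with the saved return address into a scratch register, and just before ret the XOR is undone and compared against the cookie, trapping if the return address on the stack was tampered with in between.

/* Hypothetical expansion of a retguard-protected leaf function (sketch only). */
NENTRY(example_fn)
	movq	__retguard_example_fn(%rip), %r11	/* load per-function random cookie */
	xorq	(%rsp), %r11				/* r11 = cookie ^ return address */
	movq	%rdi, %rax				/* trivial function body (identity) */
	xorq	(%rsp), %r11				/* undo the XOR; should yield the cookie again */
	cmpq	__retguard_example_fn(%rip), %r11
	je	99f					/* return address intact: return normally */
	int3						/* corrupted return address: trap */
99:	ret

RETGUARD_SETUP_OFF is the variant used in the cpu_switchto hunk below, where the return address is no longer at (%rsp) when the guard is set up (6*8 bytes of callee-saved registers sit above it), and RETGUARD_PUSH/RETGUARD_POP spill the scratch register across code that clobbers it, as in the AES-XTS helpers, Xspllower, and the vmm guest-entry paths.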
diff --git a/sys/arch/amd64/amd64/acpi_wakecode.S b/sys/arch/amd64/amd64/acpi_wakecode.S
index a9d3f4e778e..6c1abf1fb4c 100644
--- a/sys/arch/amd64/amd64/acpi_wakecode.S
+++ b/sys/arch/amd64/amd64/acpi_wakecode.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: acpi_wakecode.S,v 1.43 2018/06/21 07:33:30 mlarkin Exp $ */
+/* $OpenBSD: acpi_wakecode.S,v 1.44 2018/07/03 23:21:15 mortimer Exp $ */
 /*
  * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
  * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
@@ -465,6 +465,7 @@ _ACPI_TRMP_OFFSET(.Lhib_hlt_real)
 	.code64
 	/* Switch to hibernate resume pagetable */
 NENTRY(hibernate_activate_resume_pt_machdep)
+	RETGUARD_SETUP(hibernate_activate_resume_pt_machdep, r11)
 	/* Enable large pages */
 	movq	%cr4, %rax
 	orq	$(CR4_PSE), %rax
@@ -478,23 +479,27 @@ NENTRY(hibernate_activate_resume_pt_machdep)
 	movq	%rax, %cr3
 	jmp	1f
-1:	nop
+1:	RETGUARD_CHECK(hibernate_activate_resume_pt_machdep, r11)
 	ret

 /*
  * Switch to the private resume-time hibernate stack
  */
 NENTRY(hibernate_switch_stack_machdep)
+	RETGUARD_SETUP(hibernate_switch_stack_machdep, r11)
 	movq	(%rsp), %rax
 	movq	%rax, HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET
 	movq	$(HIBERNATE_STACK_PAGE + HIBERNATE_STACK_OFFSET), %rax
 	movq	%rax, %rsp

 	/* On our own stack from here onward */
+	RETGUARD_CHECK(hibernate_switch_stack_machdep, r11)
 	ret

 NENTRY(hibernate_flush)
+	RETGUARD_SETUP(hibernate_flush, r11)
 	invlpg	HIBERNATE_INFLATE_PAGE
+	RETGUARD_CHECK(hibernate_flush, r11)
 	ret
 #endif /* HIBERNATE */
@@ -716,6 +721,9 @@ NENTRY(acpi_savecpu)
 	movq	%r14, .Lacpi_saved_r14
 	movq	%r15, .Lacpi_saved_r15

+	/* Scratch reg saved - set up retguard */
+	RETGUARD_SETUP(acpi_savecpu, r11)
+
 	pushfq
 	popq	.Lacpi_saved_fl
@@ -786,4 +794,5 @@ NENTRY(acpi_savecpu)
 	str	.Lacpi_saved_tr

 	movl	$1, %eax
+	RETGUARD_CHECK(acpi_savecpu, r11)
 	ret
diff --git a/sys/arch/amd64/amd64/aes_intel.S b/sys/arch/amd64/amd64/aes_intel.S
index 85f216a1bb4..c463d679cf4 100644
--- a/sys/arch/amd64/amd64/aes_intel.S
+++ b/sys/arch/amd64/amd64/aes_intel.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: aes_intel.S,v 1.10 2018/01/17 16:33:33 deraadt Exp $ */
+/* $OpenBSD: aes_intel.S,v 1.11 2018/07/03 23:21:15 mortimer Exp $ */
 /*
  * Implement AES algorithm in Intel AES-NI instructions.
@@ -106,6 +106,7 @@ _key_expansion_128:
 _key_expansion_256a:
+	RETGUARD_SETUP(_key_expansion_128, rax)
 	pshufd $0b11111111,%xmm1,%xmm1
 	shufps $0b00010000,%xmm0,%xmm4
 	pxor %xmm4,%xmm0
@@ -114,9 +115,11 @@ _key_expansion_256a:
 	pxor %xmm1,%xmm0
 	movaps %xmm0,(%rcx)
 	add $0x10,%rcx
+	RETGUARD_CHECK(_key_expansion_128, rax)
 	ret

 _key_expansion_192a:
+	RETGUARD_SETUP(_key_expansion_192a, rax)
 	pshufd $0b01010101,%xmm1,%xmm1
 	shufps $0b00010000,%xmm0,%xmm4
 	pxor %xmm4,%xmm0
@@ -137,9 +140,11 @@ _key_expansion_192a:
 	shufps $0b01001110,%xmm2,%xmm1
 	movaps %xmm1,16(%rcx)
 	add $0x20,%rcx
+	RETGUARD_CHECK(_key_expansion_192a, rax)
 	ret

 _key_expansion_192b:
+	RETGUARD_SETUP(_key_expansion_192b, rax)
 	pshufd $0b01010101,%xmm1,%xmm1
 	shufps $0b00010000,%xmm0,%xmm4
 	pxor %xmm4,%xmm0
@@ -155,9 +160,11 @@ _key_expansion_192b:
 	movaps %xmm0,(%rcx)
 	add $0x10,%rcx
+	RETGUARD_CHECK(_key_expansion_192b, rax)
 	ret

 _key_expansion_256b:
+	RETGUARD_SETUP(_key_expansion_256b, rax)
 	pshufd $0b10101010,%xmm1,%xmm1
 	shufps $0b00010000,%xmm2,%xmm4
 	pxor %xmm4,%xmm2
@@ -166,12 +173,14 @@ _key_expansion_256b:
 	pxor %xmm1,%xmm2
 	movaps %xmm2,(%rcx)
 	add $0x10,%rcx
+	RETGUARD_CHECK(_key_expansion_256b, rax)
 	ret

 /*
  * void aesni_set_key(struct aesni_session *ses, uint8_t *key, size_t len)
  */
 ENTRY(aesni_set_key)
+	RETGUARD_SETUP(aesni_set_key, r11)
 	movups (%rsi),%xmm0		# user key (first 16 bytes)
 	movaps %xmm0,(%rdi)
 	lea 0x10(%rdi),%rcx		# key addr
@@ -267,16 +276,19 @@ ENTRY(aesni_set_key)
 	sub $0x10,%rsi
 	cmp %rcx,%rdi
 	jb 4b
+	RETGUARD_CHECK(aesni_set_key, r11)
 	ret

 /*
  * void aesni_enc(struct aesni_session *ses, uint8_t *dst, uint8_t *src)
  */
 ENTRY(aesni_enc)
+	RETGUARD_SETUP(aesni_enc, r11)
 	movl 480(KEYP),KLEN		# key length
 	movups (INP),STATE		# input
 	call _aesni_enc1
 	movups STATE,(OUTP)		# output
+	RETGUARD_CHECK(aesni_enc, r11)
 	ret

 /*
@@ -292,6 +304,7 @@ ENTRY(aesni_enc)
  *	TKEYP (T1)
  */
 _aesni_enc1:
+	RETGUARD_SETUP(_aesni_enc1, rax)
 	movaps (KEYP),KEY		# key
 	mov KEYP,TKEYP
 	pxor KEY,STATE			# round 0
@@ -333,6 +346,7 @@ _aesni_enc1:
 	aesenc KEY,STATE
 	movaps 0x70(TKEYP),KEY
 	aesenclast KEY,STATE
+	RETGUARD_CHECK(_aesni_enc1, rax)
 	ret

 /*
@@ -354,6 +368,7 @@ _aesni_enc1:
  *	TKEYP (T1)
  */
 _aesni_enc4:
+	RETGUARD_SETUP(_aesni_enc4, rax)
 	movaps (KEYP),KEY		# key
 	mov KEYP,TKEYP
 	pxor KEY,STATE1			# round 0
@@ -440,17 +455,20 @@ _aesni_enc4:
 	aesenclast KEY,STATE2
 	aesenclast KEY,STATE3
 	aesenclast KEY,STATE4
+	RETGUARD_CHECK(_aesni_enc4, rax)
 	ret

 /*
  * void aesni_dec(struct aesni_session *ses, uint8_t *dst, uint8_t *src)
  */
 ENTRY(aesni_dec)
+	RETGUARD_SETUP(aesni_dec, r11)
 	mov 480(KEYP),KLEN		# key length
 	add $240,KEYP
 	movups (INP),STATE		# input
 	call _aesni_dec1
 	movups STATE,(OUTP)		# output
+	RETGUARD_CHECK(aesni_dec, r11)
 	ret

 /*
@@ -466,6 +484,7 @@ ENTRY(aesni_dec)
  *	TKEYP (T1)
  */
 _aesni_dec1:
+	RETGUARD_SETUP(_aesni_dec1, rax)
 	movaps (KEYP),KEY		# key
 	mov KEYP,TKEYP
 	pxor KEY,STATE			# round 0
@@ -507,6 +526,7 @@ _aesni_dec1:
 	aesdec KEY,STATE
 	movaps 0x70(TKEYP),KEY
 	aesdeclast KEY,STATE
+	RETGUARD_CHECK(_aesni_dec1, rax)
 	ret

 /*
@@ -528,6 +548,7 @@ _aesni_dec1:
  *	TKEYP (T1)
  */
 _aesni_dec4:
+	RETGUARD_SETUP(_aesni_dec4, rax)
 	movaps (KEYP),KEY		# key
 	mov KEYP,TKEYP
 	pxor KEY,STATE1			# round 0
@@ -614,6 +635,7 @@ _aesni_dec4:
 	aesdeclast KEY,STATE2
 	aesdeclast KEY,STATE3
 	aesdeclast KEY,STATE4
+	RETGUARD_CHECK(_aesni_dec4, rax)
 	ret

 #if 0
@@ -622,6 +644,7 @@ _aesni_dec4:
  * size_t len)
  */
 ENTRY(aesni_ecb_enc)
+	RETGUARD_SETUP(aesni_ecb_enc, r11)
 	test LEN,LEN			# check length
 	jz 3f
 	mov 480(KEYP),KLEN
@@ -658,6 +681,7 @@ ENTRY(aesni_ecb_enc)
 	cmp $16,LEN
 	jge 2b
 3:
+	RETGUARD_CHECK(aesni_ecb_enc, r11)
 	ret

 /*
@@ -665,6 +689,7 @@ ENTRY(aesni_ecb_enc)
  * size_t len);
  */
 ENTRY(aesni_ecb_dec)
+	RETGUARD_SETUP(aesni_ecb_dec, r11)
 	test LEN,LEN
 	jz 3f
 	mov 480(KEYP),KLEN
@@ -702,6 +727,7 @@ ENTRY(aesni_ecb_dec)
 	cmp $16,LEN
 	jge 2b
 3:
+	RETGUARD_CHECK(aesni_ecb_dec, r11)
 	ret
 #endif

@@ -710,6 +736,7 @@ ENTRY(aesni_ecb_dec)
  * size_t len, uint8_t *iv)
  */
 ENTRY(aesni_cbc_enc)
+	RETGUARD_SETUP(aesni_cbc_enc, r11)
 	cmp $16,LEN
 	jb 2f
 	mov 480(KEYP),KLEN
@@ -727,6 +754,7 @@ ENTRY(aesni_cbc_enc)
 	jge 1b
 	movups STATE,(IVP)
 2:
+	RETGUARD_CHECK(aesni_cbc_enc, r11)
 	ret

 /*
@@ -734,6 +762,7 @@ ENTRY(aesni_cbc_enc)
  * size_t len, uint8_t *iv)
  */
 ENTRY(aesni_cbc_dec)
+	RETGUARD_SETUP(aesni_cbc_dec, r11)
 	cmp $16,LEN
 	jb 4f
 	mov 480(KEYP),KLEN
@@ -784,6 +813,7 @@ ENTRY(aesni_cbc_dec)
 3:
 	movups IV,(IVP)
 4:
+	RETGUARD_CHECK(aesni_cbc_dec, r11)
 	ret

 /*
@@ -799,6 +829,7 @@ ENTRY(aesni_cbc_dec)
  *	BSWAP_MASK == endian swapping mask
  */
 _aesni_inc_init:
+	RETGUARD_SETUP(_aesni_inc_init, rax)
 	movdqa CTR,IV
 	pslldq $8,IV
 	movdqu .Lbswap_mask,BSWAP_MASK
@@ -806,6 +837,7 @@ _aesni_inc_init:
 	mov $1,TCTR_LOW
 	movd TCTR_LOW,INC
 	movd CTR,TCTR_LOW
+	RETGUARD_CHECK(_aesni_inc_init, rax)
 	ret

 /*
@@ -824,6 +856,7 @@ _aesni_inc_init:
  *	TCTR_LOW: == lower dword of CTR
  */
 _aesni_inc:
+	RETGUARD_SETUP(_aesni_inc, rax)
 	paddq INC,CTR
 	add $1,TCTR_LOW
 	jnc 1f
@@ -833,6 +866,7 @@ _aesni_inc:
 1:
 	movaps CTR,IV
 	pshufb BSWAP_MASK,IV
+	RETGUARD_CHECK(_aesni_inc, rax)
 	ret

 /*
@@ -840,6 +874,7 @@ _aesni_inc:
  * size_t len, uint8_t *icb)
  */
 ENTRY(aesni_ctr_enc)
+	RETGUARD_SETUP(aesni_ctr_enc, r10)
 	cmp $16,LEN
 	jb 4f
 	mov 480(KEYP),KLEN
@@ -893,9 +928,11 @@ ENTRY(aesni_ctr_enc)
 3:
 	movq IV,(IVP)
 4:
+	RETGUARD_CHECK(aesni_ctr_enc, r10)
 	ret

 _aesni_gmac_gfmul:
+	RETGUARD_SETUP(_aesni_gmac_gfmul, rax)
 	movdqa %xmm0,%xmm3
 	pclmulqdq $0x00,%xmm1,%xmm3	# xmm3 holds a0*b0
 	movdqa %xmm0,%xmm4
@@ -959,12 +996,14 @@ _aesni_gmac_gfmul:
 	pxor %xmm8,%xmm2
 	pxor %xmm2,%xmm3
 	pxor %xmm3,%xmm6	# the result is in xmm6
+	RETGUARD_CHECK(_aesni_gmac_gfmul, rax)
 	ret

 /*
  * void aesni_gmac_update(GHASH_CTX *ghash, uint8_t *src, size_t len)
  */
 ENTRY(aesni_gmac_update)
+	RETGUARD_SETUP(aesni_gmac_update, r11)
 	cmp $16,%rdx
 	jb 2f

@@ -990,6 +1029,7 @@ ENTRY(aesni_gmac_update)
 	movdqu %xmm6,16(%rdi)
 	movdqu %xmm6,32(%rdi)
 2:
+	RETGUARD_CHECK(aesni_gmac_update, r11)
 	ret

 /*
@@ -997,12 +1037,14 @@ ENTRY(aesni_gmac_update)
  * uint8_t *icb, uint8_t *hashstate)
  */
 ENTRY(aesni_gmac_final)
+	RETGUARD_SETUP(aesni_gmac_final, r11)
 	movl 480(KEYP),KLEN		# key length
 	movdqu (INP),STATE		# icb
 	call _aesni_enc1
 	movdqu (HSTATE),IN
 	pxor IN,STATE
 	movdqu STATE,(OUTP)		# output
+	RETGUARD_CHECK(aesni_gmac_final, r11)
 	ret

 /*
@@ -1010,6 +1052,8 @@ ENTRY(aesni_gmac_final)
  * size_t len, uint8_t *iv)
  */
 ENTRY(aesni_xts_enc)
+	RETGUARD_SETUP(aesni_xts_enc, r11)
+	RETGUARD_PUSH(r11)
 	cmp $16,%rcx
 	jb 2f

@@ -1031,6 +1075,8 @@ ENTRY(aesni_xts_enc)
 	cmp $16,%rcx
 	jge 1b
 2:
+	RETGUARD_POP(r11)
+	RETGUARD_CHECK(aesni_xts_enc, r11)
 	ret

 /*
@@ -1038,6 +1084,8 @@ ENTRY(aesni_xts_enc)
  * size_t len, uint8_t *iv)
  */
 ENTRY(aesni_xts_dec)
+	RETGUARD_SETUP(aesni_xts_dec, r11)
+	RETGUARD_PUSH(r11)
 	cmp $16,%rcx
 	jb 2f

@@ -1060,6 +1108,8 @@ ENTRY(aesni_xts_dec)
 	cmp $16,%rcx
 	jge 1b
 2:
+	RETGUARD_POP(r11)
+	RETGUARD_CHECK(aesni_xts_dec, r11)
 	ret

 /*
@@ -1070,6 +1120,8 @@ ENTRY(aesni_xts_dec)
  * xts is in %rdi, iv is in %r8 and we return the tweak in %xmm3.
  */
 _aesni_xts_tweak:
+	RETGUARD_SETUP(_aesni_xts_tweak, rax)
+	RETGUARD_PUSH(rax)
 	mov (%r8),%r10
 	movd %r10,%xmm0		# Last 64-bits of IV are always zero.
 	mov KEYP,%r11
@@ -1078,12 +1130,15 @@ _aesni_xts_tweak:
 	call _aesni_enc1
 	movdqa %xmm0,%xmm3
 	mov %r11,KEYP
+	RETGUARD_POP(rax)
+	RETGUARD_CHECK(_aesni_xts_tweak, rax)
 	ret

 /*
  * Exponentiate AES XTS tweak (in %xmm3).
  */
 _aesni_xts_tweak_exp:
+	RETGUARD_SETUP(_aesni_xts_tweak_exp, rax)
 	pextrw $7,%xmm3,%r10
 	pextrw $3,%xmm3,%r11
 	psllq $1,%xmm3		# Left shift.
@@ -1101,4 +1156,5 @@ _aesni_xts_tweak_exp:
 	xor $0x87,%r11		# AES XTS alpha - GF(2^128).
 	pinsrw $0,%r11,%xmm3
 2:
+	RETGUARD_CHECK(_aesni_xts_tweak_exp, rax)
 	ret
diff --git a/sys/arch/amd64/amd64/copy.S b/sys/arch/amd64/amd64/copy.S
index 5f7f37aa429..e15147eb741 100644
--- a/sys/arch/amd64/amd64/copy.S
+++ b/sys/arch/amd64/amd64/copy.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: copy.S,v 1.11 2017/09/16 02:03:40 guenther Exp $ */
+/* $OpenBSD: copy.S,v 1.12 2018/07/03 23:21:15 mortimer Exp $ */
 /* $NetBSD: copy.S,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */

 /*
@@ -52,6 +52,7 @@
  */

 ENTRY(kcopy)
+	RETGUARD_SETUP(copy, r10)
 	movq	CPUVAR(CURPCB),%rax
 	pushq	PCB_ONFAULT(%rax)
 	leaq	_C_LABEL(copy_fault)(%rip),%r11
@@ -76,6 +77,7 @@ ENTRY(kcopy)
 	movq	CPUVAR(CURPCB),%rdx
 	popq	PCB_ONFAULT(%rdx)
 	xorq	%rax,%rax
+	RETGUARD_CHECK(copy, r10)
 	ret

 1:	addq	%rcx,%rdi		# copy backward
@@ -97,9 +99,11 @@ ENTRY(kcopy)
 	movq	CPUVAR(CURPCB),%rdx
 	popq	PCB_ONFAULT(%rdx)
 	xorq	%rax,%rax
+	RETGUARD_CHECK(copy, r10)
 	ret

 ENTRY(copyout)
+	RETGUARD_SETUP(copy, r10)
 	pushq	$0

 	xchgq	%rdi,%rsi
@@ -132,9 +136,11 @@ ENTRY(copyout)
 	SMAP_CLAC
 	popq	PCB_ONFAULT(%rdx)
 	xorl	%eax,%eax
+	RETGUARD_CHECK(copy, r10)
 	ret

 ENTRY(copyin)
+	RETGUARD_SETUP(copy, r10)
 	movq	CPUVAR(CURPCB),%rax
 	pushq	$0
 	leaq	_C_LABEL(copy_fault)(%rip),%r11
@@ -169,6 +175,7 @@ ENTRY(copyin)
 	movq	CPUVAR(CURPCB),%rdx
 	popq	PCB_ONFAULT(%rdx)
 	xorl	%eax,%eax
+	RETGUARD_CHECK(copy, r10)
 	ret

 NENTRY(copy_fault)
@@ -176,9 +183,11 @@ NENTRY(copy_fault)
 	movq	CPUVAR(CURPCB),%rdx
 	popq	PCB_ONFAULT(%rdx)
 	movl	$EFAULT,%eax
+	RETGUARD_CHECK(copy, r10)
 	ret

 ENTRY(copyoutstr)
+	RETGUARD_SETUP(copystr_return, r10)
 	xchgq	%rdi,%rsi
 	movq	%rdx,%r8
 	movq	%rcx,%r9
@@ -220,6 +229,7 @@ ENTRY(copyoutstr)
 	jmp copystr_return

 ENTRY(copyinstr)
+	RETGUARD_SETUP(copystr_return, r10)
 	xchgq	%rdi,%rsi
 	movq	%rdx,%r8
 	movq	%rcx,%r9
@@ -272,10 +282,12 @@ copystr_return:
 	jz	8f
 	subq	%rdx,%r8
 	movq	%r8,(%r9)
-
-8:	ret
+8:
+	RETGUARD_CHECK(copystr_return, r10)
+	ret

 ENTRY(copystr)
+	RETGUARD_SETUP(copystr, r10)
 	xchgq	%rdi,%rsi
 	movq	%rdx,%r8

@@ -301,8 +313,9 @@ ENTRY(copystr)
 	jz	7f
 	subq	%rdx,%r8
 	movq	%r8,(%rcx)
-
-7:	ret
+7:
+	RETGUARD_CHECK(copystr, r10)
+	ret

 	.section .rodata
 	.globl	_C_LABEL(_stac)
diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index a05af42b870..b1f85ab6137 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.102 2018/07/01 04:47:32 guenther Exp $ */
+/* $OpenBSD: locore.S,v 1.103 2018/07/03 23:21:15 mortimer Exp $ */
 /* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */

 /*
@@ -275,6 +275,7 @@ _C_LABEL(sigfillsiz):
  * Change the global descriptor table.
  */
 NENTRY(lgdt)
+	RETGUARD_SETUP(lgdt, r11)
 	/* Reload the descriptor table. */
 	movq	%rdi,%rax
 	lgdt	(%rax)
@@ -290,6 +291,7 @@ NENTRY(lgdt)
 	popq	%rax
 	pushq	$GSEL(GCODE_SEL, SEL_KPL)
 	pushq	%rax
+	RETGUARD_CHECK(lgdt, r11)
 	lretq

 ENTRY(setjmp)
@@ -442,6 +444,9 @@ restore_saved:
 	movq	PCB_RSP(%r13),%rsp
 	movq	PCB_RBP(%r13),%rbp

+	/* Stack pivot done, setup RETGUARD */
+	RETGUARD_SETUP_OFF(cpu_switchto, r11, 6*8)
+
 	movq	PCB_CR3(%r13),%rax
 	movq	%rax,%cr3		/* %rax used below too */

@@ -486,6 +491,7 @@ switch_restored:
 	popq	%r12
 	popq	%rbp
 	popq	%rbx
+	RETGUARD_CHECK(cpu_switchto, r11)
 	ret

 ENTRY(cpu_idle_enter)
@@ -530,10 +536,11 @@ switch_active:
  * Update pcb, saving current processor state.
  */
 ENTRY(savectx)
+	RETGUARD_SETUP(savectx, r11)
 	/* Save stack pointers. */
 	movq	%rsp,PCB_RSP(%rdi)
 	movq	%rbp,PCB_RBP(%rdi)
-
+	RETGUARD_CHECK(savectx, r11)
 	ret

 IDTVEC(syscall32)
@@ -962,6 +969,7 @@ warn_once:
  */
 ENTRY(xrstor_user)
+	RETGUARD_SETUP(xrstor_user, r11)
 	movq	%rsi, %rdx
 	movl	%esi, %eax
 	shrq	$32, %rdx
@@ -971,23 +979,28 @@ xrstor_fault:
 	.byte 0x48; fxrstor (%rdi)	/* really fxrstor64 */
 	CODEPATCH_END(CPTAG_XRSTOR)
 	xorl	%eax, %eax
+	RETGUARD_CHECK(xrstor_user, r11)
 	ret
 NENTRY(xrstor_resume)
 	movl	$1, %eax
+	RETGUARD_CHECK(xrstor_user, r11)
 	ret
 END(xrstor_user)

 ENTRY(fpusave)
+	RETGUARD_SETUP(fpusave, r11)
 	movq	xsave_mask(%rip),%rdx
 	movl	%edx,%eax
 	shrq	$32,%rdx
 	CODEPATCH_START
 	.byte 0x48; fxsave (%rdi)	/* really fxsave64 */
 	CODEPATCH_END(CPTAG_XSAVE)
+	RETGUARD_CHECK(fpusave, r11)
 	ret
 END(fpusave)

 ENTRY(fpusavereset)
+	RETGUARD_SETUP(fpusavereset, r11)
 	movq	xsave_mask(%rip),%rdx
 	movl	%edx,%eax
 	shrq	$32,%rdx
@@ -1001,6 +1014,7 @@ ENTRY(fpusavereset)
 	CODEPATCH_START
 	.byte 0x48; fxrstor (%rdi)	/* really fxrstor64 */
 	CODEPATCH_END(CPTAG_XRSTOR)
+	RETGUARD_CHECK(fpusavereset, r11)
 	ret
 END(fpusavereset)

@@ -1018,6 +1032,7 @@ _C_LABEL(_xsaveopt):
 	.byte 0x48; xsaveopt (%rdi)	/* really xsaveopt64 */

 ENTRY(pagezero)
+	RETGUARD_SETUP(pagezero, r11)
 	movq	$-PAGE_SIZE,%rdx
 	subq	%rdx,%rdi
 	xorq	%rax,%rax
@@ -1029,6 +1044,7 @@ ENTRY(pagezero)
 	addq	$32,%rdx
 	jne	1b
 	sfence
+	RETGUARD_CHECK(pagezero, r11)
 	ret

 #if NXEN > 0
diff --git a/sys/arch/amd64/amd64/spl.S b/sys/arch/amd64/amd64/spl.S
index e544520a491..063dbe7db81 100644
--- a/sys/arch/amd64/amd64/spl.S
+++ b/sys/arch/amd64/amd64/spl.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: spl.S,v 1.15 2018/07/03 20:15:57 guenther Exp $ */
+/* $OpenBSD: spl.S,v 1.16 2018/07/03 23:21:15 mortimer Exp $ */
 /* $NetBSD: spl.S,v 1.3 2004/06/28 09:13:11 fvdl Exp $ */

 /*
@@ -86,16 +86,20 @@
 	.align	16, 0xcc
 _C_LABEL(splhigh):
+	RETGUARD_SETUP(splhigh, r11)
 	movl	$IPL_HIGH,%eax
 	xchgl	%eax,CPUVAR(ILEVEL)
+	RETGUARD_CHECK(splhigh, r11)
 	ret

 	.align	16, 0xcc
 _C_LABEL(splx):
+	RETGUARD_SETUP(splx, r11)
 	movl	4(%esp),%eax
 	movl	%eax,CPUVAR(ILEVEL)
 	testl	%eax,%eax
 	jnz	_C_LABEL(Xspllower)
+	RETGUARD_CHECK(splx, r11)
 	ret
 #endif /* PROF || GPROF */
 #endif
@@ -116,8 +120,10 @@ _C_LABEL(splx):
  */
 KIDTVEC(spllower)
 	_PROF_PROLOGUE
+	RETGUARD_SETUP(Xspllower, r11)
 	pushq	%rbx
 	pushq	%r13
+	RETGUARD_PUSH(r11)
 	movl	%edi,%ebx
 	leaq	1f(%rip),%r13		# address to resume loop at
 1:	movl	%ebx,%eax		# get cpl
@@ -127,8 +133,10 @@ KIDTVEC(spllower)
 	jnz	2f
 	movl	%ebx,CPUVAR(ILEVEL)
 	sti
+	RETGUARD_POP(r11)
 	popq	%r13
 	popq	%rbx
+	RETGUARD_CHECK(Xspllower, r11)
 	ret
 2:	bsrq	%rax,%rax
 	btrq	%rax,CPUVAR(IPENDING)
diff --git a/sys/arch/amd64/amd64/vmm_support.S b/sys/arch/amd64/amd64/vmm_support.S
index b5c4bd07561..f0e9bba9864 100644
--- a/sys/arch/amd64/amd64/vmm_support.S
+++ b/sys/arch/amd64/amd64/vmm_support.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmm_support.S,v 1.10 2018/04/24 20:33:28 mlarkin Exp $ */
+/* $OpenBSD: vmm_support.S,v 1.11 2018/07/03 23:21:15 mortimer Exp $ */
 /*
  * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
  *
@@ -58,84 +58,110 @@ _C_LABEL(vmm_dispatch_intr):
 	ret

 _C_LABEL(vmxon):
+	RETGUARD_SETUP(vmxon, r11)
 	vmxon	(%rdi)
 	jz	failed_on
 	jc	failed_on
 	xorq	%rax, %rax
+	RETGUARD_CHECK(vmxon, r11)
 	ret
 failed_on:
 	movq	$0x01, %rax
+	RETGUARD_CHECK(vmxon, r11)
 	ret

 _C_LABEL(vmxoff):
+	RETGUARD_SETUP(vmxoff, r11)
 	vmxoff
 	jz	failed_off
 	jc	failed_off
 	xorq	%rax, %rax
+	RETGUARD_CHECK(vmxoff, r11)
 	ret
 failed_off:
 	movq	$0x01, %rax
+	RETGUARD_CHECK(vmxoff, r11)
 	ret

 _C_LABEL(vmclear):
+	RETGUARD_SETUP(vmclear, r11)
 	vmclear	(%rdi)
 	jz	failed_clear
 	jc	failed_clear
 	xorq	%rax, %rax
+	RETGUARD_CHECK(vmclear, r11)
 	ret
 failed_clear:
 	movq	$0x01, %rax
+	RETGUARD_CHECK(vmclear, r11)
 	ret

 _C_LABEL(vmptrld):
+	RETGUARD_SETUP(vmptrld, r11)
 	vmptrld	(%rdi)
 	jz	failed_ptrld
 	jc	failed_ptrld
 	xorq	%rax, %rax
+	RETGUARD_CHECK(vmptrld, r11)
 	ret
 failed_ptrld:
 	movq	$0x01, %rax
+	RETGUARD_CHECK(vmptrld, r11)
 	ret

 _C_LABEL(vmptrst):
+	RETGUARD_SETUP(vmptrst, r11)
 	vmptrst	(%rdi)
 	jz	failed_ptrst
 	jc	failed_ptrst
 	xorq	%rax, %rax
+	RETGUARD_CHECK(vmptrst, r11)
 	ret
 failed_ptrst:
 	movq	$0x01, %rax
+	RETGUARD_CHECK(vmptrst, r11)
 	ret

 _C_LABEL(vmwrite):
+	RETGUARD_SETUP(vmwrite, r11)
 	vmwrite	%rsi, %rdi
 	jz	failed_write
 	jc	failed_write
 	xorq	%rax, %rax
+	RETGUARD_CHECK(vmwrite, r11)
 	ret
 failed_write:
 	movq	$0x01, %rax
+	RETGUARD_CHECK(vmwrite, r11)
 	ret

 _C_LABEL(vmread):
+	RETGUARD_SETUP(vmread, r11)
 	vmread	%rdi, (%rsi)
 	jz	failed_read
 	jc	failed_read
 	xorq	%rax, %rax
+	RETGUARD_CHECK(vmread, r11)
 	ret
 failed_read:
 	movq	$0x01, %rax
+	RETGUARD_CHECK(vmread, r11)
 	ret

 _C_LABEL(invvpid):
+	RETGUARD_SETUP(invvpid, r11)
 	invvpid	(%rsi), %rdi
+	RETGUARD_CHECK(invvpid, r11)
 	ret

 _C_LABEL(invept):
+	RETGUARD_SETUP(invept, r11)
 	invept	(%rsi), %rdi
+	RETGUARD_CHECK(invept, r11)
 	ret

 _C_LABEL(vmx_enter_guest):
+	RETGUARD_SETUP(vmx_enter_guest, r11)
 	movq	%rdx, %r8	/* resume flag */
 	testq	%r8, %r8
 	jnz skip_init
@@ -208,6 +234,8 @@ skip_init:
 	pushq	%rax
 	pushq	%rdx

+	RETGUARD_PUSH(r11)
+
 	/* Preserve callee-preserved registers as per AMD64 ABI */
 	pushq	%r15
 	pushq	%r14
@@ -328,6 +356,8 @@ restore_host:
 	popq	%r13
 	popq	%r14
 	popq	%r15
+
+	RETGUARD_POP(r11)

 	/*
 	 * Restore saved MSRs
@@ -385,9 +415,11 @@ restore_host:
 	popfq

 	movq	%rdi, %rax
+	RETGUARD_CHECK(vmx_enter_guest, r11)
 	ret

 _C_LABEL(svm_enter_guest):
+	RETGUARD_SETUP(svm_enter_guest, r11)
 	clgi
 	movq	%rdi, %r8
 	pushfq
@@ -445,6 +477,8 @@ _C_LABEL(svm_enter_guest):
 	pushq	%rax
 	pushq	%rdx

+	RETGUARD_PUSH(r11)
+
 	/* Preserve callee-preserved registers as per AMD64 ABI */
 	pushq	%r15
 	pushq	%r14
@@ -515,6 +549,8 @@ restore_host_svm:
 	popq	%r13
 	popq	%r14
 	popq	%r15
+
+	RETGUARD_POP(r11)

 	/*
 	 * Restore saved MSRs
@@ -586,4 +622,5 @@ restore_host_svm:

 	movq	%rdi, %rax
+	RETGUARD_CHECK(svm_enter_guest, r11)
 	ret