From 083db6764821996526970e42d09c1ab2f4155dd4 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 17 Jul 2019 20:36:36 -0500
Subject: x86/paravirt: Fix callee-saved function ELF sizes

The __raw_callee_save_*() functions have an ELF symbol size of zero,
which confuses objtool and other tools.

Fixes a bunch of warnings like the following:

  arch/x86/xen/mmu_pv.o: warning: objtool: __raw_callee_save_xen_pte_val() is missing an ELF size annotation
  arch/x86/xen/mmu_pv.o: warning: objtool: __raw_callee_save_xen_pgd_val() is missing an ELF size annotation
  arch/x86/xen/mmu_pv.o: warning: objtool: __raw_callee_save_xen_make_pte() is missing an ELF size annotation
  arch/x86/xen/mmu_pv.o: warning: objtool: __raw_callee_save_xen_make_pgd() is missing an ELF size annotation

Signed-off-by: Josh Poimboeuf
Signed-off-by: Thomas Gleixner
Reviewed-by: Juergen Gross
Acked-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/afa6d49bb07497ca62e4fc3b27a2d0cece545b4e.1563413318.git.jpoimboe@redhat.com
---
 arch/x86/include/asm/paravirt.h | 1 +
 arch/x86/kernel/kvm.c           | 1 +
 2 files changed, 2 insertions(+)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index c25c38a05c1c..d6f5ae2c79ab 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -746,6 +746,7 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 	PV_RESTORE_ALL_CALLER_REGS				\
 	FRAME_END						\
 	"ret;"							\
+	".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
 	".popsection")
 
 /* Get a reference to a callee-save function */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 82caf01b63dd..6661bd2f08a6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -838,6 +838,7 @@ asm(
 "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
 "setne %al;"
 "ret;"
+".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
 ".popsection");
 
 #endif
-- 
cgit v1.2.3-59-g8ed1b
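For readers unfamiliar with the directive being added: below is a minimal
standalone sketch (not part of the patch; the demo symbol names are made up)
of how ".size" affects the ELF symbol table. Compile it with any C compiler
and inspect the object with "readelf -s": the unannotated symbol reports
size 0, which is the state the __raw_callee_save_*() thunks were in before
this fix.

	/*
	 * Two asm-defined functions, one with and one without a ".size"
	 * annotation.  readelf -s reports size 0 for demo_unsized and a
	 * real byte count for demo_sized.
	 */
	asm(".pushsection .text, \"ax\";"
	    ".globl demo_unsized;"
	    ".type demo_unsized, @function;"
	    "demo_unsized: ret;"		/* no .size: ELF size stays 0 */
	    ".globl demo_sized;"
	    ".type demo_sized, @function;"
	    "demo_sized: ret;"
	    ".size demo_sized, .-demo_sized;"	/* size = current point minus start */
	    ".popsection");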
From d99a6ce70ec6ed990b74bd4e34232fd830d20d27 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 17 Jul 2019 20:36:37 -0500
Subject: x86/kvm: Fix fastop function ELF metadata

Some of the fastop functions, e.g. em_setcc(), are actually just used
as global labels which point to blocks of functions.  The global labels
are incorrectly annotated as functions.  Also the functions themselves
don't have size annotations.

Fixes a bunch of warnings like the following:

  arch/x86/kvm/emulate.o: warning: objtool: seto() is missing an ELF size annotation
  arch/x86/kvm/emulate.o: warning: objtool: em_setcc() is missing an ELF size annotation
  arch/x86/kvm/emulate.o: warning: objtool: setno() is missing an ELF size annotation
  arch/x86/kvm/emulate.o: warning: objtool: setc() is missing an ELF size annotation

Signed-off-by: Josh Poimboeuf
Signed-off-by: Thomas Gleixner
Acked-by: Paolo Bonzini
Acked-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/c8cc9be60ebbceb3092aa5dd91916039a1f88275.1563413318.git.jpoimboe@redhat.com
---
 arch/x86/kvm/emulate.c | 44 +++++++++++++++++++++++++++++++-------------
 1 file changed, 31 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8e409ad448f9..718f7d9afedc 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -312,29 +312,42 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
 
 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
 
-#define FOP_FUNC(name) \
+#define __FOP_FUNC(name) \
 	".align " __stringify(FASTOP_SIZE) " \n\t" \
 	".type " name ", @function \n\t" \
 	name ":\n\t"
 
-#define FOP_RET   "ret \n\t"
+#define FOP_FUNC(name) \
+	__FOP_FUNC(#name)
+
+#define __FOP_RET(name) \
+	"ret \n\t" \
+	".size " name ", .-" name "\n\t"
+
+#define FOP_RET(name) \
+	__FOP_RET(#name)
 
 #define FOP_START(op) \
 	extern void em_##op(struct fastop *fake); \
 	asm(".pushsection .text, \"ax\" \n\t" \
 	    ".global em_" #op " \n\t" \
-	    FOP_FUNC("em_" #op)
+	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
+	    "em_" #op ":\n\t"
 
 #define FOP_END \
 	    ".popsection")
 
+#define __FOPNOP(name) \
+	__FOP_FUNC(name) \
+	__FOP_RET(name)
+
 #define FOPNOP() \
-	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
-	FOP_RET
+	__FOPNOP(__stringify(__UNIQUE_ID(nop)))
 
 #define FOP1E(op, dst) \
-	FOP_FUNC(#op "_" #dst) \
-	"10: " #op " %" #dst " \n\t" FOP_RET
+	__FOP_FUNC(#op "_" #dst) \
+	"10: " #op " %" #dst " \n\t" \
+	__FOP_RET(#op "_" #dst)
 
 #define FOP1EEX(op, dst) \
 	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
@@ -366,8 +379,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
 	FOP_END
 
 #define FOP2E(op, dst, src) \
-	FOP_FUNC(#op "_" #dst "_" #src) \
-	#op " %" #src ", %" #dst " \n\t" FOP_RET
+	__FOP_FUNC(#op "_" #dst "_" #src) \
+	#op " %" #src ", %" #dst " \n\t" \
+	__FOP_RET(#op "_" #dst "_" #src)
 
 #define FASTOP2(op) \
 	FOP_START(op) \
@@ -405,8 +419,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
 	FOP_END
 
 #define FOP3E(op, dst, src, src2) \
-	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
-	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
+	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
+	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
+	__FOP_RET(#op "_" #dst "_" #src "_" #src2)
 
 /* 3-operand, word-only, src2=cl */
 #define FASTOP3WCL(op) \
@@ -423,7 +438,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
 	".type " #op ", @function \n\t" \
 	#op ": \n\t" \
 	#op " %al \n\t" \
-	FOP_RET
+	__FOP_RET(#op)
 
 asm(".pushsection .fixup, \"ax\"\n"
     ".global kvm_fastop_exception \n"
@@ -449,7 +464,10 @@ FOP_SETCC(setle)
 FOP_SETCC(setnle)
 FOP_END;
 
-FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
+FOP_START(salc)
+FOP_FUNC(salc)
+"pushf; sbb %al, %al; popf \n\t"
+FOP_RET(salc)
 FOP_END;
 
 /*
-- 
cgit v1.2.3-59-g8ed1b
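To make the macro rework concrete, here is an approximate hand expansion
(illustration only; it assumes FASTOP_SIZE expands to 8 and simplifies the
alignment details) of what FOP_START(setcc) plus a single FOP_SETCC(seto)
now emit. em_setcc becomes a bare label with no @function type, while the
real seto stub gets both a type and, via __FOP_RET(), a size:

	asm(".pushsection .text, \"ax\" \n\t"
	    ".global em_setcc \n\t"
	    ".align 8 \n\t"
	    "em_setcc: \n\t"		/* plain label: no .type, no .size */
	    ".align 8 \n\t"
	    ".type seto, @function \n\t"
	    "seto: \n\t"
	    "seto %al \n\t"
	    "ret \n\t"
	    ".size seto, .-seto \n\t"	/* added by __FOP_RET(#op) */
	    ".popsection");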
From 19f2d8fa98644c7b78845b1d66abeae4e3d9dfa8 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 17 Jul 2019 20:36:38 -0500
Subject: x86/kvm: Replace vmx_vmenter()'s call to kvm_spurious_fault() with UD2

Objtool reports the following:

  arch/x86/kvm/vmx/vmenter.o: warning: objtool: vmx_vmenter()+0x14: call without frame pointer save/setup

But frame pointers are necessarily broken anyway, because
__vmx_vcpu_run() clobbers RBP with the guest's value before calling
vmx_vmenter().  So calling without a frame pointer doesn't make things
any worse.

Make objtool happy by changing the call to a UD2.

Suggested-by: Paolo Bonzini
Signed-off-by: Josh Poimboeuf
Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Acked-by: Paolo Bonzini
Link: https://lkml.kernel.org/r/9fc2216c9dc972f95bb65ce2966a682c6bda1cb0.1563413318.git.jpoimboe@redhat.com
---
 arch/x86/kvm/vmx/vmenter.S | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index d4cb1945b2e3..4010d519eb8c 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -54,9 +54,9 @@ ENTRY(vmx_vmenter)
 	ret
 
 3:	cmpb $0, kvm_rebooting
-	jne 4f
-	call kvm_spurious_fault
-4:	ret
+	je 4f
+	ret
+4:	ud2
 
 	.pushsection .fixup, "ax"
 5:	jmp 3b
-- 
cgit v1.2.3-59-g8ed1b
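The resulting control flow at label 3, rendered as C for clarity (a sketch,
not kernel code; __builtin_trap() emits UD2 on x86 with both GCC and Clang):

	extern bool kvm_rebooting;

	static void vmenter_fault_path(void)
	{
		if (kvm_rebooting)
			return;		/* reboot in progress: ignore the fault */
		__builtin_trap();	/* ud2: die via #UD, no call frame needed */
	}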
From 3901336ed9887b075531bffaeef7742ba614058b Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 17 Jul 2019 20:36:39 -0500
Subject: x86/kvm: Don't call kvm_spurious_fault() from .fixup

After making a change to improve objtool's sibling call detection, it
started showing the following warning:

  arch/x86/kvm/vmx/nested.o: warning: objtool: .fixup+0x15: sibling call from callable instruction with modified stack frame

The problem is the ____kvm_handle_fault_on_reboot() macro.  It does a
fake call by pushing a fake RIP and doing a jump.  That tricks the
unwinder into printing the function which triggered the exception,
rather than the .fixup code.

Instead of the hack to make it look like the original function made the
call, just change the macro so that the original function actually does
make the call.  This allows removal of the hack, and also makes objtool
happy.

I triggered a vmx instruction exception and verified that the stack
trace is still sane:

  kernel BUG at arch/x86/kvm/x86.c:358!
  invalid opcode: 0000 [#1] SMP PTI
  CPU: 28 PID: 4096 Comm: qemu-kvm Not tainted 5.2.0+ #16
  Hardware name: Lenovo THINKSYSTEM SD530 -[7X2106Z000]-/-[7X2106Z000]-, BIOS -[TEE113Z-1.00]- 07/17/2017
  RIP: 0010:kvm_spurious_fault+0x5/0x10
  Code: 00 00 00 00 00 8b 44 24 10 89 d2 45 89 c9 48 89 44 24 10 8b 44 24 08 48 89 44 24 08 e9 d4 40 22 00 0f 1f 40 00 0f 1f 44 00 00 <0f> 0b 66 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 41 55 49 89 fd 41
  RSP: 0018:ffffbf91c683bd00 EFLAGS: 00010246
  RAX: 000061f040000000 RBX: ffff9e159c77bba0 RCX: ffff9e15a5c87000
  RDX: 0000000665c87000 RSI: ffff9e15a5c87000 RDI: ffff9e159c77bba0
  RBP: 0000000000000000 R08: 0000000000000000 R09: ffff9e15a5c87000
  R10: 0000000000000000 R11: fffff8f2d99721c0 R12: ffff9e159c77bba0
  R13: ffffbf91c671d960 R14: ffff9e159c778000 R15: 0000000000000000
  FS:  00007fa341cbe700(0000) GS:ffff9e15b7400000(0000) knlGS:0000000000000000
  CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
  CR2: 00007fdd38356804 CR3: 00000006759de003 CR4: 00000000007606e0
  DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
  DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
  PKRU: 55555554
  Call Trace:
   loaded_vmcs_init+0x4f/0xe0
   alloc_loaded_vmcs+0x38/0xd0
   vmx_create_vcpu+0xf7/0x600
   kvm_vm_ioctl+0x5e9/0x980
   ? __switch_to_asm+0x40/0x70
   ? __switch_to_asm+0x34/0x70
   ? __switch_to_asm+0x40/0x70
   ? __switch_to_asm+0x34/0x70
   ? free_one_page+0x13f/0x4e0
   do_vfs_ioctl+0xa4/0x630
   ksys_ioctl+0x60/0x90
   __x64_sys_ioctl+0x16/0x20
   do_syscall_64+0x55/0x1c0
   entry_SYSCALL_64_after_hwframe+0x44/0xa9
  RIP: 0033:0x7fa349b1ee5b

Signed-off-by: Josh Poimboeuf
Signed-off-by: Thomas Gleixner
Acked-by: Paolo Bonzini
Acked-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/64a9b64d127e87b6920a97afde8e96ea76f6524e.1563413318.git.jpoimboe@redhat.com
---
 arch/x86/include/asm/kvm_host.h | 34 +++++++++++++++++++---------------
 1 file changed, 19 insertions(+), 15 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0cc5b611a113..8282b8d41209 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1496,25 +1496,29 @@ enum {
 #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
 #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
 
+asmlinkage void __noreturn kvm_spurious_fault(void);
+
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
- * Trap the fault and ignore the instruction if that happens.
+ * Usually after catching the fault we just panic; during reboot
+ * instead the instruction is ignored.
  */
-asmlinkage void kvm_spurious_fault(void);
-
-#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
-	"666: " insn "\n\t" \
-	"668: \n\t" \
-	".pushsection .fixup, \"ax\" \n" \
-	"667: \n\t" \
-	cleanup_insn "\n\t" \
-	"cmpb $0, kvm_rebooting \n\t" \
-	"jne 668b \n\t" \
-	__ASM_SIZE(push) " $666b \n\t" \
-	"jmp kvm_spurious_fault \n\t" \
-	".popsection \n\t" \
-	_ASM_EXTABLE(666b, 667b)
+#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
+	"666: \n\t" \
+	insn "\n\t" \
+	"jmp	668f \n\t" \
+	"667: \n\t" \
+	"call	kvm_spurious_fault \n\t" \
+	"668: \n\t" \
+	".pushsection .fixup, \"ax\" \n\t" \
+	"700: \n\t" \
+	cleanup_insn "\n\t" \
+	"cmpb	$0, kvm_rebooting\n\t" \
+	"je	667b \n\t" \
+	"jmp	668b \n\t" \
+	".popsection \n\t" \
+	_ASM_EXTABLE(666b, 700b)
 
 #define __kvm_handle_fault_on_reboot(insn) \
 	____kvm_handle_fault_on_reboot(insn, "")
-- 
cgit v1.2.3-59-g8ed1b
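As an illustration (not part of the patch), here is roughly what the
reworked macro expands to for a hypothetical insn of "vmxoff" with an empty
cleanup_insn, assuming the usual kernel context for _ASM_EXTABLE() and
kvm_rebooting:

	asm volatile("666: \n\t"
		     "vmxoff \n\t"			/* may fault during reboot */
		     "jmp 668f \n\t"			/* no fault: skip the call */
		     "667: \n\t"
		     "call kvm_spurious_fault \n\t"	/* a real call: the unwinder sees a normal frame */
		     "668: \n\t"
		     ".pushsection .fixup, \"ax\" \n\t"
		     "700: \n\t"
		     "cmpb $0, kvm_rebooting \n\t"
		     "je 667b \n\t"			/* not rebooting: report the spurious fault */
		     "jmp 668b \n\t"			/* rebooting: ignore the instruction */
		     ".popsection \n\t"
		     _ASM_EXTABLE(666b, 700b));		/* a fault at 666 lands at 700 */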
From e6dd47394493061c605285a868fc72eae2e9c866 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 17 Jul 2019 20:36:40 -0500
Subject: x86/entry: Fix thunk function ELF sizes

Fix the following warnings:

  arch/x86/entry/thunk_64.o: warning: objtool: trace_hardirqs_on_thunk() is missing an ELF size annotation
  arch/x86/entry/thunk_64.o: warning: objtool: trace_hardirqs_off_thunk() is missing an ELF size annotation
  arch/x86/entry/thunk_64.o: warning: objtool: lockdep_sys_exit_thunk() is missing an ELF size annotation

Signed-off-by: Josh Poimboeuf
Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/89c97adc9f6cc44a0f5d03cde6d0357662938909.1563413318.git.jpoimboe@redhat.com
---
 arch/x86/entry/thunk_64.S | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index cfdca8b42c70..cc20465b2867 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -12,9 +12,7 @@
 
 	/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
 	.macro THUNK name, func, put_ret_addr_in_rdi=0
-	.globl \name
-	.type \name, @function
-\name:
+	ENTRY(\name)
 	pushq %rbp
 	movq %rsp, %rbp
 
@@ -35,6 +33,7 @@
 
 	call \func
 	jmp  .L_restore
+	ENDPROC(\name)
 	_ASM_NOKPROBE(\name)
 	.endm
-- 
cgit v1.2.3-59-g8ed1b

From 61a73f5cd1a5794626d216cc56e20a1b195c5d0c Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 17 Jul 2019 20:36:41 -0500
Subject: x86/head/64: Annotate start_cpu0() as non-callable

After an objtool improvement, it complains about the fact that
start_cpu0() jumps to code which has an LRET instruction.

  arch/x86/kernel/head_64.o: warning: objtool: .head.text+0xe4: unsupported instruction in callable function

Technically, start_cpu0() is callable, but it acts nothing like a
callable function.  Prevent objtool from treating it like one by
removing its ELF function annotation.

Signed-off-by: Josh Poimboeuf
Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/6b1b4505fcb90571a55fa1b52d71fb458ca24454.1563413318.git.jpoimboe@redhat.com
---
 arch/x86/kernel/head_64.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index bcd206c8ac90..66b4a7757397 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -253,10 +253,10 @@ END(secondary_startup_64)
  * start_secondary() via .Ljump_to_C_code.
  */
 ENTRY(start_cpu0)
-	movq	initial_stack(%rip), %rsp
 	UNWIND_HINT_EMPTY
+	movq	initial_stack(%rip), %rsp
 	jmp	.Ljump_to_C_code
-ENDPROC(start_cpu0)
+END(start_cpu0)
 #endif
 
 /* Both SMP bootup and ACPI suspend change these variables */
-- 
cgit v1.2.3-59-g8ed1b
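Both of these patches lean on the kernel's linkage macros, which at the time
expanded to roughly the following (a simplified sketch with hypothetical demo
symbols, not the kernel's linkage.h). ENDPROC() supplies the @function type
and the .size that the open-coded thunk lacked; END() supplies only the size,
which is why switching start_cpu0() from ENDPROC() to END() stops objtool
from treating it as a callable function:

	asm(".pushsection .text, \"ax\";"
	    /* ENTRY(demo_func): .globl + alignment + label */
	    ".globl demo_func; .p2align 4, 0x90; demo_func:;"
	    "ret;"
	    /* ENDPROC(demo_func): function type plus size */
	    ".type demo_func, @function;"
	    ".size demo_func, .-demo_func;"
	    /* ENTRY(demo_stub) ... END(demo_stub): sized but left untyped */
	    ".globl demo_stub; .p2align 4, 0x90; demo_stub:;"
	    "ret;"
	    ".size demo_stub, .-demo_stub;"
	    ".popsection");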
From 3a6ab4bcc52263dd5b1d2fd2e4ce95a38c798b4d Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 17 Jul 2019 20:36:42 -0500
Subject: x86/uaccess: Remove ELF function annotation from copy_user_handle_tail()

After an objtool improvement, it's complaining about the CLAC in
copy_user_handle_tail():

  arch/x86/lib/copy_user_64.o: warning: objtool: .altinstr_replacement+0x12: redundant UACCESS disable
  arch/x86/lib/copy_user_64.o: warning: objtool: copy_user_handle_tail()+0x6: (alt)
  arch/x86/lib/copy_user_64.o: warning: objtool: copy_user_handle_tail()+0x2: (alt)
  arch/x86/lib/copy_user_64.o: warning: objtool: copy_user_handle_tail()+0x0: <=== (func)

copy_user_handle_tail() is incorrectly marked as a callable function, so
objtool is rightfully concerned about the CLAC with no corresponding
STAC.

Remove the ELF function annotation.  The copy_user_handle_tail() code
path is already verified by objtool because it's jumped to by other
callable asm code (which does the corresponding STAC).

Signed-off-by: Josh Poimboeuf
Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/6b6e436774678b4b9873811ff023bd29935bee5b.1563413318.git.jpoimboe@redhat.com
---
 arch/x86/lib/copy_user_64.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 378a1f70ae7d..4fe1601dbc5d 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -239,7 +239,7 @@ copy_user_handle_tail:
 	ret
 
 	_ASM_EXTABLE_UA(1b, 2b)
-ENDPROC(copy_user_handle_tail)
+END(copy_user_handle_tail)
 
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
-- 
cgit v1.2.3-59-g8ed1b

From 5e307a6bc7b600999742675dd182bcb8a6fe308e Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 17 Jul 2019 20:36:43 -0500
Subject: x86/uaccess: Don't leak AC flag into fentry from mcsafe_handle_tail()

After adding mcsafe_handle_tail() to the objtool uaccess safe list,
objtool reports:

  arch/x86/lib/usercopy_64.o: warning: objtool: mcsafe_handle_tail()+0x0: call to __fentry__() with UACCESS enabled

With SMAP, this function is called with AC=1, so it needs to be careful
about which functions it calls.  Disable the ftrace entry hook, which
can potentially pull in a lot of extra code.

Signed-off-by: Josh Poimboeuf
Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/8e13d6f0da1c8a3f7603903da6cbf6d582bbfe10.1563413318.git.jpoimboe@redhat.com
---
 arch/x86/lib/usercopy_64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index e0e006f1624e..fff28c6f73a2 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(clear_user);
  * but reuse __memcpy_mcsafe in case a new read error is encountered.
  * clac() is handled in _copy_to_iter_mcsafe().
  */
-__visible unsigned long
+__visible notrace unsigned long
 mcsafe_handle_tail(char *to, char *from, unsigned len)
 {
 	for (; len; --len, to++, from++) {
-- 
cgit v1.2.3-59-g8ed1b
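For context, a small standalone sketch of the mechanism (the demo function
and its simplified body are hypothetical, not the kernel's): with ftrace
enabled the compiler plants a call to __fentry__ at the top of every
function, and notrace, which the kernel defines roughly as below, suppresses
it for a single function so that nothing gets called while AC=1:

	/* Roughly how the kernel defines notrace: */
	#define notrace __attribute__((__no_instrument_function__))

	/*
	 * Compiled with -pg -mfentry, a plain function would start with
	 * "call __fentry__"; this one does not, so it stays safe to run
	 * inside a STAC/CLAC (AC=1) region.
	 */
	notrace static unsigned long demo_handle_tail(char *to, char *from,
						      unsigned int len)
	{
		for (; len; --len, to++, from++)
			*to = *from;	/* simplified byte-by-byte tail copy */
		return len;		/* 0 here: everything was copied */
	}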
From 82e844a6536d1a3c12a73e44712f4021d90a4b53 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 17 Jul 2019 20:36:44 -0500
Subject: x86/uaccess: Remove redundant CLACs in getuser/putuser error paths

The same getuser/putuser error paths are used regardless of whether AC
is set.  In non-exception failure cases, this results in an unnecessary
CLAC.

Fixes the following warnings:

  arch/x86/lib/getuser.o: warning: objtool: .altinstr_replacement+0x18: redundant UACCESS disable
  arch/x86/lib/putuser.o: warning: objtool: .altinstr_replacement+0x18: redundant UACCESS disable

Signed-off-by: Josh Poimboeuf
Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/bc14ded2755ae75bd9010c446079e113dbddb74b.1563413318.git.jpoimboe@redhat.com
---
 arch/x86/lib/getuser.S | 20 ++++++++++----------
 arch/x86/lib/putuser.S | 29 ++++++++++++++++-------------
 2 files changed, 26 insertions(+), 23 deletions(-)

diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 74fdff968ea3..304f958c27b2 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -115,29 +115,29 @@ ENDPROC(__get_user_8)
 EXPORT_SYMBOL(__get_user_8)
 
 
+bad_get_user_clac:
+	ASM_CLAC
 bad_get_user:
 	xor %edx,%edx
 	mov $(-EFAULT),%_ASM_AX
-	ASM_CLAC
 	ret
-END(bad_get_user)
 
 #ifdef CONFIG_X86_32
+bad_get_user_8_clac:
+	ASM_CLAC
 bad_get_user_8:
 	xor %edx,%edx
 	xor %ecx,%ecx
 	mov $(-EFAULT),%_ASM_AX
-	ASM_CLAC
 	ret
-END(bad_get_user_8)
 #endif
 
-	_ASM_EXTABLE_UA(1b, bad_get_user)
-	_ASM_EXTABLE_UA(2b, bad_get_user)
-	_ASM_EXTABLE_UA(3b, bad_get_user)
+	_ASM_EXTABLE_UA(1b, bad_get_user_clac)
+	_ASM_EXTABLE_UA(2b, bad_get_user_clac)
+	_ASM_EXTABLE_UA(3b, bad_get_user_clac)
 #ifdef CONFIG_X86_64
-	_ASM_EXTABLE_UA(4b, bad_get_user)
+	_ASM_EXTABLE_UA(4b, bad_get_user_clac)
 #else
-	_ASM_EXTABLE_UA(4b, bad_get_user_8)
-	_ASM_EXTABLE_UA(5b, bad_get_user_8)
+	_ASM_EXTABLE_UA(4b, bad_get_user_8_clac)
+	_ASM_EXTABLE_UA(5b, bad_get_user_8_clac)
 #endif
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index d2e5c9c39601..14bf78341d3c 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -32,8 +32,6 @@
  */
 #define ENTER	mov PER_CPU_VAR(current_task), %_ASM_BX
-#define EXIT	ASM_CLAC ;	\
-	ret
 
 .text
 ENTRY(__put_user_1)
@@ -43,7 +41,8 @@ ENTRY(__put_user_1)
 	ASM_STAC
 1:	movb %al,(%_ASM_CX)
 	xor %eax,%eax
-	EXIT
+	ASM_CLAC
+	ret
 ENDPROC(__put_user_1)
 EXPORT_SYMBOL(__put_user_1)
 
@@ -56,7 +55,8 @@ ENTRY(__put_user_2)
 	ASM_STAC
 2:	movw %ax,(%_ASM_CX)
 	xor %eax,%eax
-	EXIT
+	ASM_CLAC
+	ret
 ENDPROC(__put_user_2)
 EXPORT_SYMBOL(__put_user_2)
 
@@ -69,7 +69,8 @@ ENTRY(__put_user_4)
 	ASM_STAC
 3:	movl %eax,(%_ASM_CX)
 	xor %eax,%eax
-	EXIT
+	ASM_CLAC
+	ret
 ENDPROC(__put_user_4)
 EXPORT_SYMBOL(__put_user_4)
 
@@ -85,19 +86,21 @@ ENTRY(__put_user_8)
 5:	movl %edx,4(%_ASM_CX)
 #endif
 	xor %eax,%eax
-	EXIT
+	ASM_CLAC
+	ret
 ENDPROC(__put_user_8)
 EXPORT_SYMBOL(__put_user_8)
 
+bad_put_user_clac:
+	ASM_CLAC
 bad_put_user:
 	movl $-EFAULT,%eax
-	EXIT
-END(bad_put_user)
+	ret
 
-	_ASM_EXTABLE_UA(1b, bad_put_user)
-	_ASM_EXTABLE_UA(2b, bad_put_user)
-	_ASM_EXTABLE_UA(3b, bad_put_user)
-	_ASM_EXTABLE_UA(4b, bad_put_user)
+	_ASM_EXTABLE_UA(1b, bad_put_user_clac)
+	_ASM_EXTABLE_UA(2b, bad_put_user_clac)
+	_ASM_EXTABLE_UA(3b, bad_put_user_clac)
+	_ASM_EXTABLE_UA(4b, bad_put_user_clac)
 #ifdef CONFIG_X86_32
-	_ASM_EXTABLE_UA(5b, bad_put_user)
+	_ASM_EXTABLE_UA(5b, bad_put_user_clac)
 #endif
-- 
cgit v1.2.3-59-g8ed1b
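The shape of the resulting code, reduced to a standalone sketch (the demo
labels are hypothetical, and the real exception-table plumbing via
_ASM_EXTABLE_UA is elided): failures detected before STAC jump straight to
the plain error label, while faults taken inside the STAC/CLAC window are
routed to the _clac variant, so AC is cleared only when it was actually set:

	asm(".pushsection .text, \"ax\";"
	    "demo_get_user:;"
	    "   cmp %rbx, %rax;"
	    "   jae demo_bad_user;"	/* range check fails with AC still clear */
	    "   stac;"
	    "1: movq (%rax), %rdx;"	/* a fault here would be routed to demo_bad_user_clac */
	    "   clac;"
	    "   xor %eax, %eax;"
	    "   ret;"
	    "demo_bad_user_clac:;"
	    "   clac;"			/* undo the STAC taken before the fault */
	    "demo_bad_user:;"
	    "   mov $-14, %eax;"	/* -EFAULT */
	    "   ret;"
	    ".popsection");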