Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--  arch/x86/mm/fault.c | 51
1 file changed, 19 insertions(+), 32 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 46df4c6aae46..d1634c59ed56 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -46,23 +46,6 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
return 0;
}
-static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
-{
- if (!kprobes_built_in())
- return 0;
- if (user_mode(regs))
- return 0;
- /*
- * To be potentially processing a kprobe fault and to be allowed to call
- * kprobe_running(), we have to be non-preemptible.
- */
- if (preemptible())
- return 0;
- if (!kprobe_running())
- return 0;
- return kprobe_fault_handler(regs, X86_TRAP_PF);
-}
-
/*
* Prefetch quirks:
*
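The x86-private kprobes_fault() helper is removed in favor of a generic kprobe_page_fault() in common code; the two call sites further down switch over to it. A sketch of the generic helper, assuming it mirrors the removed x86 version with the trap number turned into a parameter:

	static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
						      unsigned int trap)
	{
		if (!kprobes_built_in() || user_mode(regs))
			return false;

		/*
		 * To be potentially processing a kprobe fault and to be
		 * allowed to call kprobe_running(), we have to be
		 * non-preemptible.
		 */
		if (preemptible())
			return false;
		if (!kprobe_running())
			return false;

		return kprobe_fault_handler(regs, trap);
	}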
@@ -710,6 +693,10 @@ static void set_signal_archinfo(unsigned long address,
* To avoid leaking information about the kernel page
* table layout, pretend that user-mode accesses to
* kernel addresses are always protection faults.
+ *
+ * NB: This means that failed vsyscalls with vsyscall=none
+ * will have the PROT bit set. This doesn't leak any
+ * information and does not appear to cause any problems.
*/
if (address >= TASK_SIZE_MAX)
error_code |= X86_PF_PROT;
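For context, set_signal_archinfo() around this hunk looks roughly like the sketch below; everything outside the quoted comment and the X86_PF_PROT line is an assumption reconstructed from the surrounding code:

	static void set_signal_archinfo(unsigned long address,
					unsigned long error_code)
	{
		struct task_struct *tsk = current;

		/* Hide the kernel page-table layout from user space. */
		if (address >= TASK_SIZE_MAX)
			error_code |= X86_PF_PROT;

		/* Record the fault for the signal delivery path. */
		tsk->thread.trap_nr = X86_TRAP_PF;
		tsk->thread.error_code = error_code | X86_PF_USER;
		tsk->thread.cr2 = address;
	}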
@@ -756,8 +743,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
set_signal_archinfo(address, error_code);
/* XXX: hwpoison faults will set the wrong code. */
- force_sig_fault(signal, si_code, (void __user *)address,
- tsk);
+ force_sig_fault(signal, si_code, (void __user *)address);
}
/*
@@ -918,7 +904,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
if (si_code == SEGV_PKUERR)
force_sig_pkuerr((void __user *)address, pkey);
- force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
return;
}
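The tsk argument disappears here (and from do_sigbus() below) because the force_sig_*() helpers were reworked to always operate on current, making the explicit task pointer redundant. On x86 the reworked prototypes reduce to roughly:

	/* kernel/signal.c -- sketch; the helpers now act on current */
	int force_sig_fault(int sig, int code, void __user *addr);
	int force_sig_mceerr(int code, void __user *addr, short lsb);
	int force_sig_pkuerr(void __user *addr, u32 pkey);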
@@ -1015,8 +1001,6 @@ static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
vm_fault_t fault)
{
- struct task_struct *tsk = current;
-
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & X86_PF_USER)) {
no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
@@ -1031,6 +1015,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
#ifdef CONFIG_MEMORY_FAILURE
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+ struct task_struct *tsk = current;
unsigned lsb = 0;
pr_err(
@@ -1040,11 +1025,11 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
if (fault & VM_FAULT_HWPOISON)
lsb = PAGE_SHIFT;
- force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, tsk);
+ force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
return;
}
#endif
- force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}
static noinline void
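User space sees the poison granularity as si_addr_lsb in the siginfo: log2 of the affected range, i.e. PAGE_SHIFT for a single poisoned page or the huge-page shift from hstate_index_to_shift(). A hypothetical user-space consumer (illustrative only, not from this patch; a real handler must stick to async-signal-safe calls):

	#define _GNU_SOURCE
	#include <signal.h>
	#include <string.h>
	#include <unistd.h>

	static void sigbus_handler(int sig, siginfo_t *info, void *ctx)
	{
		if (info->si_code == BUS_MCEERR_AR) {
			/*
			 * info->si_addr is the poisoned address and
			 * info->si_addr_lsb is log2 of the affected range
			 * (e.g. 12 for one 4K page), as filled in by
			 * force_sig_mceerr() above.
			 */
			static const char msg[] = "hwpoison SIGBUS\n";
			write(STDERR_FILENO, msg, sizeof(msg) - 1);
		}
		_exit(1);
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = sigbus_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGBUS, &sa, NULL);
		/* ... access poisoned memory here ... */
		return 0;
	}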
@@ -1280,7 +1265,7 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
return;
/* kprobes don't want to hook the spurious faults: */
- if (kprobes_fault(regs))
+ if (kprobe_page_fault(regs, X86_TRAP_PF))
return;
/*
@@ -1311,7 +1296,7 @@ void do_user_addr_fault(struct pt_regs *regs,
mm = tsk->mm;
/* kprobes don't want to hook the spurious faults: */
- if (unlikely(kprobes_fault(regs)))
+ if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
return;
/*
@@ -1369,16 +1354,18 @@ void do_user_addr_fault(struct pt_regs *regs,
#ifdef CONFIG_X86_64
/*
- * Instruction fetch faults in the vsyscall page might need
- * emulation. The vsyscall page is at a high address
- * (>PAGE_OFFSET), but is considered to be part of the user
- * address space.
+ * Faults in the vsyscall page might need emulation. The
+ * vsyscall page is at a high address (>PAGE_OFFSET), but is
+ * considered to be part of the user address space.
*
* The vsyscall page does not have a "real" VMA, so do this
* emulation before we go searching for VMAs.
+ *
+ * PKRU never rejects instruction fetches, so we don't need
+ * to consider the PF_PK bit.
*/
- if ((hw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) {
- if (emulate_vsyscall(regs, address))
+ if (is_vsyscall_vaddr(address)) {
+ if (emulate_vsyscall(hw_error_code, regs, address))
return;
}
#endif
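With this change every fault on the vsyscall page is routed through emulation, not just instruction fetches, and emulate_vsyscall() receives the hardware error code so it can sort that out itself. For reference, is_vsyscall_vaddr() is a page-masked compare against the fixed vsyscall address; a sketch of the helper as defined earlier in this file:

	static bool is_vsyscall_vaddr(unsigned long vaddr)
	{
		/* The vsyscall page is a single fixed page at VSYSCALL_ADDR. */
		return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
	}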