From 83c54070ee1a2d05c89793884bea1a03f2851ed4 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Thu, 19 Jul 2007 01:47:05 -0700
Subject: mm: fault feedback #2

This patch completes Linus's wish that the fault return codes be made into
bit flags, which I agree makes everything nicer.  This requires all
handle_mm_fault callers to be modified (possibly the modifications should
go further and do things like fault accounting in handle_mm_fault --
however that would be for another patch).

[akpm@linux-foundation.org: fix alpha build]
[akpm@linux-foundation.org: fix s390 build]
[akpm@linux-foundation.org: fix sparc build]
[akpm@linux-foundation.org: fix sparc64 build]
[akpm@linux-foundation.org: fix ia64 build]
Signed-off-by: Nick Piggin
Cc: Richard Henderson
Cc: Ivan Kokshaysky
Cc: Russell King
Cc: Ian Molton
Cc: Bryan Wu
Cc: Mikael Starvik
Cc: David Howells
Cc: Yoshinori Sato
Cc: "Luck, Tony"
Cc: Hirokazu Takata
Cc: Geert Uytterhoeven
Cc: Roman Zippel
Cc: Greg Ungerer
Cc: Matthew Wilcox
Cc: Paul Mackerras
Cc: Benjamin Herrenschmidt
Cc: Heiko Carstens
Cc: Martin Schwidefsky
Cc: Paul Mundt
Cc: Kazumoto Kojima
Cc: Richard Curnow
Cc: William Lee Irwin III
Cc: "David S. Miller"
Cc: Jeff Dike
Cc: Paolo 'Blaisorblade' Giarrusso
Cc: Miles Bader
Cc: Chris Zankel
Acked-by: Kyle McMartin
Acked-by: Haavard Skinnemoen
Acked-by: Ralf Baechle
Acked-by: Andi Kleen
Signed-off-by: Andrew Morton
[ Still apparently needs some ARM and PPC loving - Linus ]
Signed-off-by: Linus Torvalds
---
 arch/alpha/mm/fault.c                     | 22 ++++-----
 arch/arm/mm/fault.c                       | 36 +++++++-------
 arch/arm26/mm/fault.c                     | 30 ++++++------
 arch/avr32/mm/fault.c                     | 23 +++++----
 arch/cris/mm/fault.c                      | 23 ++++-----
 arch/frv/mm/fault.c                       | 23 ++++-----
 arch/i386/mm/fault.c                      | 23 +++++----
 arch/ia64/mm/fault.c                      | 26 +++++-----
 arch/m32r/mm/fault.c                      | 23 +++++----
 arch/m68k/mm/fault.c                      | 21 ++++----
 arch/mips/mm/fault.c                      | 23 +++++----
 arch/parisc/mm/fault.c                    | 23 ++++-----
 arch/powerpc/mm/fault.c                   | 26 +++++-----
 arch/powerpc/platforms/cell/spufs/fault.c | 28 +++++------
 arch/ppc/mm/fault.c                       | 23 +++++----
 arch/s390/lib/uaccess_pt.c                | 23 +++++----
 arch/s390/mm/fault.c                      | 30 ++++++------
 arch/sh/mm/fault.c                        | 23 +++++----
 arch/sh64/mm/fault.c                      | 24 +++++-----
 arch/sparc/mm/fault.c                     | 22 ++++-----
 arch/sparc64/mm/fault.c                   | 24 +++++-----
 arch/um/kernel/trap.c                     | 29 +++++------
 arch/x86_64/mm/fault.c                    | 25 +++++-----
 arch/xtensa/mm/fault.c                    | 23 +++++----
 fs/gfs2/ops_vm.c                          | 11 +++--
 include/linux/mm.h                        | 58 +++++----------------
 kernel/futex.c                            | 21 ++++----
 mm/filemap.c                              |  6 +--
 mm/filemap_xip.c                          |  2 +-
 mm/hugetlb.c                              | 10 ++--
 mm/memory.c                               | 80 +++++++++++++++----------------
 mm/shmem.c                                |  8 ++--
 32 files changed, 373 insertions(+), 419 deletions(-)

diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index f5862792a167..a0e18da594d9 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -148,21 +148,17 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	   the fault.  */
 	fault = handle_mm_fault(mm, vma, address, cause > 0);
 	up_read(&mm->mmap_sem);
-
-	switch (fault) {
-	      case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	      case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	      case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	      case VM_FAULT_OOM:
-		goto out_of_memory;
-	      default:
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	return;

 	/* Something tried to access memory that isn't in our memory map.
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 75d491448e45..c04124a095cf 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -183,20 +183,20 @@ good_area:
 	 */
 survive:
 	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
-
-	/*
-	 * Handle the "normal" cases first - successful and sigbus
-	 */
-	switch (fault) {
-	case VM_FAULT_MAJOR:
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			return fault;
+		BUG();
+	}
+	if (fault & VM_FAULT_MAJOR)
 		tsk->maj_flt++;
-		return fault;
-	case VM_FAULT_MINOR:
+	else
 		tsk->min_flt++;
-	case VM_FAULT_SIGBUS:
-		return fault;
-	}
+	return fault;

+out_of_memory:
 	if (!is_init(tsk))
 		goto out;
@@ -249,7 +249,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
 	 */
-	if (fault >= VM_FAULT_MINOR)
+	if (likely(!(fault & VM_FAULT_ERROR)))
 		return 0;
@@ -259,8 +259,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (!user_mode(regs))
 		goto no_context;

-	switch (fault) {
-	case VM_FAULT_OOM:
+	if (fault & VM_FAULT_OOM) {
 		/*
 		 * We ran out of memory, or some other thing
 		 * happened to us that made us unable to handle
@@ -269,17 +268,15 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		printk("VM: killing process %s\n", tsk->comm);
 		do_exit(SIGKILL);
 		return 0;
-
-	case VM_FAULT_SIGBUS:
+	}
+	if (fault & VM_FAULT_SIGBUS) {
 		/*
 		 * We had some memory, but were unable to
 		 * successfully fix up this page fault.
 		 */
 		sig = SIGBUS;
 		code = BUS_ADRERR;
-		break;
-
-	default:
+	} else {
 		/*
 		 * Something tried to access memory that
 		 * isn't in our memory map..
 		 */
 		sig = SIGSEGV;
 		code = fault == VM_FAULT_BADACCESS ?
 			SEGV_ACCERR : SEGV_MAPERR;
-		break;
 	}

 	__do_user_fault(tsk, addr, fsr, sig, code, regs);
diff --git a/arch/arm26/mm/fault.c b/arch/arm26/mm/fault.c
index 93c0cee0fb5e..dec638a0c8d9 100644
--- a/arch/arm26/mm/fault.c
+++ b/arch/arm26/mm/fault.c
@@ -170,20 +170,20 @@ good_area:
 	 */
 survive:
 	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, DO_COW(fsr));
-
-	/*
-	 * Handle the "normal" cases first - successful and sigbus
-	 */
-	switch (fault) {
-	case VM_FAULT_MAJOR:
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			return fault;
+		BUG();
+	}
+	if (fault & VM_FAULT_MAJOR)
 		tsk->maj_flt++;
-		return fault;
-	case VM_FAULT_MINOR:
+	else
 		tsk->min_flt++;
-	case VM_FAULT_SIGBUS:
-		return fault;
-	}
+	return fault;

+out_of_memory:
 	fault = -3; /* out of memory */
 	if (!is_init(tsk))
 		goto out;
@@ -225,13 +225,11 @@ int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	/*
 	 * Handle the "normal" case first
 	 */
-	switch (fault) {
-	case VM_FAULT_MINOR:
-	case VM_FAULT_MAJOR:
+	if (likely(!(fault & VM_FAULT_ERROR)))
 		return 0;
-	case VM_FAULT_SIGBUS:
+	if (fault & VM_FAULT_SIGBUS)
 		goto do_sigbus;
-	}
+	/* else VM_FAULT_OOM */

 	/*
 	 * If we are in kernel mode at this point, we
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 4b2495285d94..ae2d2c593b2b 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -64,6 +64,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	int writeaccess;
 	long signr;
 	int code;
+	int fault;

 	if (notify_page_fault(regs, ecr))
 		return;
@@ -132,20 +133,18 @@ good_area:
 	 * fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;

 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index c73e91f1299a..8672ab7d7978 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -179,6 +179,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	siginfo_t info;
+	int fault;

 	D(printk("Page fault for %lX on %X at %lX, prot %d write %d\n",
 	         address, smp_processor_id(), instruction_pointer(regs),
@@ -283,18 +284,18 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, writeaccess & 1)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;

 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 3f12296c3688..6798fa0257b1 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -40,6 +40,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	pud_t *pue;
 	pte_t *pte;
 	int write;
+	int fault;

 #if 0
 	const char *atxc[16] = {
@@ -162,18 +163,18 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, ear0, write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, ear0, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;

 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 1ecb3e43b523..e92a10124935 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -303,6 +303,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 	struct vm_area_struct * vma;
 	unsigned long address;
 	int write, si_code;
+	int fault;

 	/* get the address */
 	address = read_cr2();
@@ -422,20 +423,18 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-		case VM_FAULT_MINOR:
-			tsk->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			tsk->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			goto do_sigbus;
-		case VM_FAULT_OOM:
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
-		default:
-			BUG();
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;

 	/*
 	 * Did it hit the DOS screen memory VA from vm86 mode?
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index b87f785c2416..73ccb6010c05 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -80,6 +80,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	struct mm_struct *mm = current->mm;
 	struct siginfo si;
 	unsigned long mask;
+	int fault;

 	/* mmap_sem is performance critical.... */
 	prefetchw(&mm->mmap_sem);
@@ -147,26 +148,25 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
-	      case VM_FAULT_MINOR:
-		++current->min_flt;
-		break;
-	      case VM_FAULT_MAJOR:
-		++current->maj_flt;
-		break;
-	      case VM_FAULT_SIGBUS:
+	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
 		 * to us that made us unable to handle the page fault
 		 * gracefully.
 		 */
-		signal = SIGBUS;
-		goto bad_area;
-	      case VM_FAULT_OOM:
-		goto out_of_memory;
-	      default:
+		if (fault & VM_FAULT_OOM) {
+			goto out_of_memory;
+		} else if (fault & VM_FAULT_SIGBUS) {
+			signal = SIGBUS;
+			goto bad_area;
+		}
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index f3935ba24946..676a1c443d28 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -80,6 +80,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	struct vm_area_struct * vma;
 	unsigned long page, addr;
 	int write;
+	int fault;
 	siginfo_t info;

 	/*
@@ -195,20 +196,18 @@ survive:
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	switch (handle_mm_fault(mm, vma, addr, write)) {
-		case VM_FAULT_MINOR:
-			tsk->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			tsk->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			goto do_sigbus;
-		case VM_FAULT_OOM:
+	fault = handle_mm_fault(mm, vma, addr, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
-		default:
-			BUG();
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 	set_thread_fault_code(0);
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 2adbeb16e1b8..578b48f47b9e 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -159,18 +159,17 @@ good_area:
 #ifdef DEBUG
 	printk("handle_mm_fault returns %d\n",fault);
 #endif
-	switch (fault) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto bus_err;
-	default:
-		goto out_of_memory;
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto bus_err;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;

 	up_read(&mm->mmap_sem);
 	return 0;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 7ebea331edb8..521771b373de 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -39,6 +39,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	struct mm_struct *mm = tsk->mm;
 	const int field = sizeof(unsigned long) * 2;
 	siginfo_t info;
+	int fault;

 #if 0
 	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
@@ -102,20 +103,18 @@ survive:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;

 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index f6f67554c623..7899ab87785a 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -147,6 +147,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	struct mm_struct *mm = tsk->mm;
 	const struct exception_table_entry *fix;
 	unsigned long acc_type;
+	int fault;

 	if (in_atomic() || !mm)
 		goto no_context;
@@ -173,23 +174,23 @@ good_area:
 	 * fault.
 	 */

-	switch (handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0)) {
-	      case VM_FAULT_MINOR:
-		++current->min_flt;
-		break;
-	      case VM_FAULT_MAJOR:
-		++current->maj_flt;
-		break;
-	      case VM_FAULT_SIGBUS:
+	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We hit a shared mapping outside of the file, or some
 		 * other thing happened to us that made us unable to
 		 * handle the page fault gracefully.
 		 */
-		goto bad_area;
-	      default:
-		goto out_of_memory;
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto bad_area;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 0ece51310bfe..3767211b3d0f 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -145,7 +145,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	struct mm_struct *mm = current->mm;
 	siginfo_t info;
 	int code = SEGV_MAPERR;
-	int is_write = 0;
+	int is_write = 0, ret;
 	int trap = TRAP(regs);
 	int is_exec = trap == 0x400;

@@ -330,22 +330,18 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, is_write)) {
-
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	ret = handle_mm_fault(mm, vma, address, is_write);
+	if (unlikely(ret & VM_FAULT_ERROR)) {
+		if (ret & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (ret & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
-
+	if (ret & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return 0;
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index e064d0c0d80e..07f88de0544d 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -74,23 +74,21 @@ good_area:
 		goto bad_area;
 	}
 	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write);
-	switch (*flt) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		ret = -EFAULT;
-		goto bad_area;
-	case VM_FAULT_OOM:
-		ret = -ENOMEM;
-		goto bad_area;
-	default:
+	fault = handle_mm_fault(mm, vma, ea, is_write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM) {
+			ret = -ENOMEM;
+			goto bad_area;
+		} else if (fault & VM_FAULT_SIGBUS) {
+			ret = -EFAULT;
+			goto bad_area;
+		}
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);
 	return ret;
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index 465f451f3bc3..b98244e277fb 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -96,6 +96,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	struct mm_struct *mm = current->mm;
 	siginfo_t info;
 	int code = SEGV_MAPERR;
+	int fault;
 #if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
 	int is_write = error_code & ESR_DST;
 #else
@@ -249,20 +250,18 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, is_write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, is_write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	up_read(&mm->mmap_sem);

 	/*
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 63181671e3e3..60604b2819b2 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -20,6 +20,7 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
 {
 	struct vm_area_struct *vma;
 	int ret = -EFAULT;
+	int fault;

 	if (in_atomic())
 		return ret;
@@ -44,20 +45,18 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
 	}

 survive:
-	switch (handle_mm_fault(mm, vma, address, write_access)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto out_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, write_access);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto out_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 	ret = 0;
 out:
 	up_read(&mm->mmap_sem);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index d855cdbf8fb8..54055194e9af 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -307,6 +307,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
 	unsigned long address;
 	int space;
 	int si_code;
+	int fault;

 	if (notify_page_fault(regs, error_code))
 		return;
@@ -377,23 +378,22 @@ survive:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		do_sigbus(regs, error_code, address);
-		return;
-	case VM_FAULT_OOM:
-		if (do_out_of_memory(regs, error_code, address))
-			goto survive;
-		return;
-	default:
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM) {
+			if (do_out_of_memory(regs, error_code, address))
+				goto survive;
+			return;
+		} else if (fault & VM_FAULT_SIGBUS) {
+			do_sigbus(regs, error_code, address);
+			return;
+		}
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;

 	up_read(&mm->mmap_sem);
 	/*
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 0b3eaf6fbb28..964c6767dc73 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -33,6 +33,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	int si_code;
+	int fault;
 	siginfo_t info;

 	trace_hardirqs_on();
@@ -124,20 +125,18 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-		case VM_FAULT_MINOR:
-			tsk->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			tsk->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			goto do_sigbus;
-		case VM_FAULT_OOM:
+	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
-		default:
-			BUG();
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;

 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c
index 3cd93ba5d826..0d069d82141f 100644
--- a/arch/sh64/mm/fault.c
+++ b/arch/sh64/mm/fault.c
@@ -127,6 +127,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	struct vm_area_struct * vma;
 	const struct exception_table_entry *fixup;
 	pte_t *pte;
+	int fault;

 #if defined(CONFIG_SH64_PROC_TLB)
 	++calls_to_do_slow_page_fault;
@@ -221,18 +222,19 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
+
 	/* If we get here, the page fault has been handled.  Do the TLB refill
 	   now from the newly-setup PTE, to avoid having to fault again right
 	   away on the same instruction. */
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
index c3483365db4b..50747fe44356 100644
--- a/arch/sparc/mm/fault.c
+++ b/arch/sparc/mm/fault.c
@@ -226,6 +226,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	unsigned long g2;
 	siginfo_t info;
 	int from_user = !(regs->psr & PSR_PS);
+	int fault;

 	if(text_fault)
 		address = regs->pc;
@@ -289,19 +290,18 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	case VM_FAULT_MAJOR:
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
+	}
+	if (fault & VM_FAULT_MAJOR)
 		current->maj_flt++;
-		break;
-	case VM_FAULT_MINOR:
-	default:
+	else
 		current->min_flt++;
-		break;
-	}
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index b582024d2199..17123e9ecf78 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -278,7 +278,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned int insn = 0;
-	int si_code, fault_code;
+	int si_code, fault_code, fault;
 	unsigned long address, mm_rss;

 	fault_code = get_thread_fault_code();
@@ -415,20 +415,18 @@ good_area:
 		goto bad_area;
 	}

-	switch (handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE))) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;

 	up_read(&mm->mmap_sem);
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index abab90c3803f..3850d53f79fd 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -76,23 +76,24 @@ good_area:
 		goto out;

 	do {
+		int fault;
 survive:
-		switch (handle_mm_fault(mm, vma, address, is_write)){
-		case VM_FAULT_MINOR:
-			current->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			current->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			err = -EACCES;
-			goto out;
-		case VM_FAULT_OOM:
-			err = -ENOMEM;
-			goto out_of_memory;
-		default:
+		fault = handle_mm_fault(mm, vma, address, is_write);
+		if (unlikely(fault & VM_FAULT_ERROR)) {
+			if (fault & VM_FAULT_OOM) {
+				err = -ENOMEM;
+				goto out_of_memory;
+			} else if (fault & VM_FAULT_SIGBUS) {
+				err = -EACCES;
+				goto out;
+			}
 			BUG();
 		}
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+
 		pgd = pgd_offset(mm, address);
 		pud = pud_offset(pgd, address);
 		pmd = pmd_offset(pud, address);
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 635e58d443d7..84f11728fc76 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -317,7 +317,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	struct vm_area_struct * vma;
 	unsigned long address;
 	const struct exception_table_entry *fixup;
-	int write;
+	int write, fault;
 	unsigned long flags;
 	siginfo_t info;

@@ -450,19 +450,18 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, address, write)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, address, write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
-
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 3dc6f2f07bbe..16004067add3 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -41,6 +41,7 @@ void do_page_fault(struct pt_regs *regs)
 	siginfo_t info;

 	int is_write, is_exec;
+	int fault;

 	info.si_code = SEGV_MAPERR;

@@ -102,20 +103,18 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, is_write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, is_write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;

 	up_read(&mm->mmap_sem);
 	return;
diff --git a/fs/gfs2/ops_vm.c b/fs/gfs2/ops_vm.c
index dc287d2e3a66..927d739d4685 100644
--- a/fs/gfs2/ops_vm.c
+++ b/fs/gfs2/ops_vm.c
@@ -112,7 +112,7 @@ static int gfs2_sharewrite_fault(struct vm_area_struct *vma,
 	struct gfs2_holder i_gh;
 	int alloc_required;
 	int error;
-	int ret = VM_FAULT_MINOR;
+	int ret = 0;

 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
 	if (error)
@@ -132,14 +132,19 @@ static int gfs2_sharewrite_fault(struct vm_area_struct *vma,
 	set_bit(GFF_EXLOCK, &gf->f_flags);
 	ret = filemap_fault(vma, vmf);
 	clear_bit(GFF_EXLOCK, &gf->f_flags);
-	if (ret & (VM_FAULT_ERROR | FAULT_RET_NOPAGE))
+	if (ret & VM_FAULT_ERROR)
 		goto out_unlock;

 	if (alloc_required) {
 		/* XXX: do we need to drop page lock around alloc_page_backing?*/
 		error = alloc_page_backing(ip, vmf->page);
 		if (error) {
-			if (ret & FAULT_RET_LOCKED)
+			/*
+			 * VM_FAULT_LOCKED should always be the case for
+			 * filemap_fault, but it may not be in a future
+			 * implementation.
+			 */
+			if (ret & VM_FAULT_LOCKED)
 				unlock_page(vmf->page);
 			page_cache_release(vmf->page);
 			ret = VM_FAULT_OOM;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ff0b8844bd5a..f8e12b3b6110 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -196,25 +196,10 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */

-#define FAULT_RET_NOPAGE	0x0100	/* ->fault did not return a page. This
-					 * can be used if the handler installs
-					 * their own pte.
-					 */
-#define FAULT_RET_LOCKED	0x0200	/* ->fault locked the page, caller must
-					 * unlock after installing the mapping.
-					 * This is used by pagecache in
-					 * particular, where the page lock is
-					 * used to synchronise against truncate
-					 * and invalidate. Mutually exclusive
-					 * with FAULT_RET_NOPAGE.
-					 */
-
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
- * ->fault function. The vma's ->fault is responsible for returning the
- * VM_FAULT_xxx type which occupies the lowest byte of the return code, ORed
- * with FAULT_RET_ flags that occupy the next byte and give details about
- * how the fault was handled.
+ * ->fault function. The vma's ->fault is responsible for returning a bitmask
+ * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
@@ -226,9 +211,9 @@ struct vm_fault {
 	void __user *virtual_address;	/* Faulting virtual address */

 	struct page *page;		/* ->fault handlers should return a
-					 * page here, unless FAULT_RET_NOPAGE
+					 * page here, unless VM_FAULT_NOPAGE
 					 * is set (which is also implied by
-					 * VM_FAULT_OOM or SIGBUS).
+					 * VM_FAULT_ERROR).
 					 */
 };
@@ -712,26 +697,17 @@ static inline int page_mapped(struct page *page)
 * just gets major/minor fault counters bumped up.
 */

-/*
- * VM_FAULT_ERROR is set for the error cases, to make some tests simpler.
- */
-#define VM_FAULT_ERROR	0x20
+#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

-#define VM_FAULT_OOM	(0x00 | VM_FAULT_ERROR)
-#define VM_FAULT_SIGBUS	(0x01 | VM_FAULT_ERROR)
-#define VM_FAULT_MINOR	0x02
-#define VM_FAULT_MAJOR	0x03
+#define VM_FAULT_OOM	0x0001
+#define VM_FAULT_SIGBUS	0x0002
+#define VM_FAULT_MAJOR	0x0004
+#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */

-/*
- * Special case for get_user_pages.
- * Must be in a distinct bit from the above VM_FAULT_ flags.
- */
-#define VM_FAULT_WRITE	0x10
+#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
+#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */

-/*
- * Mask of VM_FAULT_ flags
- */
-#define VM_FAULT_MASK	0xff
+#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)

 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
@@ -817,16 +793,8 @@ extern int vmtruncate(struct inode * inode, loff_t offset);
 extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);

 #ifdef CONFIG_MMU
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
+extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, int write_access);
-
-static inline int handle_mm_fault(struct mm_struct *mm,
-	struct vm_area_struct *vma, unsigned long address,
-	int write_access)
-{
-	return __handle_mm_fault(mm, vma, address, write_access) &
-				(~VM_FAULT_WRITE);
-}
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
 			struct vm_area_struct *vma, unsigned long address,
diff --git a/kernel/futex.c b/kernel/futex.c
index 5c3f45d07c53..a12425051ee9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -346,15 +346,20 @@ static int futex_handle_fault(unsigned long address,

 	vma = find_vma(mm, address);
 	if (vma && address >= vma->vm_start &&
 	    (vma->vm_flags & VM_WRITE)) {
-		switch (handle_mm_fault(mm, vma, address, 1)) {
-		case VM_FAULT_MINOR:
-			ret = 0;
-			current->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
+		int fault;
+		fault = handle_mm_fault(mm, vma, address, 1);
+		if (unlikely((fault & VM_FAULT_ERROR))) {
+#if 0
+			/* XXX: let's do this when we verify it is OK */
+			if (ret & VM_FAULT_OOM)
+				ret = -ENOMEM;
+#endif
+		} else {
 			ret = 0;
-			current->maj_flt++;
-			break;
+			if (fault & VM_FAULT_MAJOR)
+				current->maj_flt++;
+			else
+				current->min_flt++;
 		}
 	}
 	if (!fshared)
diff --git a/mm/filemap.c b/mm/filemap.c
index 0876cc57255f..4fd9e3f0f48a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1322,9 +1322,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct page *page;
 	unsigned long size;
 	int did_readaround = 0;
-	int ret;
-
-	ret = VM_FAULT_MINOR;
+	int ret = 0;

 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (vmf->pgoff >= size)
@@ -1408,7 +1406,7 @@ retry_find:
 	 */
 	mark_page_accessed(page);
 	vmf->page = page;
-	return ret | FAULT_RET_LOCKED;
+	return ret | VM_FAULT_LOCKED;

 outside_data_content:
 	/*
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 847d5d78163e..53ee6a299635 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -252,7 +252,7 @@ static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 out:
 	page_cache_get(page);
 	vmf->page = page;
-	return VM_FAULT_MINOR;
+	return 0;
 }

 static struct vm_operations_struct xip_file_vm_ops = {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index aaa7c1a682d9..c4a573b857bd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -469,7 +469,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	avoidcopy = (page_count(old_page) == 1);
 	if (avoidcopy) {
 		set_huge_ptep_writable(vma, address, ptep);
-		return VM_FAULT_MINOR;
+		return 0;
 	}

 	page_cache_get(old_page);
@@ -494,7 +494,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	page_cache_release(new_page);
 	page_cache_release(old_page);
-	return VM_FAULT_MINOR;
+	return 0;
 }

 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -551,7 +551,7 @@ retry:
 		if (idx >= size)
 			goto backout;

-	ret = VM_FAULT_MINOR;
+	ret = 0;
 	if (!pte_none(*ptep))
 		goto backout;
@@ -602,7 +602,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		return ret;
 	}

-	ret = VM_FAULT_MINOR;
+	ret = 0;

 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
@@ -641,7 +641,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			spin_unlock(&mm->page_table_lock);
 			ret = hugetlb_fault(mm, vma, vaddr, 0);
 			spin_lock(&mm->page_table_lock);
-			if (ret == VM_FAULT_MINOR)
+			if (!(ret & VM_FAULT_MAJOR))
 				continue;

 			remainder = 0;
diff --git a/mm/memory.c b/mm/memory.c
index 23c870479b3e..61d51da7e17c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1068,31 +1068,30 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		cond_resched();
 		while (!(page = follow_page(vma, start, foll_flags))) {
 			int ret;
-			ret = __handle_mm_fault(mm, vma, start,
+			ret = handle_mm_fault(mm, vma, start,
 					foll_flags & FOLL_WRITE);
+			if (ret & VM_FAULT_ERROR) {
+				if (ret & VM_FAULT_OOM)
+					return i ? i : -ENOMEM;
+				else if (ret & VM_FAULT_SIGBUS)
+					return i ? i : -EFAULT;
+				BUG();
+			}
+			if (ret & VM_FAULT_MAJOR)
+				tsk->maj_flt++;
+			else
+				tsk->min_flt++;
+
 			/*
-			 * The VM_FAULT_WRITE bit tells us that do_wp_page has
-			 * broken COW when necessary, even if maybe_mkwrite
-			 * decided not to set pte_write. We can thus safely do
-			 * subsequent page lookups as if they were reads.
+			 * The VM_FAULT_WRITE bit tells us that
+			 * do_wp_page has broken COW when necessary,
+			 * even if maybe_mkwrite decided not to set
+			 * pte_write. We can thus safely do subsequent
+			 * page lookups as if they were reads.
 			 */
 			if (ret & VM_FAULT_WRITE)
 				foll_flags &= ~FOLL_WRITE;
-
-			switch (ret & ~VM_FAULT_WRITE) {
-			case VM_FAULT_MINOR:
-				tsk->min_flt++;
-				break;
-			case VM_FAULT_MAJOR:
-				tsk->maj_flt++;
-				break;
-			case VM_FAULT_SIGBUS:
-				return i ? i : -EFAULT;
-			case VM_FAULT_OOM:
-				return i ? i : -ENOMEM;
-			default:
-				BUG();
-			}
+
 			cond_resched();
 		}
 		if (pages) {
@@ -1639,7 +1638,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
-	int reuse = 0, ret = VM_FAULT_MINOR;
+	int reuse = 0, ret = 0;
 	struct page *dirty_page = NULL;

 	old_page = vm_normal_page(vma, address, orig_pte);
@@ -1835,8 +1834,8 @@ static int unmap_mapping_range_vma(struct vm_area_struct *vma,
 	/*
 	 * files that support invalidating or truncating portions of the
 	 * file from under mmaped areas must have their ->fault function
-	 * return a locked page (and FAULT_RET_LOCKED code). This provides
-	 * synchronisation against concurrent unmapping here.
+	 * return a locked page (and set VM_FAULT_LOCKED in the return).
+	 * This provides synchronisation against concurrent unmapping here.
 	 */

again:
@@ -2140,7 +2139,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page;
 	swp_entry_t entry;
 	pte_t pte;
-	int ret = VM_FAULT_MINOR;
+	int ret = 0;

 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 		goto out;
@@ -2208,8 +2207,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unlock_page(page);

 	if (write_access) {
+		/* XXX: We could OR the do_wp_page code with this one? */
 		if (do_wp_page(mm, vma, address,
-				page_table, pmd, ptl, pte) == VM_FAULT_OOM)
+				page_table, pmd, ptl, pte) & VM_FAULT_OOM)
 			ret = VM_FAULT_OOM;
 		goto out;
 	}
@@ -2280,7 +2280,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lazy_mmu_prot_update(entry);
unlock:
 	pte_unmap_unlock(page_table, ptl);
-	return VM_FAULT_MINOR;
+	return 0;
release:
 	page_cache_release(page);
 	goto unlock;
@@ -2323,11 +2323,11 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,

 	if (likely(vma->vm_ops->fault)) {
 		ret = vma->vm_ops->fault(vma, &vmf);
-		if (unlikely(ret & (VM_FAULT_ERROR | FAULT_RET_NOPAGE)))
-			return (ret & VM_FAULT_MASK);
+		if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+			return ret;
 	} else {
 		/* Legacy ->nopage path */
-		ret = VM_FAULT_MINOR;
+		ret = 0;
 		vmf.page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
 		/* no page was available -- either SIGBUS or OOM */
 		if (unlikely(vmf.page == NOPAGE_SIGBUS))
@@ -2340,7 +2340,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * For consistency in subsequent calls, make the faulted page always
 	 * locked.
 	 */
-	if (unlikely(!(ret & FAULT_RET_LOCKED)))
+	if (unlikely(!(ret & VM_FAULT_LOCKED)))
 		lock_page(vmf.page);
 	else
 		VM_BUG_ON(!PageLocked(vmf.page));
@@ -2356,7 +2356,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
-		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+					vma, address);
 		if (!page) {
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
@@ -2384,7 +2385,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * is better done later.
 	 */
 	if (!page->mapping) {
-		ret = VM_FAULT_MINOR;
+		ret = 0;
 		anon = 1; /* no anon but release vmf.page */
 		goto out;
 	}
@@ -2447,7 +2448,7 @@ out_unlocked:
 		put_page(dirty_page);
 	}

-	return (ret & VM_FAULT_MASK);
+	return ret;
 }

 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -2486,7 +2487,6 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 	unsigned long pfn;
-	int ret = VM_FAULT_MINOR;

 	pte_unmap(page_table);
 	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
@@ -2498,7 +2498,7 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
 	else if (unlikely(pfn == NOPFN_SIGBUS))
 		return VM_FAULT_SIGBUS;
 	else if (unlikely(pfn == NOPFN_REFAULT))
-		return VM_FAULT_MINOR;
+		return 0;

 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2510,7 +2510,7 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_pte_at(mm, address, page_table, entry);
 	}
 	pte_unmap_unlock(page_table, ptl);
-	return ret;
+	return 0;
 }

 /*
@@ -2531,7 +2531,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pgoff_t pgoff;

 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
-		return VM_FAULT_MINOR;
+		return 0;

 	if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
 			!(vma->vm_flags & VM_CAN_NONLINEAR))) {
@@ -2615,13 +2615,13 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 	}
unlock:
 	pte_unmap_unlock(pte, ptl);
-	return VM_FAULT_MINOR;
+	return 0;
 }

 /*
 * By the time we get here, we already hold the mm semaphore
 */
-int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, int write_access)
 {
 	pgd_t *pgd;
@@ -2650,7 +2650,7 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
 }
-EXPORT_SYMBOL_GPL(__handle_mm_fault);
+EXPORT_SYMBOL_GPL(handle_mm_fault);

 #ifndef __PAGETABLE_PUD_FOLDED
 /*
diff --git a/mm/shmem.c b/mm/shmem.c
index 0a555af8733d..ad155c7745dc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1103,7 +1103,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 		return -EFBIG;

 	if (type)
-		*type = VM_FAULT_MINOR;
+		*type = 0;

 	/*
 	 * Normally, filepage is NULL on entry, and either found
@@ -1138,9 +1138,9 @@ repeat:
 		if (!swappage) {
 			shmem_swp_unmap(entry);
 			/* here we actually do the io */
-			if (type && *type == VM_FAULT_MINOR) {
+			if (type && !(*type & VM_FAULT_MAJOR)) {
 				__count_vm_event(PGMAJFAULT);
-				*type = VM_FAULT_MAJOR;
+				*type |= VM_FAULT_MAJOR;
 			}
 			spin_unlock(&info->lock);
 			swappage = shmem_swapin(info, swap, idx);
@@ -1323,7 +1323,7 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return ((error == -ENOMEM) ?
 			VM_FAULT_OOM : VM_FAULT_SIGBUS);

 	mark_page_accessed(vmf->page);
-	return ret | FAULT_RET_LOCKED;
+	return ret | VM_FAULT_LOCKED;
 }

 #ifdef CONFIG_NUMA
--
cgit v1.2.3-59-g8ed1b
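
For reference, the caller pattern this patch rolls out across all the
architectures above can be distilled as the following sketch. It is built
purely from the hunks in this commit; mm, vma, address, write_access and the
out_of_memory/do_sigbus labels stand in for each architecture's local names:

	int fault;

	fault = handle_mm_fault(mm, vma, address, write_access);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		/* Error bits are tested individually; any other bit here is a bug. */
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	/* There is no VM_FAULT_MINOR bit any more: "not major" means minor. */
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;

Because the error bits are tested before the accounting, the major/minor
counters are only touched on a successfully handled fault, which is why they
no longer appear in the error arms of the old switch statements.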