Diffstat (limited to 'arch/m68k/mm')
 arch/m68k/mm/cache.c  |  2 +-
 arch/m68k/mm/fault.c  | 15 ++++-----------
 arch/m68k/mm/kmap.c   |  2 +-
 arch/m68k/mm/mcfmmu.c |  7 +------
 arch/m68k/mm/memory.c |  1 -
 5 files changed, 7 insertions(+), 20 deletions(-)
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
index 5ecb3310e874..b486c0889eec 100644
--- a/arch/m68k/mm/cache.c
+++ b/arch/m68k/mm/cache.c
@@ -8,7 +8,7 @@
*/
#include <linux/module.h>
-#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
#include <asm/traps.h>
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index a94a814ad6ad..795f483b1050 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -12,10 +12,10 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <linux/perf_event.h>
#include <asm/setup.h>
#include <asm/traps.h>
-#include <asm/pgalloc.h>
extern void die_if_kernel(char *, struct pt_regs *, long);
@@ -85,6 +85,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
mmap_read_lock(mm);
@@ -135,7 +137,7 @@ good_area:
* the fault.
*/
- fault = handle_mm_fault(vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags, regs);
pr_debug("handle_mm_fault returns %x\n", fault);
if (fault_signal_pending(fault, regs))
@@ -151,16 +153,7 @@ good_area:
BUG();
}
- /*
- * Major/minor page fault accounting is only done on the
- * initial attempt. If we go through a retry, it is extremely
- * likely that the page will be found in page cache at that point.
- */
if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
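
The fault.c hunks above move page fault accounting out of the architecture: do_page_fault() now raises PERF_COUNT_SW_PAGE_FAULTS itself, and handle_mm_fault() takes the register state so the generic code can bump maj_flt/min_flt once per fault. The removed comment captured the key rule: account only the initial attempt, because a retried fault will almost always find the page already in the page cache. A minimal userspace C sketch of that rule follows; the names and flag values are illustrative stand-ins, not the kernel API.

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's fault flags. */
    #define VM_FAULT_MAJOR   0x01
    #define VM_FAULT_RETRY   0x02
    #define FAULT_FLAG_TRIED 0x04

    struct task_counts { unsigned long maj_flt, min_flt; };

    /*
     * Account a fault exactly once, on the first attempt. A retry that
     * finds the page already in the page cache must not bump the
     * counters again -- the rule the removed comment spelled out, now
     * enforced in one place instead of in every architecture.
     */
    static void account_fault(struct task_counts *tc, unsigned int fault,
                              unsigned int flags)
    {
        if (flags & FAULT_FLAG_TRIED)   /* retry: already accounted */
            return;
        if (fault & VM_FAULT_MAJOR)
            tc->maj_flt++;
        else
            tc->min_flt++;
    }

    int main(void)
    {
        struct task_counts tc = { 0, 0 };

        /* First attempt: major fault that asks for a retry. */
        account_fault(&tc, VM_FAULT_MAJOR | VM_FAULT_RETRY, 0);
        /* The retry completes without being counted twice. */
        account_fault(&tc, 0, FAULT_FLAG_TRIED);

        printf("maj=%lu min=%lu\n", tc.maj_flt, tc.min_flt); /* maj=1 min=0 */
        return 0;
    }

Centralizing this logic is why the hunk here is pure deletion: every architecture carried the same copy-pasted block, and it now lives once in generic mm code.
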
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 14d31d216cef..1269d513b221 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -19,8 +19,8 @@
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
#include <asm/io.h>
+#include <asm/tlbflush.h>
#undef DEBUG
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 7d04210d34f0..2b9cb4a62281 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -20,6 +20,7 @@
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
#define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END))
@@ -39,7 +40,6 @@ void __init paging_init(void)
unsigned long address, size;
unsigned long next_pgtable, bootmem_end;
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
- enum zone_type zone;
int i;
empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
@@ -214,11 +214,6 @@ void __init cf_mmu_context_init(void)
/*
* Steal a context from a task that has one at the moment.
- * This is only used on 8xx and 4xx and we presently assume that
- * they don't do SMP. If they do then this will have to check
- * whether the MM we steal is in use.
- * We also assume that this is only used on systems that don't
- * use an MMU hash table - this is true for 8xx and 4xx.
* This isn't an LRU system, it just frees up each context in
* turn (sort-of pseudo-random replacement :). This would be the
* place to implement an LRU scheme if anyone was motivated to do it.
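
The surviving part of the mcfmmu.c comment describes how a ColdFire hardware context is reclaimed when none are free: there is no LRU, each context is simply freed in turn. A standalone sketch of that rotate-and-steal scheme is below; NR_CONTEXTS and ctx_owner are invented names standing in for the real structures.

    #include <stdio.h>

    #define NR_CONTEXTS 8   /* illustrative; the hardware fixes the real range */

    static int ctx_owner[NR_CONTEXTS];  /* which mm (an id here) holds each context */
    static unsigned int next_ctx;

    /*
     * "Sort-of pseudo-random replacement": no recency tracking, just a
     * rotating cursor. The previous owner loses its context and must
     * fault its way back in.
     */
    static unsigned int steal_context(int new_mm)
    {
        unsigned int ctx = next_ctx;

        next_ctx = (next_ctx + 1) % NR_CONTEXTS;
        ctx_owner[ctx] = new_mm;
        return ctx;
    }

    int main(void)
    {
        /* With 12 tasks and 8 contexts, the cursor wraps and steals. */
        for (int mm = 1; mm <= 12; mm++)
            printf("mm %d gets context %u\n", mm, steal_context(mm));
        return 0;
    }
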
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 65e0c4071912..fe75aecfb238 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -17,7 +17,6 @@
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
#include <asm/traps.h>
#include <asm/machdep.h>