Diffstat (limited to 'arch/powerpc/mm/hash_utils_64.c')
 arch/powerpc/mm/hash_utils_64.c | 127 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 114 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index c0d2a694fa30..49618461defb 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -100,6 +100,11 @@ unsigned int HPAGE_SHIFT;
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static u8 *linear_map_hash_slots;
+static unsigned long linear_map_hash_count;
+static DEFINE_SPINLOCK(linear_map_hash_lock);
+#endif /* CONFIG_DEBUG_PAGEALLOC */
/* There are definitions of page sizes arrays to be used when none
* is provided by the firmware.
@@ -152,11 +157,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
for (vaddr = vstart, paddr = pstart; vaddr < vend;
vaddr += step, paddr += step) {
- unsigned long vpn, hash, hpteg;
+ unsigned long hash, hpteg;
unsigned long vsid = get_kernel_vsid(vaddr);
unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
- vpn = va >> shift;
tmp_mode = mode;
/* Make non-kernel text non-executable */
@@ -174,6 +178,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
if (ret < 0)
break;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
+ linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
+#endif /* CONFIG_DEBUG_PAGEALLOC */
}
return ret < 0 ? ret : 0;
}
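For orientation (not part of the patch): the byte stored above packs two pieces of information, the slot number returned by ppc_md.hpte_insert() and a "this page is currently mapped" flag. A minimal sketch of the encoding, with hypothetical helper names:

/* One byte per linear-mapping page: bit 7 says an HPTE currently
 * exists for the page, bits 0-6 hold the slot index returned by
 * ppc_md.hpte_insert(). Helper names below are illustrative only.
 */
#define LMH_PRESENT	0x80
#define LMH_SLOT_MASK	0x7f

static inline u8 lmh_encode(int slot)
{
	return (slot & LMH_SLOT_MASK) | LMH_PRESENT;
}

static inline unsigned long lmh_slot(u8 ent)
{
	return ent & LMH_SLOT_MASK;
}

This matches the "ret | 0x80" store above and the "& 0x7f" extraction in kernel_unmap_linear_page() further down.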
@@ -281,6 +289,7 @@ static void __init htab_init_page_sizes(void)
memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
sizeof(mmu_psize_defaults_gp));
found:
+#ifndef CONFIG_DEBUG_PAGEALLOC
/*
* Pick a size for the linear mapping. Currently, we only support
* 16M, 1M and 4K which is the default
@@ -289,6 +298,7 @@ static void __init htab_init_page_sizes(void)
mmu_linear_psize = MMU_PAGE_16M;
else if (mmu_psize_defs[MMU_PAGE_1M].shift)
mmu_linear_psize = MMU_PAGE_1M;
+#endif /* CONFIG_DEBUG_PAGEALLOC */
#ifdef CONFIG_PPC_64K_PAGES
/*
@@ -303,12 +313,14 @@ static void __init htab_init_page_sizes(void)
if (mmu_psize_defs[MMU_PAGE_64K].shift) {
mmu_virtual_psize = MMU_PAGE_64K;
mmu_vmalloc_psize = MMU_PAGE_64K;
+ if (mmu_linear_psize == MMU_PAGE_4K)
+ mmu_linear_psize = MMU_PAGE_64K;
if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
mmu_io_psize = MMU_PAGE_64K;
else
mmu_ci_restrictions = 1;
}
-#endif
+#endif /* CONFIG_PPC_64K_PAGES */
printk(KERN_DEBUG "Page orders: linear mapping = %d, "
"virtual = %d, io = %d\n",
@@ -476,6 +488,13 @@ void __init htab_initialize(void)
mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
+ linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
+ 1, lmb.rmo_size));
+ memset(linear_map_hash_slots, 0, linear_map_hash_count);
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
/* On U3 based machines, we need to reserve the DART area and
* _NOT_ map it to avoid cache paradoxes as it's remapped non
* cacheable later on
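The table sized in this hunk holds one byte per page of RAM, and is allocated below lmb.rmo_size so it sits in the region that is always accessible early on. A worked sizing example, assuming 2GB of RAM and 4K pages:

/* linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT
 *                       = 0x80000000 >> 12 = 0x80000 entries,
 * i.e. a 512KB byte array. The memset() clears every entry, meaning
 * "no HPTE tracked for this page yet".
 */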
@@ -573,6 +592,27 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
return pp;
}
+/*
+ * Demote a segment to using 4k pages.
+ * For now this makes the whole process use 4k pages.
+ */
+void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
+{
+#ifdef CONFIG_PPC_64K_PAGES
+ if (mm->context.user_psize == MMU_PAGE_4K)
+ return;
+ mm->context.user_psize = MMU_PAGE_4K;
+ mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
+ get_paca()->context = mm->context;
+ slb_flush_and_rebolt();
+#ifdef CONFIG_SPU_BASE
+ spu_flush_all_slbs(mm);
+#endif
+#endif
+}
+
+EXPORT_SYMBOL_GPL(demote_segment_4k);
+
/* Result code is:
* 0 - handled
* 1 - normal page fault
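The hunks below give the usage pattern for demote_segment_4k(): it is called whenever a PTE turns up that can only be hashed at 4K while the segment is still in 64K mode. A condensed sketch of that pattern (ignoring the mmu_ci_restrictions and user_region qualifiers visible in the real hunks):

	/* A PTE that cannot live in a 64K hash segment forces the
	 * whole process down to 4k pages:
	 */
	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
		demote_segment_4k(mm, ea);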
@@ -665,15 +705,19 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
#ifndef CONFIG_PPC_64K_PAGES
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
+ /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
+ if (pte_val(*ptep) & _PAGE_4K_PFN) {
+ demote_segment_4k(mm, ea);
+ psize = MMU_PAGE_4K;
+ }
+
if (mmu_ci_restrictions) {
/* If this PTE is non-cacheable, switch to 4k */
if (psize == MMU_PAGE_64K &&
(pte_val(*ptep) & _PAGE_NO_CACHE)) {
if (user_region) {
+ demote_segment_4k(mm, ea);
psize = MMU_PAGE_4K;
- mm->context.user_psize = MMU_PAGE_4K;
- mm->context.sllp = SLB_VSID_USER |
- mmu_psize_defs[MMU_PAGE_4K].sllp;
} else if (ea < VMALLOC_END) {
/*
* some driver did a non-cacheable mapping
@@ -685,6 +729,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
"non-cacheable mapping\n");
psize = mmu_vmalloc_psize = MMU_PAGE_4K;
}
+#ifdef CONFIG_SPU_BASE
+ spu_flush_all_slbs(mm);
+#endif
}
if (user_region) {
if (psize != get_paca()->context.user_psize) {
@@ -753,13 +800,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
if (mmu_ci_restrictions) {
/* If this PTE is non-cacheable, switch to 4k */
if (mm->context.user_psize == MMU_PAGE_64K &&
- (pte_val(*ptep) & _PAGE_NO_CACHE)) {
- mm->context.user_psize = MMU_PAGE_4K;
- mm->context.sllp = SLB_VSID_USER |
- mmu_psize_defs[MMU_PAGE_4K].sllp;
- get_paca()->context = mm->context;
- slb_flush_and_rebolt();
- }
+ (pte_val(*ptep) & _PAGE_NO_CACHE))
+ demote_segment_4k(mm, ea);
}
if (mm->context.user_psize == MMU_PAGE_64K)
__hash_page_64K(ea, access, vsid, ptep, trap, local);
@@ -819,3 +861,62 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address)
}
bad_page_fault(regs, address, SIGBUS);
}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
+{
+ unsigned long hash, hpteg, vsid = get_kernel_vsid(vaddr);
+ unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+ unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
+ _PAGE_COHERENT | PP_RWXX | HPTE_R_N;
+ int ret;
+
+ hash = hpt_hash(va, PAGE_SHIFT);
+ hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+
+ ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
+ mode, HPTE_V_BOLTED, mmu_linear_psize);
+ BUG_ON(ret < 0);
+ spin_lock(&linear_map_hash_lock);
+ BUG_ON(linear_map_hash_slots[lmi] & 0x80);
+ linear_map_hash_slots[lmi] = ret | 0x80;
+ spin_unlock(&linear_map_hash_lock);
+}
+
+static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
+{
+ unsigned long hash, hidx, slot, vsid = get_kernel_vsid(vaddr);
+ unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+
+ hash = hpt_hash(va, PAGE_SHIFT);
+ spin_lock(&linear_map_hash_lock);
+ BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
+ hidx = linear_map_hash_slots[lmi] & 0x7f;
+ linear_map_hash_slots[lmi] = 0;
+ spin_unlock(&linear_map_hash_lock);
+ if (hidx & _PTEIDX_SECONDARY)
+ hash = ~hash;
+ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+ slot += hidx & _PTEIDX_GROUP_IX;
+ ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, 0);
+}
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ unsigned long flags, vaddr, lmi;
+ int i;
+
+ local_irq_save(flags);
+ for (i = 0; i < numpages; i++, page++) {
+ vaddr = (unsigned long)page_address(page);
+ lmi = __pa(vaddr) >> PAGE_SHIFT;
+ if (lmi >= linear_map_hash_count)
+ continue;
+ if (enable)
+ kernel_map_linear_page(vaddr, lmi);
+ else
+ kernel_unmap_linear_page(vaddr, lmi);
+ }
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
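For reference, kernel_map_pages() is the arch-side hook that the core page allocator invokes when CONFIG_DEBUG_PAGEALLOC is enabled; a simplified sketch of the calling convention (not the actual mm/ code):

	/* On free: drop the linear-map HPTEs so any late access to the
	 * freed pages takes a hash fault immediately.
	 */
	kernel_map_pages(page, 1 << order, 0);

	/* On allocation: re-insert the HPTEs before handing the pages out. */
	kernel_map_pages(page, 1 << order, 1);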