Diffstat:
 arch/powerpc/mm/nohash/40x.c                |  10
 arch/powerpc/mm/nohash/44x.c                |  20
 arch/powerpc/mm/nohash/8xx.c                |  46
 arch/powerpc/mm/nohash/Makefile             |   8
 arch/powerpc/mm/nohash/book3e_hugetlbpage.c | 204
 arch/powerpc/mm/nohash/book3e_pgtable.c     |  21
 arch/powerpc/mm/nohash/e500.c               | 375
 arch/powerpc/mm/nohash/e500_hugetlbpage.c   | 190
 arch/powerpc/mm/nohash/fsl_book3e.c         | 379
 arch/powerpc/mm/nohash/kaslr_booke.c        |  17
 arch/powerpc/mm/nohash/kup.c                |  33
 arch/powerpc/mm/nohash/mmu_context.c        |  15
 arch/powerpc/mm/nohash/tlb.c                |  99
 arch/powerpc/mm/nohash/tlb_low.S            |  10
 arch/powerpc/mm/nohash/tlb_low_64e.S        | 183
 15 files changed, 748 insertions(+), 862 deletions(-)
diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c
index 95751c322f6c..3684d6e570fb 100644
--- a/arch/powerpc/mm/nohash/40x.c
+++ b/arch/powerpc/mm/nohash/40x.c
@@ -32,7 +32,6 @@
#include <linux/highmem.h>
#include <linux/memblock.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
@@ -44,7 +43,6 @@
#include <mm/mmu_decl.h>
-extern int __map_without_ltlbs;
/*
* MMU_init_hw does the chip-specific initialization of the MMU hardware.
*/
@@ -95,7 +93,13 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
p = 0;
s = total_lowmem;
- if (__map_without_ltlbs)
+ if (IS_ENABLED(CONFIG_KFENCE))
+ return 0;
+
+ if (debug_pagealloc_enabled())
+ return 0;
+
+ if (strict_kernel_rwx_enabled())
return 0;
while (s >= LARGE_PAGE_SIZE_16M) {
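
The three early returns added above replace the old __map_without_ltlbs
flag: KFENCE, DEBUG_PAGEALLOC and strict kernel RWX all need page-granular
mappings, so the pinned 16M entries must be skipped. A minimal sketch of an
equivalent consolidated check (the helper name is hypothetical, not part of
this patch):

	static bool want_pagewise_mapping(void)
	{
		/* All three settings preclude 16M largepage mappings. */
		return IS_ENABLED(CONFIG_KFENCE) ||
		       debug_pagealloc_enabled() ||
		       strict_kernel_rwx_enabled();
	}
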
diff --git a/arch/powerpc/mm/nohash/44x.c b/arch/powerpc/mm/nohash/44x.c
index e079f26b267e..1beae802bb1c 100644
--- a/arch/powerpc/mm/nohash/44x.c
+++ b/arch/powerpc/mm/nohash/44x.c
@@ -38,7 +38,7 @@ int icache_44x_need_flush;
unsigned long tlb_47x_boltmap[1024/8];
-static void ppc44x_update_tlb_hwater(void)
+static void __init ppc44x_update_tlb_hwater(void)
{
/* The TLB miss handlers hard codes the watermark in a cmpli
* instruction to improve performances rather than loading it
@@ -122,7 +122,7 @@ static void __init ppc47x_update_boltmap(void)
/*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
*/
-static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
+static void __init ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
{
unsigned int rA;
int bolted;
@@ -240,19 +240,3 @@ void __init mmu_init_secondary(int cpu)
}
}
#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_PPC_KUEP
-void setup_kuep(bool disabled)
-{
- if (smp_processor_id() != boot_cpuid)
- return;
-
- if (disabled)
- patch_instruction_site(&patch__tlb_44x_kuep, ppc_inst(PPC_RAW_NOP()));
- else
- pr_info("Activating Kernel Userspace Execution Prevention\n");
-
- if (IS_ENABLED(CONFIG_PPC_47x) && disabled)
- patch_instruction_site(&patch__tlb_47x_kuep, ppc_inst(PPC_RAW_NOP()));
-}
-#endif
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 0df9fe29dd56..dbbfe897455d 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -8,18 +8,12 @@
*/
#include <linux/memblock.h>
-#include <linux/mmu_context.h>
#include <linux/hugetlb.h>
-#include <asm/fixmap.h>
-#include <asm/code-patching.h>
-#include <asm/inst.h>
#include <mm/mmu_decl.h>
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)
-extern int __map_without_ltlbs;
-
static unsigned long block_mapped_ram;
/*
@@ -32,8 +26,6 @@ phys_addr_t v_block_mapped(unsigned long va)
if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
return p + va - VIRT_IMMR_BASE;
- if (__map_without_ltlbs)
- return 0;
if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
return __pa(va);
return 0;
@@ -49,8 +41,6 @@ unsigned long p_block_mapped(phys_addr_t pa)
if (pa >= p && pa < p + IMMR_SIZE)
return VIRT_IMMR_BASE + pa - p;
- if (__map_without_ltlbs)
- return 0;
if (pa < block_mapped_ram)
return (unsigned long)__va(pa);
return 0;
@@ -157,9 +147,6 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
mmu_mapin_immr();
- if (__map_without_ltlbs)
- return 0;
-
mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
if (debug_pagealloc_enabled_or_kfence()) {
top = boundary;
@@ -183,8 +170,8 @@ void mmu_mark_initmem_nx(void)
unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
- mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
- mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
+ if (!debug_pagealloc_enabled_or_kfence())
+ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
mmu_pin_tlb(block_mapped_ram, false);
}
@@ -212,35 +199,6 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}
-#ifdef CONFIG_PPC_KUEP
-void __init setup_kuep(bool disabled)
-{
- if (disabled)
- return;
-
- pr_info("Activating Kernel Userspace Execution Prevention\n");
-
- mtspr(SPRN_MI_AP, MI_APG_KUEP);
-}
-#endif
-
-#ifdef CONFIG_PPC_KUAP
-struct static_key_false disable_kuap_key;
-EXPORT_SYMBOL(disable_kuap_key);
-
-void __init setup_kuap(bool disabled)
-{
- if (disabled) {
- static_branch_enable(&disable_kuap_key);
- return;
- }
-
- pr_info("Activating Kernel Userspace Access Protection\n");
-
- mtspr(SPRN_MD_AP, MD_APG_KUAP);
-}
-#endif
-
int pud_clear_huge(pud_t *pud)
{
return 0;
diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile
index b1f630d423d8..f3894e79d5f7 100644
--- a/arch/powerpc/mm/nohash/Makefile
+++ b/arch/powerpc/mm/nohash/Makefile
@@ -2,18 +2,18 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y += mmu_context.o tlb.o tlb_low.o
+obj-y += mmu_context.o tlb.o tlb_low.o kup.o
obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o
obj-$(CONFIG_40x) += 40x.o
obj-$(CONFIG_44x) += 44x.o
obj-$(CONFIG_PPC_8xx) += 8xx.o
-obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_book3e.o
+obj-$(CONFIG_PPC_E500) += e500.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr_booke.o
ifdef CONFIG_HUGETLB_PAGE
-obj-$(CONFIG_PPC_FSL_BOOK3E) += book3e_hugetlbpage.o
+obj-$(CONFIG_PPC_E500) += e500_hugetlbpage.o
endif
# Disable kcov instrumentation on sensitive code
# This is necessary for booting with kcov enabled on book3e machines
KCOV_INSTRUMENT_tlb.o := n
-KCOV_INSTRUMENT_fsl_book3e.o := n
+KCOV_INSTRUMENT_e500.o := n
diff --git a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
deleted file mode 100644
index 8b88be91b622..000000000000
--- a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
+++ /dev/null
@@ -1,204 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PPC Huge TLB Page Support for Book3E MMU
- *
- * Copyright (C) 2009 David Gibson, IBM Corporation.
- * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
- *
- */
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-
-#include <asm/mmu.h>
-
-#ifdef CONFIG_PPC64
-#include <asm/paca.h>
-
-static inline int tlb1_next(void)
-{
- struct paca_struct *paca = get_paca();
- struct tlb_core_data *tcd;
- int this, next;
-
- tcd = paca->tcd_ptr;
- this = tcd->esel_next;
-
- next = this + 1;
- if (next >= tcd->esel_max)
- next = tcd->esel_first;
-
- tcd->esel_next = next;
- return this;
-}
-
-static inline void book3e_tlb_lock(void)
-{
- struct paca_struct *paca = get_paca();
- unsigned long tmp;
- int token = smp_processor_id() + 1;
-
- /*
- * Besides being unnecessary in the absence of SMT, this
- * check prevents trying to do lbarx/stbcx. on e5500 which
- * doesn't implement either feature.
- */
- if (!cpu_has_feature(CPU_FTR_SMT))
- return;
-
- asm volatile("1: lbarx %0, 0, %1;"
- "cmpwi %0, 0;"
- "bne 2f;"
- "stbcx. %2, 0, %1;"
- "bne 1b;"
- "b 3f;"
- "2: lbzx %0, 0, %1;"
- "cmpwi %0, 0;"
- "bne 2b;"
- "b 1b;"
- "3:"
- : "=&r" (tmp)
- : "r" (&paca->tcd_ptr->lock), "r" (token)
- : "memory");
-}
-
-static inline void book3e_tlb_unlock(void)
-{
- struct paca_struct *paca = get_paca();
-
- if (!cpu_has_feature(CPU_FTR_SMT))
- return;
-
- isync();
- paca->tcd_ptr->lock = 0;
-}
-#else
-static inline int tlb1_next(void)
-{
- int index, ncams;
-
- ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
-
- index = this_cpu_read(next_tlbcam_idx);
-
- /* Just round-robin the entries and wrap when we hit the end */
- if (unlikely(index == ncams - 1))
- __this_cpu_write(next_tlbcam_idx, tlbcam_index);
- else
- __this_cpu_inc(next_tlbcam_idx);
-
- return index;
-}
-
-static inline void book3e_tlb_lock(void)
-{
-}
-
-static inline void book3e_tlb_unlock(void)
-{
-}
-#endif
-
-static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
-{
- int found = 0;
-
- mtspr(SPRN_MAS6, pid << 16);
- if (mmu_has_feature(MMU_FTR_USE_TLBRSRV)) {
- asm volatile(
- "li %0,0\n"
- "tlbsx. 0,%1\n"
- "bne 1f\n"
- "li %0,1\n"
- "1:\n"
- : "=&r"(found) : "r"(ea));
- } else {
- asm volatile(
- "tlbsx 0,%1\n"
- "mfspr %0,0x271\n"
- "srwi %0,%0,31\n"
- : "=&r"(found) : "r"(ea));
- }
-
- return found;
-}
-
-static void
-book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
-{
- unsigned long mas1, mas2;
- u64 mas7_3;
- unsigned long psize, tsize, shift;
- unsigned long flags;
- struct mm_struct *mm;
- int index;
-
- if (unlikely(is_kernel_addr(ea)))
- return;
-
- mm = vma->vm_mm;
-
- psize = vma_mmu_pagesize(vma);
- shift = __ilog2(psize);
- tsize = shift - 10;
- /*
- * We can't be interrupted while we're setting up the MAS
- * regusters or after we've confirmed that no tlb exists.
- */
- local_irq_save(flags);
-
- book3e_tlb_lock();
-
- if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
- book3e_tlb_unlock();
- local_irq_restore(flags);
- return;
- }
-
- /* We have to use the CAM(TLB1) on FSL parts for hugepages */
- index = tlb1_next();
- mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
-
- mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
- mas2 = ea & ~((1UL << shift) - 1);
- mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
- mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
- mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
- if (!pte_dirty(pte))
- mas7_3 &= ~(MAS3_SW|MAS3_UW);
-
- mtspr(SPRN_MAS1, mas1);
- mtspr(SPRN_MAS2, mas2);
-
- if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
- mtspr(SPRN_MAS7_MAS3, mas7_3);
- } else {
- if (mmu_has_feature(MMU_FTR_BIG_PHYS))
- mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
- mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
- }
-
- asm volatile ("tlbwe");
-
- book3e_tlb_unlock();
- local_irq_restore(flags);
-}
-
-/*
- * This is called at the end of handling a user page fault, when the
- * fault has been handled by updating a PTE in the linux page tables.
- *
- * This must always be called with the pte lock held.
- */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
-{
- if (is_vm_hugetlb_page(vma))
- book3e_hugetlb_preload(vma, address, *ptep);
-}
-
-void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
-{
- struct hstate *hstate = hstate_file(vma->vm_file);
- unsigned long tsize = huge_page_shift(hstate) - 10;
-
- __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
-}
diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c
index 77884e24281d..b80fc4a91a53 100644
--- a/arch/powerpc/mm/nohash/book3e_pgtable.c
+++ b/arch/powerpc/mm/nohash/book3e_pgtable.c
@@ -10,6 +10,7 @@
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/dma.h>
+#include <asm/code-patching.h>
#include <mm/mmu_decl.h>
@@ -95,8 +96,8 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
pgdp = pgd_offset_k(ea);
p4dp = p4d_offset(pgdp, ea);
if (p4d_none(*p4dp)) {
- pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
- p4d_populate(&init_mm, p4dp, pmdp);
+ pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
+ p4d_populate(&init_mm, p4dp, pudp);
}
pudp = pud_offset(p4dp, ea);
if (pud_none(*pudp)) {
@@ -105,7 +106,7 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
}
pmdp = pmd_offset(pudp, ea);
if (!pmd_present(*pmdp)) {
- ptep = early_alloc_pgtable(PAGE_SIZE);
+ ptep = early_alloc_pgtable(PTE_TABLE_SIZE);
pmd_populate_kernel(&init_mm, pmdp, ptep);
}
ptep = pte_offset_kernel(pmdp, ea);
@@ -115,3 +116,17 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
smp_wmb();
return 0;
}
+
+void __patch_exception(int exc, unsigned long addr)
+{
+ unsigned int *ibase = &interrupt_base_book3e;
+
+ /*
+ * Our exceptions vectors start with a NOP and -then- a branch
+ * to deal with single stepping from userspace which stops on
+ * the second instruction. Thus we need to patch the second
+ * instruction of the exception, not the first one.
+ */
+
+ patch_branch(ibase + (exc / 4) + 1, addr, 0);
+}
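
For illustration, the call patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e)
made later in tlb.c ends up here as roughly (values shown for illustration
only):

	/* exc = 0x1c0: 0x1c0 / 4 = word 112 from interrupt_base_book3e;
	 * the +1 skips the vector's leading NOP, so the branch is written
	 * at byte offset 0x1c4, the vector's second instruction. */
	patch_branch(ibase + (0x1c0 / 4) + 1, addr, 0);
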
diff --git a/arch/powerpc/mm/nohash/e500.c b/arch/powerpc/mm/nohash/e500.c
new file mode 100644
index 000000000000..40a4e69ae1a9
--- /dev/null
+++ b/arch/powerpc/mm/nohash/e500.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Modifications by Kumar Gala (galak@kernel.crashing.org) to support
+ * E500 Book E processors.
+ *
+ * Copyright 2004,2010 Freescale Semiconductor, Inc.
+ *
+ * This file contains the routines for initializing the MMU
+ * on the 4xx series of chips.
+ * -- paulus
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/mmu.h>
+#include <linux/uaccess.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/setup.h>
+#include <asm/paca.h>
+
+#include <mm/mmu_decl.h>
+
+unsigned int tlbcam_index;
+
+struct tlbcam TLBCAM[NUM_TLBCAMS];
+
+static struct {
+ unsigned long start;
+ unsigned long limit;
+ phys_addr_t phys;
+} tlbcam_addrs[NUM_TLBCAMS];
+
+#ifdef CONFIG_PPC_85xx
+/*
+ * Return PA for this VA if it is mapped by a CAM, or 0
+ */
+phys_addr_t v_block_mapped(unsigned long va)
+{
+ int b;
+ for (b = 0; b < tlbcam_index; ++b)
+ if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
+ return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
+ return 0;
+}
+
+/*
+ * Return VA for a given PA or 0 if not mapped
+ */
+unsigned long p_block_mapped(phys_addr_t pa)
+{
+ int b;
+ for (b = 0; b < tlbcam_index; ++b)
+ if (pa >= tlbcam_addrs[b].phys
+ && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
+ +tlbcam_addrs[b].phys)
+ return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
+ return 0;
+}
+#endif
+
+/*
+ * Set up a variable-size TLB entry (tlbcam). The parameters are not checked;
+ * in particular size must be a power of 4 between 4k and the max supported by
+ * an implementation; max may further be limited by what can be represented in
+ * an unsigned long (for example, 32-bit implementations cannot support a 4GB
+ * size).
+ */
+static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
+ unsigned long size, unsigned long flags, unsigned int pid)
+{
+ unsigned int tsize;
+
+ tsize = __ilog2(size) - 10;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+ if ((flags & _PAGE_NO_CACHE) == 0)
+ flags |= _PAGE_COHERENT;
+#endif
+
+ TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
+ TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
+ TLBCAM[index].MAS2 = virt & PAGE_MASK;
+
+ TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
+ TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
+ TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
+ TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
+ TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
+
+ TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0;
+ if (mmu_has_feature(MMU_FTR_BIG_PHYS))
+ TLBCAM[index].MAS7 = (u64)phys >> 32;
+
+ /* Below is unlikely -- only for large user pages or similar */
+ if (pte_user(__pte(flags))) {
+ TLBCAM[index].MAS3 |= MAS3_UR;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0;
+ } else {
+ TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
+ }
+
+ tlbcam_addrs[index].start = virt;
+ tlbcam_addrs[index].limit = virt + size - 1;
+ tlbcam_addrs[index].phys = phys;
+}
+
+static unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
+ phys_addr_t phys)
+{
+ unsigned int camsize = __ilog2(ram);
+ unsigned int align = __ffs(virt | phys);
+ unsigned long max_cam;
+
+ if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
+ /* Convert (4^max) kB to (2^max) bytes */
+ max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10;
+ camsize &= ~1U;
+ align &= ~1U;
+ } else {
+ /* Convert (2^max) kB to (2^max) bytes */
+ max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10;
+ }
+
+ if (camsize > align)
+ camsize = align;
+ if (camsize > max_cam)
+ camsize = max_cam;
+
+ return 1UL << camsize;
+}
+
+static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
+ unsigned long ram, int max_cam_idx,
+ bool dryrun, bool init)
+{
+ int i;
+ unsigned long amount_mapped = 0;
+ unsigned long boundary;
+
+ if (strict_kernel_rwx_enabled())
+ boundary = (unsigned long)(_sinittext - _stext);
+ else
+ boundary = ram;
+
+ /* Calculate CAM values */
+ for (i = 0; boundary && i < max_cam_idx; i++) {
+ unsigned long cam_sz;
+ pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX;
+
+ cam_sz = calc_cam_sz(boundary, virt, phys);
+ if (!dryrun)
+ settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
+
+ boundary -= cam_sz;
+ amount_mapped += cam_sz;
+ virt += cam_sz;
+ phys += cam_sz;
+ }
+ for (ram -= amount_mapped; ram && i < max_cam_idx; i++) {
+ unsigned long cam_sz;
+ pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL;
+
+ cam_sz = calc_cam_sz(ram, virt, phys);
+ if (!dryrun)
+ settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
+
+ ram -= cam_sz;
+ amount_mapped += cam_sz;
+ virt += cam_sz;
+ phys += cam_sz;
+ }
+
+ if (dryrun)
+ return amount_mapped;
+
+ if (init) {
+ loadcam_multi(0, i, max_cam_idx);
+ tlbcam_index = i;
+ } else {
+ loadcam_multi(0, i, 0);
+ WARN_ON(i > tlbcam_index);
+ }
+
+#ifdef CONFIG_PPC64
+ get_paca()->tcd.esel_next = i;
+ get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+ get_paca()->tcd.esel_first = i;
+#endif
+
+ return amount_mapped;
+}
+
+unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init)
+{
+ unsigned long virt = PAGE_OFFSET;
+ phys_addr_t phys = memstart_addr;
+
+ return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init);
+}
+
+#ifdef CONFIG_PPC32
+
+#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
+#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
+#endif
+
+unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
+{
+ return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1;
+}
+
+void flush_instruction_cache(void)
+{
+ unsigned long tmp;
+
+ tmp = mfspr(SPRN_L1CSR1);
+ tmp |= L1CSR1_ICFI | L1CSR1_ICLFR;
+ mtspr(SPRN_L1CSR1, tmp);
+ isync();
+}
+
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
+{
+ flush_instruction_cache();
+}
+
+static unsigned long __init tlbcam_sz(int idx)
+{
+ return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
+}
+
+void __init adjust_total_lowmem(void)
+{
+ unsigned long ram;
+ int i;
+
+ /* adjust lowmem size to __max_low_memory */
+ ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
+
+ i = switch_to_as1();
+ __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);
+ restore_to_as0(i, 0, NULL, 1);
+
+ pr_info("Memory CAM mapping: ");
+ for (i = 0; i < tlbcam_index - 1; i++)
+ pr_cont("%lu/", tlbcam_sz(i) >> 20);
+ pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
+ (unsigned int)((total_lowmem - __max_low_memory) >> 20));
+
+ memblock_set_current_limit(memstart_addr + __max_low_memory);
+}
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mmu_mark_rodata_ro(void)
+{
+ unsigned long remapped;
+
+ remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
+
+ WARN_ON(__max_low_memory != remapped);
+}
+#endif
+
+void mmu_mark_initmem_nx(void)
+{
+ /* Everything is done in mmu_mark_rodata_ro() */
+}
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+ phys_addr_t first_memblock_size)
+{
+ phys_addr_t limit = first_memblock_base + first_memblock_size;
+
+ /* 64M mapped initially according to head_fsl_booke.S */
+ memblock_set_current_limit(min_t(u64, limit, 0x04000000));
+}
+
+#ifdef CONFIG_RELOCATABLE
+int __initdata is_second_reloc;
+notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
+{
+ unsigned long base = kernstart_virt_addr;
+ phys_addr_t size;
+
+ kernstart_addr = start;
+ if (is_second_reloc) {
+ virt_phys_offset = PAGE_OFFSET - memstart_addr;
+ kaslr_late_init();
+ return;
+ }
+
+ /*
+ * Relocatable kernel support based on processing of dynamic
+ * relocation entries. Before we get the real memstart_addr,
+ * We will compute the virt_phys_offset like this:
+ * virt_phys_offset = stext.run - kernstart_addr
+ *
+ * stext.run = (KERNELBASE & ~0x3ffffff) +
+ * (kernstart_addr & 0x3ffffff)
+ * When we relocate, we have :
+ *
+ * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff)
+ *
+ * hence:
+ * virt_phys_offset = (KERNELBASE & ~0x3ffffff) -
+ * (kernstart_addr & ~0x3ffffff)
+ *
+ */
+ start &= ~0x3ffffff;
+ base &= ~0x3ffffff;
+ virt_phys_offset = base - start;
+ early_get_first_memblock_info(__va(dt_ptr), &size);
+ /*
+ * We now get the memstart_addr, then we should check if this
+ * address is the same as what the PAGE_OFFSET map to now. If
+ * not we have to change the map of PAGE_OFFSET to memstart_addr
+ * and do a second relocation.
+ */
+ if (start != memstart_addr) {
+ int n;
+ long offset = start - memstart_addr;
+
+ is_second_reloc = 1;
+ n = switch_to_as1();
+ /* map a 64M area for the second relocation */
+ if (memstart_addr > start)
+ map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM,
+ false, true);
+ else
+ map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
+ 0x4000000, CONFIG_LOWMEM_CAM_NUM,
+ false, true);
+ restore_to_as0(n, offset, __va(dt_ptr), 1);
+ /* We should never reach here */
+ panic("Relocation error");
+ }
+
+ kaslr_early_init(__va(dt_ptr), size);
+}
+#endif
+#endif
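
A quick worked example for calc_cam_sz() above (values assumed for
illustration):

	unsigned long ram  = SZ_256M;		/* lowmem left to map */
	unsigned long virt = 0xc0000000;	/* 32-bit PAGE_OFFSET */
	phys_addr_t phys   = 0;

	/* __ilog2(ram) = 28 and __ffs(virt | phys) = 30, so camsize = 28.
	 * MAV 1.0 parts additionally round camsize/align down to even
	 * values (power-of-4 sizes); 28 is already even, so, max_cam
	 * permitting, the result is 1UL << 28 and the whole 256M block
	 * fits in a single TLBCAM entry. */
	unsigned long sz = calc_cam_sz(ram, virt, phys);
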
diff --git a/arch/powerpc/mm/nohash/e500_hugetlbpage.c b/arch/powerpc/mm/nohash/e500_hugetlbpage.c
new file mode 100644
index 000000000000..c7d4b317a823
--- /dev/null
+++ b/arch/powerpc/mm/nohash/e500_hugetlbpage.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PPC Huge TLB Page Support for Book3E MMU
+ *
+ * Copyright (C) 2009 David Gibson, IBM Corporation.
+ * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
+ *
+ */
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+
+#include <asm/mmu.h>
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+
+static inline int tlb1_next(void)
+{
+ struct paca_struct *paca = get_paca();
+ struct tlb_core_data *tcd;
+ int this, next;
+
+ tcd = paca->tcd_ptr;
+ this = tcd->esel_next;
+
+ next = this + 1;
+ if (next >= tcd->esel_max)
+ next = tcd->esel_first;
+
+ tcd->esel_next = next;
+ return this;
+}
+
+static inline void book3e_tlb_lock(void)
+{
+ struct paca_struct *paca = get_paca();
+ unsigned long tmp;
+ int token = smp_processor_id() + 1;
+
+ /*
+ * Besides being unnecessary in the absence of SMT, this
+ * check prevents trying to do lbarx/stbcx. on e5500 which
+ * doesn't implement either feature.
+ */
+ if (!cpu_has_feature(CPU_FTR_SMT))
+ return;
+
+ asm volatile("1: lbarx %0, 0, %1;"
+ "cmpwi %0, 0;"
+ "bne 2f;"
+ "stbcx. %2, 0, %1;"
+ "bne 1b;"
+ "b 3f;"
+ "2: lbzx %0, 0, %1;"
+ "cmpwi %0, 0;"
+ "bne 2b;"
+ "b 1b;"
+ "3:"
+ : "=&r" (tmp)
+ : "r" (&paca->tcd_ptr->lock), "r" (token)
+ : "memory");
+}
+
+static inline void book3e_tlb_unlock(void)
+{
+ struct paca_struct *paca = get_paca();
+
+ if (!cpu_has_feature(CPU_FTR_SMT))
+ return;
+
+ isync();
+ paca->tcd_ptr->lock = 0;
+}
+#else
+static inline int tlb1_next(void)
+{
+ int index, ncams;
+
+ ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+
+ index = this_cpu_read(next_tlbcam_idx);
+
+ /* Just round-robin the entries and wrap when we hit the end */
+ if (unlikely(index == ncams - 1))
+ __this_cpu_write(next_tlbcam_idx, tlbcam_index);
+ else
+ __this_cpu_inc(next_tlbcam_idx);
+
+ return index;
+}
+
+static inline void book3e_tlb_lock(void)
+{
+}
+
+static inline void book3e_tlb_unlock(void)
+{
+}
+#endif
+
+static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
+{
+ int found = 0;
+
+ mtspr(SPRN_MAS6, pid << 16);
+ asm volatile(
+ "tlbsx 0,%1\n"
+ "mfspr %0,0x271\n"
+ "srwi %0,%0,31\n"
+ : "=&r"(found) : "r"(ea));
+
+ return found;
+}
+
+static void
+book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
+{
+ unsigned long mas1, mas2;
+ u64 mas7_3;
+ unsigned long psize, tsize, shift;
+ unsigned long flags;
+ struct mm_struct *mm;
+ int index;
+
+ if (unlikely(is_kernel_addr(ea)))
+ return;
+
+ mm = vma->vm_mm;
+
+ psize = vma_mmu_pagesize(vma);
+ shift = __ilog2(psize);
+ tsize = shift - 10;
+ /*
+ * We can't be interrupted while we're setting up the MAS
+ * registers or after we've confirmed that no tlb exists.
+ */
+ local_irq_save(flags);
+
+ book3e_tlb_lock();
+
+ if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
+ book3e_tlb_unlock();
+ local_irq_restore(flags);
+ return;
+ }
+
+ /* We have to use the CAM(TLB1) on FSL parts for hugepages */
+ index = tlb1_next();
+ mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
+
+ mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
+ mas2 = ea & ~((1UL << shift) - 1);
+ mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
+ mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
+ mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
+ if (!pte_dirty(pte))
+ mas7_3 &= ~(MAS3_SW|MAS3_UW);
+
+ mtspr(SPRN_MAS1, mas1);
+ mtspr(SPRN_MAS2, mas2);
+
+ if (mmu_has_feature(MMU_FTR_BIG_PHYS))
+ mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
+ mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
+
+ asm volatile ("tlbwe");
+
+ book3e_tlb_unlock();
+ local_irq_restore(flags);
+}
+
+/*
+ * This is called at the end of handling a user page fault, when the
+ * fault has been handled by updating a PTE in the linux page tables.
+ *
+ * This must always be called with the pte lock held.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+ if (is_vm_hugetlb_page(vma))
+ book3e_hugetlb_preload(vma, address, *ptep);
+}
+
+void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ struct hstate *hstate = hstate_file(vma->vm_file);
+ unsigned long tsize = huge_page_shift(hstate) - 10;
+
+ __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
+}
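
The tsize = shift - 10 computations in this file follow the Book3E TSIZE
encoding, where TSIZE n selects a 2^n kB page. A quick sanity check, with
the default FSL 4M huge-page size assumed for illustration:

	unsigned long shift = 22;		/* 4M: huge_page_shift() */
	unsigned long tsize = shift - 10;	/* 12, and 2^12 kB == 4M */
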
diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/fsl_book3e.c
deleted file mode 100644
index b231a54f540c..000000000000
--- a/arch/powerpc/mm/nohash/fsl_book3e.c
+++ /dev/null
@@ -1,379 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Modifications by Kumar Gala (galak@kernel.crashing.org) to support
- * E500 Book E processors.
- *
- * Copyright 2004,2010 Freescale Semiconductor, Inc.
- *
- * This file contains the routines for initializing the MMU
- * on the 4xx series of chips.
- * -- paulus
- *
- * Derived from arch/ppc/mm/init.c:
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
- * and Cort Dougan (PReP) (cort@cs.nmt.edu)
- * Copyright (C) 1996 Paul Mackerras
- *
- * Derived from "arch/i386/mm/init.c"
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/stddef.h>
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/highmem.h>
-#include <linux/memblock.h>
-
-#include <asm/prom.h>
-#include <asm/io.h>
-#include <asm/mmu_context.h>
-#include <asm/mmu.h>
-#include <linux/uaccess.h>
-#include <asm/smp.h>
-#include <asm/machdep.h>
-#include <asm/setup.h>
-#include <asm/paca.h>
-
-#include <mm/mmu_decl.h>
-
-unsigned int tlbcam_index;
-
-#define NUM_TLBCAMS (64)
-struct tlbcam TLBCAM[NUM_TLBCAMS];
-
-struct tlbcamrange {
- unsigned long start;
- unsigned long limit;
- phys_addr_t phys;
-} tlbcam_addrs[NUM_TLBCAMS];
-
-unsigned long tlbcam_sz(int idx)
-{
- return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
-}
-
-#ifdef CONFIG_FSL_BOOKE
-/*
- * Return PA for this VA if it is mapped by a CAM, or 0
- */
-phys_addr_t v_block_mapped(unsigned long va)
-{
- int b;
- for (b = 0; b < tlbcam_index; ++b)
- if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
- return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
- return 0;
-}
-
-/*
- * Return VA for a given PA or 0 if not mapped
- */
-unsigned long p_block_mapped(phys_addr_t pa)
-{
- int b;
- for (b = 0; b < tlbcam_index; ++b)
- if (pa >= tlbcam_addrs[b].phys
- && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
- +tlbcam_addrs[b].phys)
- return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
- return 0;
-}
-#endif
-
-/*
- * Set up a variable-size TLB entry (tlbcam). The parameters are not checked;
- * in particular size must be a power of 4 between 4k and the max supported by
- * an implementation; max may further be limited by what can be represented in
- * an unsigned long (for example, 32-bit implementations cannot support a 4GB
- * size).
- */
-static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
- unsigned long size, unsigned long flags, unsigned int pid)
-{
- unsigned int tsize;
-
- tsize = __ilog2(size) - 10;
-
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
- if ((flags & _PAGE_NO_CACHE) == 0)
- flags |= _PAGE_COHERENT;
-#endif
-
- TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
- TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
- TLBCAM[index].MAS2 = virt & PAGE_MASK;
-
- TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
- TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
- TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
- TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
- TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
-
- TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR;
- TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0;
- if (mmu_has_feature(MMU_FTR_BIG_PHYS))
- TLBCAM[index].MAS7 = (u64)phys >> 32;
-
- /* Below is unlikely -- only for large user pages or similar */
- if (pte_user(__pte(flags))) {
- TLBCAM[index].MAS3 |= MAS3_UR;
- TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
- TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0;
- } else {
- TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
- }
-
- tlbcam_addrs[index].start = virt;
- tlbcam_addrs[index].limit = virt + size - 1;
- tlbcam_addrs[index].phys = phys;
-}
-
-unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
- phys_addr_t phys)
-{
- unsigned int camsize = __ilog2(ram);
- unsigned int align = __ffs(virt | phys);
- unsigned long max_cam;
-
- if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
- /* Convert (4^max) kB to (2^max) bytes */
- max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10;
- camsize &= ~1U;
- align &= ~1U;
- } else {
- /* Convert (2^max) kB to (2^max) bytes */
- max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10;
- }
-
- if (camsize > align)
- camsize = align;
- if (camsize > max_cam)
- camsize = max_cam;
-
- return 1UL << camsize;
-}
-
-static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
- unsigned long ram, int max_cam_idx,
- bool dryrun, bool init)
-{
- int i;
- unsigned long amount_mapped = 0;
- unsigned long boundary;
-
- if (strict_kernel_rwx_enabled())
- boundary = (unsigned long)(_sinittext - _stext);
- else
- boundary = ram;
-
- /* Calculate CAM values */
- for (i = 0; boundary && i < max_cam_idx; i++) {
- unsigned long cam_sz;
- pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX;
-
- cam_sz = calc_cam_sz(boundary, virt, phys);
- if (!dryrun)
- settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
-
- boundary -= cam_sz;
- amount_mapped += cam_sz;
- virt += cam_sz;
- phys += cam_sz;
- }
- for (ram -= amount_mapped; ram && i < max_cam_idx; i++) {
- unsigned long cam_sz;
- pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL;
-
- cam_sz = calc_cam_sz(ram, virt, phys);
- if (!dryrun)
- settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
-
- ram -= cam_sz;
- amount_mapped += cam_sz;
- virt += cam_sz;
- phys += cam_sz;
- }
-
- if (dryrun)
- return amount_mapped;
-
- if (init) {
- loadcam_multi(0, i, max_cam_idx);
- tlbcam_index = i;
- } else {
- loadcam_multi(0, i, 0);
- WARN_ON(i > tlbcam_index);
- }
-
-#ifdef CONFIG_PPC64
- get_paca()->tcd.esel_next = i;
- get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
- get_paca()->tcd.esel_first = i;
-#endif
-
- return amount_mapped;
-}
-
-unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init)
-{
- unsigned long virt = PAGE_OFFSET;
- phys_addr_t phys = memstart_addr;
-
- return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init);
-}
-
-#ifdef CONFIG_PPC32
-
-#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
-#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
-#endif
-
-unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
-{
- return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1;
-}
-
-void flush_instruction_cache(void)
-{
- unsigned long tmp;
-
- tmp = mfspr(SPRN_L1CSR1);
- tmp |= L1CSR1_ICFI | L1CSR1_ICLFR;
- mtspr(SPRN_L1CSR1, tmp);
- isync();
-}
-
-/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
- */
-void __init MMU_init_hw(void)
-{
- flush_instruction_cache();
-}
-
-void __init adjust_total_lowmem(void)
-{
- unsigned long ram;
- int i;
-
- /* adjust lowmem size to __max_low_memory */
- ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
-
- i = switch_to_as1();
- __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);
- restore_to_as0(i, 0, 0, 1);
-
- pr_info("Memory CAM mapping: ");
- for (i = 0; i < tlbcam_index - 1; i++)
- pr_cont("%lu/", tlbcam_sz(i) >> 20);
- pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
- (unsigned int)((total_lowmem - __max_low_memory) >> 20));
-
- memblock_set_current_limit(memstart_addr + __max_low_memory);
-}
-
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void mmu_mark_rodata_ro(void)
-{
- /* Everything is done in mmu_mark_initmem_nx() */
-}
-#endif
-
-void mmu_mark_initmem_nx(void)
-{
- unsigned long remapped;
-
- if (!strict_kernel_rwx_enabled())
- return;
-
- remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
-
- WARN_ON(__max_low_memory != remapped);
-}
-
-void setup_initial_memory_limit(phys_addr_t first_memblock_base,
- phys_addr_t first_memblock_size)
-{
- phys_addr_t limit = first_memblock_base + first_memblock_size;
-
- /* 64M mapped initially according to head_fsl_booke.S */
- memblock_set_current_limit(min_t(u64, limit, 0x04000000));
-}
-
-#ifdef CONFIG_RELOCATABLE
-int __initdata is_second_reloc;
-notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
-{
- unsigned long base = kernstart_virt_addr;
- phys_addr_t size;
-
- kernstart_addr = start;
- if (is_second_reloc) {
- virt_phys_offset = PAGE_OFFSET - memstart_addr;
- kaslr_late_init();
- return;
- }
-
- /*
- * Relocatable kernel support based on processing of dynamic
- * relocation entries. Before we get the real memstart_addr,
- * We will compute the virt_phys_offset like this:
- * virt_phys_offset = stext.run - kernstart_addr
- *
- * stext.run = (KERNELBASE & ~0x3ffffff) +
- * (kernstart_addr & 0x3ffffff)
- * When we relocate, we have :
- *
- * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff)
- *
- * hence:
- * virt_phys_offset = (KERNELBASE & ~0x3ffffff) -
- * (kernstart_addr & ~0x3ffffff)
- *
- */
- start &= ~0x3ffffff;
- base &= ~0x3ffffff;
- virt_phys_offset = base - start;
- early_get_first_memblock_info(__va(dt_ptr), &size);
- /*
- * We now get the memstart_addr, then we should check if this
- * address is the same as what the PAGE_OFFSET map to now. If
- * not we have to change the map of PAGE_OFFSET to memstart_addr
- * and do a second relocation.
- */
- if (start != memstart_addr) {
- int n;
- long offset = start - memstart_addr;
-
- is_second_reloc = 1;
- n = switch_to_as1();
- /* map a 64M area for the second relocation */
- if (memstart_addr > start)
- map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM,
- false, true);
- else
- map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
- 0x4000000, CONFIG_LOWMEM_CAM_NUM,
- false, true);
- restore_to_as0(n, offset, __va(dt_ptr), 1);
- /* We should never reach here */
- panic("Relocation error");
- }
-
- kaslr_early_init(__va(dt_ptr), size);
-}
-#endif
-#endif
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 6ec978967da0..0d04f9d5da8d 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -14,11 +14,11 @@
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/cacheflush.h>
-#include <asm/prom.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
-#include <generated/compile.h>
#include <generated/utsrelease.h>
struct regions {
@@ -36,17 +36,11 @@ struct regions {
int reserved_mem_size_cells;
};
-/* Simplified build-specific string for starting entropy. */
-static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
- LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
-
struct regions __initdata regions;
static __init void kaslr_get_cmdline(void *fdt)
{
- int node = fdt_path_offset(fdt, "/chosen");
-
- early_init_dt_scan_chosen(node, "chosen", 1, boot_command_line);
+ early_init_dt_scan_chosen(boot_command_line);
}
static unsigned long __init rotate_xor(unsigned long hash, const void *area,
@@ -72,7 +66,8 @@ static unsigned long __init get_boot_seed(void *fdt)
{
unsigned long hash = 0;
- hash = rotate_xor(hash, build_str, sizeof(build_str));
+ /* build-specific string for starting entropy. */
+ hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));
return hash;
@@ -317,7 +312,7 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
linear_sz = min_t(unsigned long, ram, SZ_512M);
- /* If the linear size is smaller than 64M, do not randmize */
+ /* If the linear size is smaller than 64M, do not randomize */
if (linear_sz < SZ_64M)
return 0;
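
With build_str gone, the seed derivation in get_boot_seed() now reads, in
sketch form (names taken from the file itself):

	unsigned long hash = 0;

	/* linux_banner already embeds UTS_RELEASE and the compiler/host
	 * details, so <generated/compile.h> is no longer needed. */
	hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
	hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));
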
diff --git a/arch/powerpc/mm/nohash/kup.c b/arch/powerpc/mm/nohash/kup.c
new file mode 100644
index 000000000000..552becf90e97
--- /dev/null
+++ b/arch/powerpc/mm/nohash/kup.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file contains the routines for initializing kernel userspace protection
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/jump_label.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+#include <asm/kup.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_PPC_KUAP
+struct static_key_false disable_kuap_key;
+EXPORT_SYMBOL(disable_kuap_key);
+
+void setup_kuap(bool disabled)
+{
+ if (disabled) {
+ if (IS_ENABLED(CONFIG_40x))
+ disable_kuep = true;
+ if (smp_processor_id() == boot_cpuid)
+ static_branch_enable(&disable_kuap_key);
+ return;
+ }
+
+ pr_info("Activating Kernel Userspace Access Protection\n");
+
+ __prevent_user_access(KUAP_READ_WRITE);
+}
+#endif
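
The static key exported above is what kuap_is_disabled() (used by the
mmu_context.c hunk below) tests. A sketch of the consumer side -- the exact
definition lives in asm/kup.h and may differ:

	static __always_inline bool kuap_is_disabled(void)
	{
		return !IS_ENABLED(CONFIG_PPC_KUAP) ||
		       static_branch_unlikely(&disable_kuap_key);
	}
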
diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c
index 44b2b5e7cabe..ccd5819b1bd9 100644
--- a/arch/powerpc/mm/nohash/mmu_context.c
+++ b/arch/powerpc/mm/nohash/mmu_context.c
@@ -33,6 +33,7 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/smp.h>
+#include <asm/kup.h>
#include <mm/mmu_decl.h>
@@ -217,7 +218,7 @@ static void set_context(unsigned long id, pgd_t *pgd)
/* sync */
mb();
- } else {
+ } else if (kuap_is_disabled()) {
if (IS_ENABLED(CONFIG_40x))
mb(); /* sync */
@@ -305,6 +306,9 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
if (IS_ENABLED(CONFIG_BDI_SWITCH))
abatron_pteptrs[1] = next->pgd;
set_context(id, next->pgd);
+#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
+ tsk->thread.pid = id;
+#endif
raw_spin_unlock(&context_lock);
}
@@ -313,15 +317,6 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
*/
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
- /*
- * We have MMU_NO_CONTEXT set to be ~0. Hence check
- * explicitly against context.id == 0. This ensures that we properly
- * initialize context slice details for newly allocated mm's (which will
- * have id == 0) and don't alter context slice inherited via fork (which
- * will have id != 0).
- */
- if (mm->context.id == 0)
- slice_init_new_context_exec(mm);
mm->context.id = MMU_NO_CONTEXT;
mm->context.active = 0;
pte_frag_set(&mm->context, NULL);
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index 647bf454a0fa..2c15c86c7015 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -49,8 +49,7 @@
* other sizes not listed here. The .ind field is only used on MMUs that have
* indirect page table entries.
*/
-#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
[MMU_PAGE_4K] = {
.shift = 12,
@@ -81,7 +80,20 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
.enc = BOOK3E_PAGESZ_1GB,
},
};
-#elif defined(CONFIG_PPC_8xx)
+
+static inline int mmu_get_tsize(int psize)
+{
+ return mmu_psize_defs[psize].enc;
+}
+#else
+static inline int mmu_get_tsize(int psize)
+{
+ /* This isn't used on !Book3E for now */
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PPC_8xx
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
[MMU_PAGE_4K] = {
.shift = 12,
@@ -96,53 +108,7 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
.shift = 23,
},
};
-#else
-struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
- [MMU_PAGE_4K] = {
- .shift = 12,
- .ind = 20,
- .enc = BOOK3E_PAGESZ_4K,
- },
- [MMU_PAGE_16K] = {
- .shift = 14,
- .enc = BOOK3E_PAGESZ_16K,
- },
- [MMU_PAGE_64K] = {
- .shift = 16,
- .ind = 28,
- .enc = BOOK3E_PAGESZ_64K,
- },
- [MMU_PAGE_1M] = {
- .shift = 20,
- .enc = BOOK3E_PAGESZ_1M,
- },
- [MMU_PAGE_16M] = {
- .shift = 24,
- .ind = 36,
- .enc = BOOK3E_PAGESZ_16M,
- },
- [MMU_PAGE_256M] = {
- .shift = 28,
- .enc = BOOK3E_PAGESZ_256M,
- },
- [MMU_PAGE_1G] = {
- .shift = 30,
- .enc = BOOK3E_PAGESZ_1GB,
- },
-};
-#endif /* CONFIG_FSL_BOOKE */
-
-static inline int mmu_get_tsize(int psize)
-{
- return mmu_psize_defs[psize].enc;
-}
-#else
-static inline int mmu_get_tsize(int psize)
-{
- /* This isn't used on !Book3E for now */
- return 0;
-}
-#endif /* CONFIG_PPC_BOOK3E_MMU */
+#endif
/* The variables below are currently only used on 64-bit Book3E
* though this will probably be made common with other nohash
@@ -150,7 +116,6 @@ static inline int mmu_get_tsize(int psize)
*/
#ifdef CONFIG_PPC64
-int mmu_linear_psize; /* Page size used for the linear mapping */
int mmu_pte_psize; /* Page size used for PTE pages */
int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
@@ -167,7 +132,7 @@ int extlb_level_exc;
#endif /* CONFIG_PPC64 */
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
@@ -359,6 +324,7 @@ void __init early_init_mmu_47x(void)
/*
* Flush kernel TLB entries in the given range
*/
+#ifndef CONFIG_PPC_8xx
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
@@ -371,6 +337,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
+#endif
/*
* Currently, for range flushing, we just do a full mm flush. This should
@@ -433,14 +400,14 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
}
}
-static void setup_page_sizes(void)
+static void __init setup_page_sizes(void)
{
unsigned int tlb0cfg;
unsigned int tlb0ps;
unsigned int eptcfg;
int i, psize;
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
unsigned int mmucfg = mfspr(SPRN_MMUCFG);
int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
@@ -571,7 +538,7 @@ out:
}
}
-static void setup_mmu_htw(void)
+static void __init setup_mmu_htw(void)
{
/*
* If we want to use HW tablewalk, enable it by patching the TLB miss
@@ -583,7 +550,7 @@ static void setup_mmu_htw(void)
patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
break;
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
case PPC_HTW_E6500:
extlb_level_exc = EX_TLB_SIZE;
patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
@@ -626,7 +593,7 @@ static void early_init_this_mmu(void)
}
mtspr(SPRN_MAS4, mas4);
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned int num_cams;
bool map = true;
@@ -657,14 +624,6 @@ static void early_init_this_mmu(void)
static void __init early_init_mmu_global(void)
{
- /* XXX This will have to be decided at runtime, but right
- * now our boot and TLB miss code hard wires it. Ideally
- * we should find out a suitable page size and patch the
- * TLB miss code (either that or use the PACA to store
- * the value we want)
- */
- mmu_linear_psize = MMU_PAGE_1G;
-
/* XXX This should be decided at runtime based on supported
* page sizes in the TLB, but for now let's assume 16M is
* always there and a good fit (which it probably is)
@@ -687,7 +646,7 @@ static void __init early_init_mmu_global(void)
/* Look for HW tablewalk support */
setup_mmu_htw();
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
if (book3e_htw_mode == PPC_HTW_NONE) {
extlb_level_exc = EX_TLB_SIZE;
@@ -708,7 +667,7 @@ static void __init early_init_mmu_global(void)
static void __init early_mmu_set_memory_limit(void)
{
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
/*
* Limit memory so we dont have linear faults.
@@ -757,7 +716,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
* We crop it to the size of the first MEMBLOCK to
* avoid going over total available memory just in case...
*/
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned long linear_sz;
unsigned int num_cams;
@@ -782,9 +741,5 @@ void __init early_init_mmu(void)
#ifdef CONFIG_PPC_47x
early_init_mmu_47x();
#endif
-
-#ifdef CONFIG_PPC_MM_SLICES
- mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);
-#endif
}
#endif /* CONFIG_PPC64 */
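
As a cross-check on the mmu_psize_defs tables above (an observation, not
part of the patch): each .enc is the Book3E TSIZE encoding, log2 of the
page size in kB, i.e. .shift - 10:

	/* 4K : shift 12 -> BOOK3E_PAGESZ_4K  == 2
	 * 16M: shift 24 -> BOOK3E_PAGESZ_16M == 14
	 * 1G : shift 30 -> BOOK3E_PAGESZ_1GB == 20 */
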
diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index dd39074de9af..e1199608ff4d 100644
--- a/arch/powerpc/mm/nohash/tlb_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
@@ -186,7 +186,7 @@ _GLOBAL(_tlbivax_bcast)
isync
PPC_TLBIVAX(0, R3)
isync
- eieio
+ mbar
tlbsync
BEGIN_FTR_SECTION
b 1f
@@ -221,7 +221,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_476_DD2)
blr
#endif /* CONFIG_PPC_47x */
-#elif defined(CONFIG_FSL_BOOKE)
+#elif defined(CONFIG_PPC_85xx)
/*
* FSL BookE implementations.
*
@@ -294,7 +294,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
isync
1: wrtee r10
blr
-#elif defined(CONFIG_PPC_BOOK3E)
+#elif defined(CONFIG_PPC_BOOK3E_64)
/*
* New Book3E (>= 2.06) implementation
*
@@ -355,7 +355,7 @@ _GLOBAL(_tlbivax_bcast)
rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
PPC_TLBIVAX(0,R3)
- eieio
+ mbar
tlbsync
sync
wrtee r10
@@ -364,7 +364,7 @@ _GLOBAL(_tlbivax_bcast)
#error Unsupported processor type !
#endif
-#if defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_PPC_E500)
/*
* extern void loadcam_entry(unsigned int index)
*
diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index 9235e720e357..76cf456d7976 100644
--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -61,7 +61,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
ld r14,PACAPGD(r13)
std r15,EX_TLB_R15(r12)
std r10,EX_TLB_CR(r12)
-#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
mfspr r11, SPRN_SRR1
andi. r10,r11,MSR_PR
@@ -70,14 +69,11 @@ START_BTB_FLUSH_SECTION
1:
END_BTB_FLUSH_SECTION
std r7,EX_TLB_R7(r12)
-#endif
.endm
.macro tlb_epilog_bolted
ld r14,EX_TLB_CR(r12)
-#ifdef CONFIG_PPC_FSL_BOOK3E
ld r7,EX_TLB_R7(r12)
-#endif
ld r10,EX_TLB_R10(r12)
ld r11,EX_TLB_R11(r12)
ld r13,EX_TLB_R13(r12)
@@ -128,6 +124,13 @@ END_BTB_FLUSH_SECTION
bne tlb_miss_kernel_bolted
+tlb_miss_user_bolted:
+#ifdef CONFIG_PPC_KUAP
+ mfspr r10,SPRN_MAS1
+ rlwinm. r10,r10,0,0x3fff0000
+ beq- tlb_miss_fault_bolted /* KUAP fault */
+#endif
+
tlb_miss_common_bolted:
/*
* This is the guts of the TLB miss handler for bolted-linear.
@@ -145,16 +148,7 @@ tlb_miss_common_bolted:
clrrdi r15,r15,3
beq tlb_miss_fault_bolted /* No PGDIR, bail */
-BEGIN_MMU_FTR_SECTION
- /* Set the TLB reservation and search for existing entry. Then load
- * the entry.
- */
- PPC_TLBSRX_DOT(0,R16)
- ldx r14,r14,r15 /* grab pgd entry */
- beq tlb_miss_done_bolted /* tlb exists already, bail */
-MMU_FTR_SECTION_ELSE
ldx r14,r14,r15 /* grab pgd entry */
-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
clrrdi r15,r15,3
@@ -215,10 +209,11 @@ itlb_miss_kernel_bolted:
tlb_miss_kernel_bolted:
mfspr r10,SPRN_MAS1
ld r14,PACA_KERNELPGD(r13)
- cmpldi cr0,r15,8 /* Check for vmalloc region */
+ srdi r15,r16,44 /* get kernel region */
+ andi. r15,r15,1 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
- beq+ tlb_miss_common_bolted
+ bne+ tlb_miss_common_bolted
tlb_miss_fault_bolted:
/* We need to check if it was an instruction miss */
@@ -246,10 +241,9 @@ itlb_miss_fault_bolted:
cmpldi cr0,r15,0 /* Check for user region */
oris r11,r11,_PAGE_ACCESSED@h
- beq tlb_miss_common_bolted
+ beq tlb_miss_user_bolted
b itlb_miss_kernel_bolted
-#ifdef CONFIG_PPC_FSL_BOOK3E
/*
* TLB miss handling for e6500 and derivatives, using hardware tablewalk.
*
@@ -500,7 +494,9 @@ tlb_miss_huge_e6500:
tlb_miss_kernel_e6500:
ld r14,PACA_KERNELPGD(r13)
- cmpldi cr1,r15,8 /* Check for vmalloc region */
+ srdi r15,r16,44 /* get kernel region */
+ xoris r15,r15,0xc /* Check for vmalloc region */
+ cmplwi cr1,r15,1
beq+ cr1,tlb_miss_common_e6500
tlb_miss_fault_e6500:
@@ -514,7 +510,6 @@ dtlb_miss_fault_e6500:
itlb_miss_fault_e6500:
tlb_epilog_bolted
b exc_instruction_storage_book3e
-#endif /* CONFIG_PPC_FSL_BOOK3E */
/**********************************************************************
* *
@@ -534,16 +529,18 @@ itlb_miss_fault_e6500:
*/
mfspr r14,SPRN_ESR
mfspr r16,SPRN_DEAR /* get faulting address */
- srdi r15,r16,60 /* get region */
- cmpldi cr0,r15,0xc /* linear mapping ? */
+ srdi r15,r16,44 /* get region */
+ xoris r15,r15,0xc
+ cmpldi cr0,r15,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
+ cmpldi cr1,r15,1 /* vmalloc mapping ? */
/* The page tables are mapped virtually linear. At this point, though,
* we don't know whether we are trying to fault in a first level
* virtual address or a virtual page table address. We can get that
* from bit 0x1 of the region ID which we have set for a page table
*/
- andi. r10,r15,0x1
+ andis. r10,r15,0x1
bne- virt_page_table_tlb_miss
std r14,EX_TLB_ESR(r12); /* save ESR */
@@ -555,7 +552,7 @@ itlb_miss_fault_e6500:
/* We do the user/kernel test for the PID here along with the RW test
*/
- cmpldi cr0,r15,0 /* Check for user region */
+ srdi. r15,r16,60 /* Check for user region */
/* We pre-test some combination of permissions to avoid double
* faults:
@@ -576,13 +573,12 @@ itlb_miss_fault_e6500:
*/
rlwimi r11,r14,32-19,27,27
rlwimi r11,r14,32-16,19,19
- beq normal_tlb_miss
+ beq normal_tlb_miss_user
/* XXX replace the RMW cycles with immediate loads + writes */
1: mfspr r10,SPRN_MAS1
- cmpldi cr0,r15,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
- beq+ normal_tlb_miss
+ beq+ cr1,normal_tlb_miss
/* We got a crappy address, just fault with whatever DEAR and ESR
* are here
@@ -608,27 +604,28 @@ itlb_miss_fault_e6500:
*
* Faulting address is SRR0 which is already in r16
*/
- srdi r15,r16,60 /* get region */
- cmpldi cr0,r15,0xc /* linear mapping ? */
+ srdi r15,r16,44 /* get region */
+ xoris r15,r15,0xc
+ cmpldi cr0,r15,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
+ cmpldi cr1,r15,1 /* vmalloc mapping ? */
/* We do the user/kernel test for the PID here along with the RW test
*/
li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */
oris r11,r11,_PAGE_ACCESSED@h
- cmpldi cr0,r15,0 /* Check for user region */
+ srdi. r15,r16,60 /* Check for user region */
std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */
- beq normal_tlb_miss
+ beq normal_tlb_miss_user
li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */
oris r11,r11,_PAGE_ACCESSED@h
/* XXX replace the RMW cycles with immediate loads + writes */
mfspr r10,SPRN_MAS1
- cmpldi cr0,r15,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
- beq+ normal_tlb_miss
+ beq+ cr1,normal_tlb_miss
/* We got a crappy address, just fault */
TLB_MISS_EPILOG_ERROR
@@ -646,6 +643,12 @@ itlb_miss_fault_e6500:
* r11 = PTE permission mask
* r10 = crap (free to use)
*/
+normal_tlb_miss_user:
+#ifdef CONFIG_PPC_KUAP
+ mfspr r14,SPRN_MAS1
+ rlwinm. r14,r14,0,0x3fff0000
+ beq- normal_tlb_miss_access_fault /* KUAP fault */
+#endif
normal_tlb_miss:
/* So we first construct the page table address. We do that by
* shifting the bottom of the address (not the region ID) by
@@ -655,22 +658,14 @@ normal_tlb_miss:
* NOTE: For 64K pages, we do things slightly differently in
* order to handle the weird page table format used by linux
*/
- ori r10,r15,0x1
+ srdi r15,r16,44
+ oris r10,r15,0x1
rldicl r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
- sldi r15,r10,60
- clrrdi r14,r14,3
+ sldi r15,r10,44
+ clrrdi r14,r14,19
or r10,r15,r14
-BEGIN_MMU_FTR_SECTION
- /* Set the TLB reservation and search for existing entry. Then load
- * the entry.
- */
- PPC_TLBSRX_DOT(0,R16)
- ld r14,0(r10)
- beq normal_tlb_miss_done
-MMU_FTR_SECTION_ELSE
ld r14,0(r10)
-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
finish_normal_tlb_miss:
/* Check if required permissions are met */
@@ -689,13 +684,13 @@ finish_normal_tlb_miss:
*
* TODO: mix up code below for better scheduling
*/
- clrrdi r11,r16,12 /* Clear low crap in EA */
- rlwimi r11,r14,32-19,27,31 /* Insert WIMGE */
- mtspr SPRN_MAS2,r11
+ clrrdi r10,r16,12 /* Clear low crap in EA */
+ rlwimi r10,r14,32-19,27,31 /* Insert WIMGE */
+ mtspr SPRN_MAS2,r10
/* Check page size, if not standard, update MAS1 */
- rldicl r11,r14,64-8,64-8
- cmpldi cr0,r11,BOOK3E_PAGESZ_4K
+ rldicl r10,r14,64-8,64-8
+ cmpldi cr0,r10,BOOK3E_PAGESZ_4K
beq- 1f
mfspr r11,SPRN_MAS1
rlwimi r11,r14,31,21,24
@@ -714,13 +709,9 @@ finish_normal_tlb_miss:
li r11,MAS3_SW|MAS3_UW
andc r15,r15,r11
1:
-BEGIN_MMU_FTR_SECTION
srdi r16,r15,32
mtspr SPRN_MAS3,r15
mtspr SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
- mtspr SPRN_MAS7_MAS3,r15
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
@@ -772,6 +763,7 @@ normal_tlb_miss_access_fault:
*/
virt_page_table_tlb_miss:
/* Are we hitting a kernel page table ? */
+ srdi r15,r16,60
andi. r10,r15,0x8
/* The cool thing now is that r10 contains 0 for user and 8 for kernel,
@@ -786,19 +778,22 @@ virt_page_table_tlb_miss:
mfspr r10,SPRN_MAS1
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
+#ifdef CONFIG_PPC_KUAP
+ b 2f
1:
-BEGIN_MMU_FTR_SECTION
- /* Search if we already have a TLB entry for that virtual address, and
- * if we do, bail out.
- */
- PPC_TLBSRX_DOT(0,R16)
- beq virt_page_table_tlb_miss_done
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
+ mfspr r10,SPRN_MAS1
+ rlwinm. r10,r10,0,0x3fff0000
+ beq- virt_page_table_tlb_miss_fault /* KUAP fault */
+2:
+#else
+1:
+#endif
/* Now, we need to walk the page tables. First check if we are in
* range.
*/
- rldicl. r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
+ rldicl r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
+ cmpldi r10,0x80
bne- virt_page_table_tlb_miss_fault
/* Get the PGD pointer */
@@ -844,41 +839,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
clrldi r11,r15,4 /* remove region ID from RPN */
ori r10,r11,1 /* Or-in SR */
-BEGIN_MMU_FTR_SECTION
srdi r16,r10,32
mtspr SPRN_MAS3,r10
mtspr SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
- mtspr SPRN_MAS7_MAS3,r10
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
-BEGIN_MMU_FTR_SECTION
-virt_page_table_tlb_miss_done:
-
- /* We have overridden MAS2:EPN but currently our primary TLB miss
- * handler will always restore it so that should not be an issue,
- * if we ever optimize the primary handler to not write MAS2 on
- * some cases, we'll have to restore MAS2:EPN here based on the
- * original fault's DEAR. If we do that we have to modify the
- * ITLB miss handler to also store SRR0 in the exception frame
- * as DEAR.
- *
- * However, one nasty thing we did is we cleared the reservation
- * (well, potentially we did). We do a trick here thus if we
- * are not a level 0 exception (we interrupted the TLB miss) we
- * offset the return address by -4 in order to replay the tlbsrx
- * instruction there
- */
- subf r10,r13,r12
- cmpldi cr0,r10,PACA_EXTLB+EX_TLB_SIZE
- bne- 1f
- ld r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
- addi r10,r11,-4
- std r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
-1:
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
/* Return to caller, normal case */
TLB_MISS_EPILOG_SUCCESS
rfi
@@ -946,23 +912,24 @@ virt_page_table_tlb_miss_whacko_fault:
*/
mfspr r14,SPRN_ESR
mfspr r16,SPRN_DEAR /* get faulting address */
- srdi r11,r16,60 /* get region */
- cmpldi cr0,r11,0xc /* linear mapping ? */
+ srdi r11,r16,44 /* get region */
+ xoris r11,r11,0xc
+ cmpldi cr0,r11,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
+ cmpldi cr1,r11,1 /* vmalloc mapping ? */
/* We do the user/kernel test for the PID here along with the RW test
*/
- cmpldi cr0,r11,0 /* Check for user region */
+ srdi. r11,r16,60 /* Check for user region */
ld r15,PACAPGD(r13) /* Load user pgdir */
beq htw_tlb_miss
/* XXX replace the RMW cycles with immediate loads + writes */
1: mfspr r10,SPRN_MAS1
- cmpldi cr0,r11,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
- beq+ htw_tlb_miss
+ beq+ cr1,htw_tlb_miss
/* We got a crappy address, just fault with whatever DEAR and ESR
* are here
@@ -988,19 +955,20 @@ virt_page_table_tlb_miss_whacko_fault:
*
* Faulting address is SRR0 which is already in r16
*/
- srdi r11,r16,60 /* get region */
- cmpldi cr0,r11,0xc /* linear mapping ? */
+ srdi r11,r16,44 /* get region */
+ xoris r11,r11,0xc
+ cmpldi cr0,r11,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
+ cmpldi cr1,r11,1 /* vmalloc mapping ? */
/* We do the user/kernel test for the PID here along with the RW test
*/
- cmpldi cr0,r11,0 /* Check for user region */
+ srdi. r11,r16,60 /* Check for user region */
ld r15,PACAPGD(r13) /* Load user pgdir */
beq htw_tlb_miss
/* XXX replace the RMW cycles with immediate loads + writes */
1: mfspr r10,SPRN_MAS1
- cmpldi cr0,r11,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
@@ -1027,6 +995,11 @@ virt_page_table_tlb_miss_whacko_fault:
* avoid too much complication, it will save/restore things for us
*/
htw_tlb_miss:
+#ifdef CONFIG_PPC_KUAP
+ mfspr r10,SPRN_MAS1
+ rlwinm. r10,r10,0,0x3fff0000
+ beq- htw_tlb_miss_fault /* KUAP fault */
+#endif
/* Search if we already have a TLB entry for that virtual address, and
* if we do, bail out.
*
@@ -1088,13 +1061,9 @@ htw_tlb_miss:
*/
ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
-BEGIN_MMU_FTR_SECTION
srdi r16,r10,32
mtspr SPRN_MAS3,r10
mtspr SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
- mtspr SPRN_MAS7_MAS3,r10
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
@@ -1149,8 +1118,8 @@ tlb_load_linear:
* we only use 1G pages for now. That might have to be changed in a
* final implementation, especially when dealing with hypervisors
*/
- ld r11,PACATOC(r13)
- ld r11,linear_map_top@got(r11)
+ __LOAD_PACA_TOC(r11)
+ LOAD_REG_ADDR_ALTTOC(r11, r11, linear_map_top)
ld r10,0(r11)
tovirt(10,10)
cmpld cr0,r16,r10
@@ -1175,13 +1144,9 @@ tlb_load_linear:
clrldi r10,r10,4 /* clear region bits */
ori r10,r10,MAS3_SR|MAS3_SW|MAS3_SX
-BEGIN_MMU_FTR_SECTION
srdi r16,r10,32
mtspr SPRN_MAS3,r10
mtspr SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
- mtspr SPRN_MAS7_MAS3,r10
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
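
The recurring region-test rework in this file (srdi by 44 instead of 60,
plus the xoris) can be read as the following C sketch, assuming the 64e
layout it relies on (linear map at 0xc000000000000000, vmalloc at
0xc000100000000000):

	unsigned long region = ea >> 44;	/* srdi rN,r16,44 */

	region ^= 0xc0000;			/* xoris rN,rN,0xc */
	if (region == 0) {
		/* linear mapping: branch to tlb_load_linear */
	} else if (region == 1) {
		/* vmalloc: use PACA_KERNELPGD with MAS1 TID cleared */
	} else if (ea >> 60 == 0) {
		/* user address: use PACAPGD (the srdi. rN,r16,60 test) */
	}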