author     Linus Torvalds <torvalds@linux-foundation.org>  2014-06-06 10:43:28 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-06-06 10:43:28 -0700
commit     cc07aabc53978ae09a1d539237189f7c9841060a (patch)
tree       6f47580d19ab5ad85f319bdb260615e991a93399 /arch/arm64/mm/mmu.c
parent     Fix ARM merge mistake in mvebu board file (diff)
parent     arm64: kernel: initialize broadcast hrtimer based clock event device (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux into next
Pull arm64 updates from Catalin Marinas:

 - Optimised assembly string/memory routines (based on the AArch64 Cortex
   Strings library contributed to glibc but re-licensed under GPLv2)

 - Optimised crypto algorithms making use of the ARMv8 crypto extensions
   (together with kernel API for using FPSIMD instructions in interrupt
   context)

 - Ftrace support

 - CPU topology parsing from DT

 - ESR_EL1 (Exception Syndrome Register) exposed to user space signal
   handlers for SIGSEGV/SIGBUS (useful to emulation tools like Qemu)

 - 1GB section linear mapping if applicable

 - Barriers usage clean-up

 - Default pgprot clean-up

Conflicts as per Catalin.

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (57 commits)
  arm64: kernel: initialize broadcast hrtimer based clock event device
  arm64: ftrace: Add system call tracepoint
  arm64: ftrace: Add CALLER_ADDRx macros
  arm64: ftrace: Add dynamic ftrace support
  arm64: Add ftrace support
  ftrace: Add arm64 support to recordmcount
  arm64: Add 'notrace' attribute to unwind_frame() for ftrace
  arm64: add __ASSEMBLY__ in asm/insn.h
  arm64: Fix linker script entry point
  arm64: lib: Implement optimized string length routines
  arm64: lib: Implement optimized string compare routines
  arm64: lib: Implement optimized memcmp routine
  arm64: lib: Implement optimized memset routine
  arm64: lib: Implement optimized memmove routine
  arm64: lib: Implement optimized memcpy routine
  arm64: defconfig: enable a few more common/useful options in defconfig
  ftrace: Make CALLER_ADDRx macros more generic
  arm64: Fix deadlock scenario with smp_send_stop()
  arm64: Fix machine_shutdown() definition
  arm64: Support arch_irq_work_raise() via self IPIs
  ...
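Note on the mmu.c diff below: the "1GB section linear mapping" item is what this merge brings into arch/arm64/mm/mmu.c. In alloc_init_pud(), a single 1GB block entry is written at the pud level whenever the virtual range and the physical address are all aligned to a pud boundary, which the code only attempts with the 4K granule (the PAGE_SHIFT == 12 test). The following is a minimal standalone sketch of that alignment test, not kernel code; PUD_SHIFT, the sample addresses and the can_use_1gb_block() helper are illustrative assumptions.

/*
 * Standalone sketch (userspace, C): the alignment test used by
 * alloc_init_pud() before writing a 1GB block entry.  PUD_SHIFT and
 * the sample addresses are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define PUD_SHIFT	30	/* with a 4K granule, one pud entry covers 1GB */
#define PUD_MASK	(~((1ULL << PUD_SHIFT) - 1))

/* Mirrors "((addr | next | phys) & ~PUD_MASK) == 0" from the diff below:
 * a block entry is usable only if start, end and physical address all
 * share 1GB alignment. */
static int can_use_1gb_block(uint64_t addr, uint64_t next, uint64_t phys)
{
	return ((addr | next | phys) & ~PUD_MASK) == 0;
}

int main(void)
{
	/* Hypothetical mappings: one fully 1GB-aligned, one not. */
	printf("%d\n", can_use_1gb_block(0xffffffc000000000ULL,
					 0xffffffc040000000ULL,
					 0x80000000ULL));	/* prints 1 */
	printf("%d\n", can_use_1gb_block(0xffffffc000000000ULL,
					 0xffffffc040000000ULL,
					 0x80200000ULL));	/* prints 0 */
	return 0;
}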
Diffstat (limited to 'arch/arm64/mm/mmu.c')
-rw-r--r--  arch/arm64/mm/mmu.c  67
1 file changed, 30 insertions(+), 37 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 4a829a210bb6..c43f1dd19489 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,11 +43,6 @@
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
-pgprot_t pgprot_default;
-EXPORT_SYMBOL(pgprot_default);
-
-static pmdval_t prot_sect_kernel;
-
struct cachepolicy {
	const char policy[16];
	u64 mair;
@@ -122,33 +117,6 @@ static int __init early_cachepolicy(char *p)
}
early_param("cachepolicy", early_cachepolicy);
-/*
- * Adjust the PMD section entries according to the CPU in use.
- */
-void __init init_mem_pgprot(void)
-{
-	pteval_t default_pgprot;
-	int i;
-
-	default_pgprot = PTE_ATTRINDX(MT_NORMAL);
-	prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
-
-#ifdef CONFIG_SMP
-	/*
-	 * Mark memory with the "shared" attribute for SMP systems
-	 */
-	default_pgprot |= PTE_SHARED;
-	prot_sect_kernel |= PMD_SECT_S;
-#endif
-
-	for (i = 0; i < 16; i++) {
-		unsigned long v = pgprot_val(protection_map[i]);
-		protection_map[i] = __pgprot(v | default_pgprot);
-	}
-
-	pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
-}
-
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
@@ -196,11 +164,10 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
	pgprot_t prot_pte;

	if (map_io) {
-		prot_sect = PMD_TYPE_SECT | PMD_SECT_AF |
-			    PMD_ATTRINDX(MT_DEVICE_nGnRE);
+		prot_sect = PROT_SECT_DEVICE_nGnRE;
		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
	} else {
-		prot_sect = prot_sect_kernel;
+		prot_sect = PROT_SECT_NORMAL_EXEC;
		prot_pte = PAGE_KERNEL_EXEC;
	}
@@ -242,7 +209,30 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	do {
		next = pud_addr_end(addr, end);
-		alloc_init_pmd(pud, addr, next, phys, map_io);
+
+		/*
+		 * For 4K granule only, attempt to put down a 1GB block
+		 */
+		if (!map_io && (PAGE_SHIFT == 12) &&
+		    ((addr | next | phys) & ~PUD_MASK) == 0) {
+			pud_t old_pud = *pud;
+			set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+
+			/*
+			 * If we have an old value for a pud, it will
+			 * be pointing to a pmd table that we no longer
+			 * need (from swapper_pg_dir).
+			 *
+			 * Look up the old pmd table and free it.
+			 */
+			if (!pud_none(old_pud)) {
+				phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+				memblock_free(table, PAGE_SIZE);
+				flush_tlb_all();
+			}
+		} else {
+			alloc_init_pmd(pud, addr, next, phys, map_io);
+		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}
@@ -399,6 +389,9 @@ int kern_addr_valid(unsigned long addr)
	if (pud_none(*pud))
		return 0;
+	if (pud_sect(*pud))
+		return pfn_valid(pud_pfn(*pud));
+
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
@@ -446,7 +439,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
			if (!p)
				return -ENOMEM;
-			set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
+			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);
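Footnote on the kern_addr_valid() hunk above: once a 1GB block can live at the pud level, the page-table walk may stop there and the pfn is read straight out of the section descriptor instead of descending to a pmd/pte. Below is a rough standalone sketch of that extraction, assuming a 4K PAGE_SHIFT and treating the descriptor as a plain 64-bit value; DESC_ADDR_MASK and the sample descriptor are illustrative assumptions, not the kernel's pud_pfn() definition.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
/* Assumption for illustration: take bits [47:12] as the output address
 * and ignore the low attribute bits (type, AF, shareability, AttrIndx). */
#define DESC_ADDR_MASK	0x0000fffffffff000ULL

/* Pull a page frame number out of a (hypothetical) pud section descriptor. */
static uint64_t section_pfn(uint64_t pud_desc)
{
	return (pud_desc & DESC_ADDR_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	/* Made-up 1GB block descriptor: output address 0x80000000 plus
	 * some attribute bits in the low word. */
	uint64_t desc = 0x80000000ULL | 0x711ULL;

	printf("pfn = 0x%llx\n", (unsigned long long)section_pfn(desc));	/* 0x80000 */
	return 0;
}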