author	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-10 10:39:04 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-10 10:39:04 -0800
commit	c32c2cb27268f0adad0b6afbebe860ac5250c388 (patch)
tree	046bc27d2b62c19cb31a1510ddae8e8f61e2a97a /arch
parent	Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux (diff)
parent	arm64: hugetlb: partial revert of 66b3923a1a0f (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "I thought we were done for 4.5, but then the 64k-page chaps came
  crawling out of the woodwork.  *sigh*

  The vmemmap fix I sent for -rc7 caused a regression with 64k pages
  and sparsemem, and at some point during the release cycle the new
  hugetlb code using contiguous ptes started failing the libhugetlbfs
  tests with 64k pages enabled.

  So here are a couple of patches that fix the vmemmap alignment and
  disable the new hugetlb page sizes whilst a proper fix is being
  developed:

   - Temporarily disable huge pages built using contiguous ptes

   - Ensure vmemmap region is sufficiently aligned for sparsemem
     sections"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: hugetlb: partial revert of 66b3923a1a0f
  arm64: account for sparsemem section alignment when choosing vmemmap offset
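For context, the vmemmap problem the second patch addresses is pure pfn arithmetic: sparsemem populates struct pages for whole sections, so when memstart_addr is not section-aligned, biasing the vmemmap base by the raw start pfn leaves the first section's entries pointing below VMEMMAP_START. A minimal user-space sketch of that arithmetic follows; every constant in it is an illustrative stand-in rather than the kernel's definition.

/*
 * Illustration only: why the vmemmap base must be biased by a
 * section-aligned pfn. PAGE_SHIFT, SECTION_SIZE_BITS and the start
 * address are made-up example values, not the kernel's.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT		16	/* 64k pages */
#define SECTION_SIZE_BITS	30	/* 1GiB sections */
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & ~((1UL << PFN_SECTION_SHIFT) - 1))

int main(void)
{
	uint64_t memstart_addr = 0x90000000;	/* not section-aligned */
	uint64_t first_pfn = memstart_addr >> PAGE_SHIFT;

	/* Sparsemem maps whole sections, so the lowest pfn it will
	 * describe is the section-aligned one at or below first_pfn... */
	uint64_t section_pfn = SECTION_ALIGN_DOWN(first_pfn);

	/* ...so a vmemmap base biased by first_pfn would place that
	 * section's struct pages before VMEMMAP_START. */
	printf("first pfn %llu, section start pfn %llu (%llu pages below)\n",
	       (unsigned long long)first_pfn,
	       (unsigned long long)section_pfn,
	       (unsigned long long)(first_pfn - section_pfn));
	return 0;
}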
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/include/asm/pgtable.h |  5 +++--
-rw-r--r--	arch/arm64/mm/hugetlbpage.c      | 14 --------------
2 files changed, 3 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f50608674580..819aff5d593f 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -40,7 +40,7 @@
  * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
  * fixed mappings and modules
  */
-#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
 
 #ifndef CONFIG_KASAN
 #define VMALLOC_START		(VA_START)
@@ -52,7 +52,8 @@
 #define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define VMEMMAP_START		(VMALLOC_END + SZ_64K)
-#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
+#define vmemmap			((struct page *)VMEMMAP_START - \
+				 SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
 
 #define FIRST_USER_ADDRESS	0UL
 
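Read together, the second hunk rounds the pfn bias down to a section boundary, and the first hunk drops the "- 1" from VMEMMAP_SIZE so the reserved window still covers a struct page for every pfn after that rounding. Below is a rough sketch of the pfn-to-page mapping the new macro implies; the function name, the struct page stand-in and the section size are hypothetical, chosen only to make the sketch self-contained.

/*
 * Sketch of the mapping implied by the new vmemmap definition. Not
 * kernel API: PAGES_PER_SECTION and struct page are stand-ins.
 */
#define PAGES_PER_SECTION	(1UL << 14)	/* assumed: 64k pages, 1GiB sections */
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & ~(PAGES_PER_SECTION - 1))

struct page { char pad[64]; };	/* placeholder for the real struct page */

static struct page *pfn_to_page_sketch(struct page *vmemmap_start,
				       unsigned long memstart_pfn,
				       unsigned long pfn)
{
	/*
	 * Bias the array base by the section-aligned first pfn. The
	 * lowest pfn sparsemem will ever describe is
	 * SECTION_ALIGN_DOWN(memstart_pfn), and with this bias it maps
	 * exactly to vmemmap_start instead of below it.
	 */
	struct page *vmemmap = vmemmap_start - SECTION_ALIGN_DOWN(memstart_pfn);
	return vmemmap + pfn;
}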
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 82d607c3614e..da30529bb1f6 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -306,10 +306,6 @@ static __init int setup_hugepagesz(char *opt)
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 	} else if (ps == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-	} else if (ps == (PAGE_SIZE * CONT_PTES)) {
-		hugetlb_add_hstate(CONT_PTE_SHIFT);
-	} else if (ps == (PMD_SIZE * CONT_PMDS)) {
-		hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
 	} else {
 		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
 		return 0;
@@ -317,13 +313,3 @@ static __init int setup_hugepagesz(char *opt)
 	return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
-	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
-		hugetlb_add_hstate(CONT_PMD_SHIFT);
-	return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
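For reference, the sizes removed from the hugepagesz= parser are the contiguous-pte and contiguous-pmd hugepages, which with a 64k granule work out to 2M and 16G; the dropped arch_initcall had additionally registered the contiguous-pte size by default on 64k-page kernels. A quick illustrative computation, with the CONT_* and PMD_* values restated here purely for the example:

#include <stdio.h>

#define PAGE_SHIFT	16			/* 64k base pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SHIFT	29			/* 512M PMDs with a 64k granule */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define CONT_PTES	32			/* ptes covered by one contiguous hint */
#define CONT_PMDS	32			/* pmds covered by one contiguous hint */

int main(void)
{
	/* The two sizes setup_hugepagesz() no longer accepts. */
	printf("cont-pte hugepage: %lu MiB\n", (PAGE_SIZE * CONT_PTES) >> 20);
	printf("cont-pmd hugepage: %lu GiB\n", (PMD_SIZE * CONT_PMDS) >> 30);
	return 0;
}

Until a proper fix lands, only the PMD and PUD hugepage sizes remain available on arm64.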