author	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2014-06-04 16:05:35 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-04 16:53:51 -0700
commit	c177c81e09e517bbf75b67762cdab1b83aba6976 (patch)
tree	569aed2a30badb1ce3e5393de74b43eeeb22e376 /include
parent	mm: fix sleeping function warning from __put_anon_vma (diff)
hugetlb: restrict hugepage_migration_support() to x86_64
Currently hugepage migration is available for all archs which support pmd-level hugepages, but testing is done only for x86_64 and there are bugs for other archs. So to avoid breaking such archs, this patch limits the availability strictly to x86_64 until developers of other archs get interested in enabling this feature.

Simply disabling hugepage migration on non-x86_64 archs is not enough to fix the reported problem where sys_move_pages() hits the BUG_ON() in follow_page(FOLL_GET), so let's fix this by making vma_migratable() check whether hugepage migration is supported.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reported-by: Michael Ellerman <mpe@ellerman.id.au>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: <stable@vger.kernel.org> [3.12+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
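Note: the arch-side opt-in added by this patch lives under arch/ and is therefore not visible below, since this page's diffstat is limited to 'include'. As a sketch only (assuming the x86 hunk of this same patch takes the usual def_bool form), the Kconfig symbol that CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION refers to would look roughly like:

	config ARCH_ENABLE_HUGEPAGE_MIGRATION
		def_bool y
		depends on X86_64 && HUGETLB_PAGE && MIGRATION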
Diffstat (limited to 'include')
-rw-r--r--	include/linux/hugetlb.h	13
-rw-r--r--	include/linux/mempolicy.h	6
2 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b65166de1d9d..d0bad1a8b0bd 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -392,15 +392,13 @@ static inline pgoff_t basepage_index(struct page *page)
 extern void dissolve_free_huge_pages(unsigned long start_pfn,
 				     unsigned long end_pfn);
 
-int pmd_huge_support(void);
-/*
- * Currently hugepage migration is enabled only for pmd-based hugepage.
- * This function will be updated when hugepage migration is more widely
- * supported.
- */
 static inline int hugepage_migration_support(struct hstate *h)
 {
-	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+	return huge_page_shift(h) == PMD_SHIFT;
+#else
+	return 0;
+#endif
 }
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
@@ -450,7 +448,6 @@ static inline pgoff_t basepage_index(struct page *page)
 	return page->index;
 }
 #define dissolve_free_huge_pages(s, e)	do {} while (0)
-#define pmd_huge_support()	0
 #define hugepage_migration_support(h)	0
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
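For context (not part of this hunk): the point of the per-hstate helper is that huge page migration paths can bail out early on unsupported configurations. A minimal caller sketch, assuming the unmap_and_move_huge_page()-style check that mm/migrate.c carried in this era:

	/* Sketch, assuming mm/migrate.c of this era: refuse to migrate a
	 * huge page whose hstate does not support migration. */
	if (!hugepage_migration_support(page_hstate(hpage)))
		return -ENOSYS;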
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 3c1b968da0ca..f230a978e6ba 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -175,6 +175,12 @@ static inline int vma_migratable(struct vm_area_struct *vma)
 {
 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 		return 0;
+
+#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+	if (vma->vm_flags & VM_HUGETLB)
+		return 0;
+#endif
+
 	/*
 	 * Migration allocates pages in the highest zone. If we cannot
 	 * do so then migration (at least from node to node) is not
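This vma_migratable() check is what keeps sys_move_pages() away from the follow_page(FOLL_GET) BUG_ON mentioned in the commit message. A rough sketch of the call site, assuming the per-page loop in mm/migrate.c's do_move_page_to_node_array() of this era:

	/* Sketch, assuming mm/migrate.c's per-page loop: skip any address
	 * whose VMA is not migratable, so hugetlb VMAs on archs without
	 * CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION never reach
	 * follow_page(FOLL_GET). */
	vma = find_vma(mm, pp->addr);
	if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
		goto set_status;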