path: root/include/linux/huge_mm.h
author		Dan Williams <dan.j.williams@intel.com>		2016-01-15 16:56:52 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-15 17:56:32 -0800
commit		5c7fb56e5e3f7035dd798a8e1adee639f87043e5 (patch)
tree		e3419de32c4b42c918267a50120549cbec2ed63f	/include/linux/huge_mm.h
parent		mm, dax, pmem: introduce {get|put}_dev_pagemap() for dax-gup (diff)
download	linux-dev-5c7fb56e5e3f7035dd798a8e1adee639f87043e5.tar.xz
		linux-dev-5c7fb56e5e3f7035dd798a8e1adee639f87043e5.zip
mm, dax: dax-pmd vs thp-pmd vs hugetlbfs-pmd
A dax-huge-page mapping, while it uses some thp helpers, is ultimately
not a transparent huge page.  The distinction is especially important
in the get_user_pages() path.  pmd_devmap() is used to distinguish
dax-pmds from pmd_huge() and pmd_trans_huge(), which have slightly
different semantics.

Explicitly mark the pmd_trans_huge() helpers that dax needs by adding
pmd_devmap() checks.

[kirill.shutemov@linux.intel.com: fix regression in handling mlocked pages in __split_huge_pmd()]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
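To make the distinction concrete, the predicate this patch adds to the thp
helpers can be read as a single question.  The helper below is an
illustrative sketch only, not code from the patch; the function name is
hypothetical and it assumes the usual arch page-table helpers
(pmd_trans_huge(), pmd_devmap()) are in scope, e.g. via <asm/pgtable.h>.

/*
 * Hypothetical helper, for illustration: does this pmd map a huge
 * region that the thp helpers (split_huge_pmd(), pmd_trans_huge_lock())
 * must handle?  That is true for a real transparent huge page and,
 * after this patch, also for a dax/device pmd.  hugetlbfs pmds never
 * reach these helpers; they are handled by the separate hugetlb paths.
 */
static inline bool example_pmd_uses_thp_helpers(pmd_t pmd)
{
	return pmd_trans_huge(pmd) || pmd_devmap(pmd);
}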
Diffstat (limited to 'include/linux/huge_mm.h')
-rw-r--r--	include/linux/huge_mm.h	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 8ca35a131904..d39fa60bd6bf 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -104,7 +104,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 #define split_huge_pmd(__vma, __pmd, __address)				\
 	do {								\
 		pmd_t *____pmd = (__pmd);				\
-		if (pmd_trans_huge(*____pmd))				\
+		if (pmd_trans_huge(*____pmd)				\
+				|| pmd_devmap(*____pmd))		\
 			__split_huge_pmd(__vma, __pmd, __address);	\
 	} while (0)
 
@@ -124,7 +125,7 @@ static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
 		spinlock_t **ptl)
 {
 	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
-	if (pmd_trans_huge(*pmd))
+	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
 		return __pmd_trans_huge_lock(pmd, vma, ptl);
 	else
 		return false;
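For context, a caller of the patched pmd_trans_huge_lock() above typically
looks like the sketch below.  The walker function is illustrative only (not
from this patch) and assumes the 2016-era signature shown in the hunk, where
the helper returns bool and hands back the page-table lock through a
spinlock_t ** argument.  With the pmd_devmap() check in place, a dax pmd now
takes the same locked huge-pmd path as a thp pmd.

/* Illustrative walker fragment, not part of this patch. */
static void example_inspect_pmd(struct vm_area_struct *vma, pmd_t *pmd)
{
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
		/*
		 * *pmd is a huge mapping (thp or, after this patch, dax)
		 * and ptl is held, so the entry is stable to examine here.
		 */
		spin_unlock(ptl);
		return;
	}

	/* Otherwise fall back to walking the pte level under the pte lock. */
}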