path: root/include/linux/huge_mm.h
author    Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>  2012-03-21 16:33:57 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-21 17:54:57 -0700
commit    025c5b2451e42c9e8dfdecd6dc84956ce8f321b5 (patch)
tree      423b4ef1a0ce021360304a80f6e0ba902581a3ad /include/linux/huge_mm.h
parent    pagemap: avoid splitting thp when reading /proc/pid/pagemap (diff)
thp: optimize away unnecessary page table locking
Currently, when we check whether we can handle a thp as it is or need to split it into regular sized pages, we take the page table lock before checking whether a given pmd maps a thp at all. Because of this, when the pmd is not a huge pmd we suffer unnecessary lock/unlock overhead. To remove it, this patch introduces an optimized check function and replaces several similar pieces of logic with it.

[akpm@linux-foundation.org: checkpatch fixes]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
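The point of the new helper is that callers do the cheap pmd_trans_huge() check without any lock, and take the page table lock only when the pmd plausibly maps a thp. A minimal sketch of the resulting caller pattern, assuming the v3.4-era single mm->page_table_lock; the walker function and the work done under the lock are hypothetical and not part of this patch:

/*
 * Hypothetical walker, for illustration only.  pmd_trans_huge_lock()
 * returns 1 with page_table_lock held iff *pmd maps a stable thp;
 * in that case the caller is responsible for unlocking.
 */
static int example_walk_pmd(pmd_t *pmd, struct vm_area_struct *vma)
{
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		/* ... handle the whole huge pmd in one go ... */
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 1;
	}
	/* not a (stable) thp: fall back to the pte-level walk */
	return 0;
}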
Diffstat (limited to 'include/linux/huge_mm.h')
-rw-r--r--  include/linux/huge_mm.h  17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 1b921299abc4..f56cacb4fec3 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -113,6 +113,18 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
 				    unsigned long start,
 				    unsigned long end,
 				    long adjust_next);
+extern int __pmd_trans_huge_lock(pmd_t *pmd,
+				 struct vm_area_struct *vma);
+/* mmap_sem must be held on entry */
+static inline int pmd_trans_huge_lock(pmd_t *pmd,
+				      struct vm_area_struct *vma)
+{
+	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
+	if (pmd_trans_huge(*pmd))
+		return __pmd_trans_huge_lock(pmd, vma);
+	else
+		return 0;
+}
 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 					 unsigned long start,
 					 unsigned long end,
@@ -176,6 +188,11 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 					 long adjust_next)
 {
 }
+static inline int pmd_trans_huge_lock(pmd_t *pmd,
+				      struct vm_area_struct *vma)
+{
+	return 0;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

 #endif /* _LINUX_HUGE_MM_H */
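The diffstat above is limited to include/linux/huge_mm.h, so the out-of-line half of the helper is not shown. In sketch form, the __pmd_trans_huge_lock() added to mm/huge_memory.c by the same patch takes page_table_lock, rechecks pmd_trans_huge() under the lock, and returns 1 with the lock still held for a stable thp, -1 after waiting out a thp that is being split, or 0 (lock dropped) for a regular pmd. Roughly:

int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spin_lock(&vma->vm_mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			/* raced with a split: drop the lock, wait for
			 * the split to finish, tell the caller via -1 */
			spin_unlock(&vma->vm_mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
			return -1;
		}
		/* stable thp: return with page_table_lock held */
		return 1;
	}
	spin_unlock(&vma->vm_mm->page_table_lock);
	return 0;
}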