author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2018-01-31 16:18:13 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-01-31 17:18:38 -0800
commit	86fa949b050184ffc53688516a6a83ae5f98d08a (patch)
tree	58d1c3e2704aa32706c528afd6030a70de60ae15 /arch/x86/include/asm/pgtable.h
parent	sparc64: update pmdp_invalidate() to return old pmd value (diff)
x86/mm: provide pmdp_establish() helper
We need an atomic way to set up a pmd page table entry, avoiding races with
the CPU setting the dirty/accessed bits. This is required to implement
pmdp_invalidate() in a way that doesn't lose these bits.

On PAE we can avoid the expensive cmpxchg8b for the case where the new page
table entry is not present. If it is present, fall back to a cmpxchg loop.

[akpm@linux-foundation.org: add missing `do' to do-while loop]
Link: http://lkml.kernel.org/r/20171213105756.69879-10-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
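The PAE side mentioned above is not in this filtered diff (which covers only
pgtable.h); it lives in arch/x86/include/asm/pgtable-3level.h. Below is a
minimal sketch of the technique the message describes, reconstructed from that
description rather than quoted from the patch. The function name
pmdp_establish_pae_sketch is illustrative only, and the split_pmd union is
assumed to match the two-halves layout that header uses for CONFIG_SMP:

/* Sketch only: a 64-bit PAE pmd split into two 32-bit halves,
 * mirroring the union pgtable-3level.h defines for SMP access. */
union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};

static inline pmd_t pmdp_establish_pae_sketch(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	/*
	 * If the new entry is not present, no CPU can concurrently set
	 * dirty/accessed bits in it, so the 64-bit entry can be written
	 * half-by-half without the expensive cmpxchg8b.
	 */
	if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
		union split_pmd old_pmd, new_pmd, *ptr;

		ptr = (union split_pmd *)pmdp;
		new_pmd.pmd = pmd;

		/* xchg acts as a barrier before the high-half store */
		old_pmd.pmd_low = xchg(&ptr->pmd_low, new_pmd.pmd_low);
		old_pmd.pmd_high = ptr->pmd_high;
		ptr->pmd_high = new_pmd.pmd_high;
		return old_pmd.pmd;
	}

	/* Present entry: fall back to a cmpxchg8b (cmpxchg64) loop. */
	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);

	return old;
}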
Diffstat (limited to '')
-rw-r--r--	arch/x86/include/asm/pgtable.h	15
1 file changed, 15 insertions, 0 deletions
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index e42b8943cb1a..63c2552b6b65 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1109,6 +1109,21 @@ static inline int pud_write(pud_t pud)
 	return pud_flags(pud) & _PAGE_RW;
 }
 
+#ifndef pmdp_establish
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+	if (IS_ENABLED(CONFIG_SMP)) {
+		return xchg(pmdp, pmd);
+	} else {
+		pmd_t old = *pmdp;
+		*pmdp = pmd;
+		return old;
+	}
+}
+#endif
+
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
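Two notes on the hunk above. First, the #ifndef guard lets a PAE definition of
pmdp_establish() from pgtable-3level.h, included earlier by pgtable.h, take
precedence on CONFIG_X86_PAE builds. Second, on the consumer side: later in
this series the generic pmdp_invalidate() is reworked to go through
pmdp_establish(), so the returned pmd keeps any dirty/accessed bits the
hardware set in the meantime. A hedged sketch of that usage, in the shape of
mm/pgtable-generic.c (not part of this pgtable.h diff):

/* Sketch: invalidate a pmd by atomically installing a non-present
 * copy of it; the xchg-based pmdp_establish() returns the old value
 * with any concurrently-set dirty/accessed bits preserved. */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp,
				   pmd_mknotpresent(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}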