author     Linus Torvalds <torvalds@linux-foundation.org>  2020-03-06 07:18:36 -0600
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-03-06 07:18:36 -0600
commit     aeb542a1b5c507ea117d21c3e3e012ba16f065ac (patch)
tree       5299600957d34a4eea5f6d2f2771d277f8a6bc8a /mm/mprotect.c
parent     Merge tag 'devprop-5.6-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm (diff)
parent     arch/Kconfig: update HAVE_RELIABLE_STACKTRACE description (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "7 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  arch/Kconfig: update HAVE_RELIABLE_STACKTRACE description
  mm, hotplug: fix page online with DEBUG_PAGEALLOC compiled but not enabled
  mm/z3fold.c: do not include rwlock.h directly
  fat: fix uninit-memory access for partial initialized inode
  mm: avoid data corruption on CoW fault into PFN-mapped VMA
  mm: fix possible PMD dirty bit lost in set_pmd_migration_entry()
  mm, numa: fix bad pmd by atomically check for pmd_trans_huge when marking page tables prot_numa
Diffstat (limited to 'mm/mprotect.c')
-rw-r--r--  mm/mprotect.c  38
1 file changed, 36 insertions(+), 2 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7a8e84f86831..311c0dadf71c 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -161,6 +161,31 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	return pages;
 }
 
+/*
+ * Used when setting automatic NUMA hinting protection where it is
+ * critical that a numa hinting PMD is not confused with a bad PMD.
+ */
+static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
+{
+	pmd_t pmdval = pmd_read_atomic(pmd);
+
+	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	barrier();
+#endif
+
+	if (pmd_none(pmdval))
+		return 1;
+	if (pmd_trans_huge(pmdval))
+		return 0;
+	if (unlikely(pmd_bad(pmdval))) {
+		pmd_clear_bad(pmd);
+		return 1;
+	}
+
+	return 0;
+}
+
 static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		pud_t *pud, unsigned long addr, unsigned long end,
 		pgprot_t newprot, int dirty_accountable, int prot_numa)
@@ -178,8 +203,17 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		unsigned long this_pages;
 
 		next = pmd_addr_end(addr, end);
-		if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
-				&& pmd_none_or_clear_bad(pmd))
+
+		/*
+		 * Automatic NUMA balancing walks the tables with mmap_sem
+		 * held for read. It's possible for a parallel update to
+		 * occur between the pmd_trans_huge() and
+		 * pmd_none_or_clear_bad() checks, leading to a false
+		 * positive and the PMD being cleared.
+		 * Hence, it's necessary to atomically read the PMD value
+		 * for all the checks.
+		 */
+		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
+		     pmd_none_or_clear_bad_unless_trans_huge(pmd))
 			goto next;
 
 		/* invoke the mmu notifier if the pmd is populated */
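
The fix above is a textbook time-of-check/time-of-use repair. pmd_bad() legitimately reports a transparent-huge PMD as "bad" (a huge PMD is not a page-table pointer), so the old code had to test pmd_trans_huge(*pmd) first; but it then dereferenced *pmd again inside pmd_none_or_clear_bad(), and a parallel THP update landing between the two reads could make a live PMD look corrupt and get cleared. The new helper closes the window by testing a single pmd_read_atomic() snapshot. The userspace sketch below reproduces only the shape of that race; every name in it (TABLE, HUGE, looks_bad(), the writer thread) is invented for illustration and none of it is kernel API.

/*
 * pmd_race_demo.c: a minimal userspace sketch of the double-read race,
 * under invented names throughout (an analogy, not kernel code).
 * Build: cc -O2 -pthread pmd_race_demo.c -o pmd_race_demo
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { TABLE = 1, HUGE = 2 };	/* two valid states of our fake "pmd" */

static _Atomic int pmd = TABLE;
static _Atomic int stop;

static int looks_huge(int v) { return v == HUGE; }
/* Like pmd_bad(): anything that is not a plain table pointer looks corrupt. */
static int looks_bad(int v)  { return v != TABLE; }

/* Stand-in for a parallel THP update racing with the protection walk. */
static void *writer(void *arg)
{
	while (!atomic_load(&stop)) {
		atomic_store(&pmd, HUGE);
		atomic_store(&pmd, TABLE);
	}
	return arg;
}

int main(void)
{
	pthread_t w;
	long racy_hits = 0, fixed_hits = 0;

	pthread_create(&w, NULL, writer, NULL);

	for (int i = 0; i < 10000000; i++) {
		/*
		 * Racy pattern: two separate reads, like the old
		 * pmd_trans_huge(*pmd) followed by pmd_none_or_clear_bad(pmd).
		 * The writer can flip the value in between, so a live huge
		 * entry is misread as corrupt (and would have been cleared).
		 */
		if (!looks_huge(atomic_load(&pmd)) &&
		    looks_bad(atomic_load(&pmd)))
			racy_hits++;

		/*
		 * Fixed pattern: one snapshot, like pmd_read_atomic() in the
		 * new helper; both tests then see the same value.
		 */
		int v = atomic_load(&pmd);
		if (!looks_huge(v) && looks_bad(v))
			fixed_hits++;
	}
	atomic_store(&stop, 1);
	pthread_join(w, NULL);

	printf("double-read checker, false 'bad pmd' hits: %ld\n", racy_hits);
	printf("snapshot checker,    false 'bad pmd' hits: %ld\n", fixed_hits);
	return 0;
}

On a multicore machine the double-read checker typically reports a non-zero count of false "bad" sightings while the snapshot checker reports zero, which is exactly why the patch funnels every test through the one pmdval local in pmd_none_or_clear_bad_unless_trans_huge().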