author		Hugh Dickins <hughd@google.com>	2023-04-18 22:22:02 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2023-04-21 14:52:04 -0700
commit		f724392415b3e1ae844d2766669c0d955fe9a17b (patch)
tree		7ce7fd319cf2f3ebd1c2f84464613c0b960feae2
parent		maple_tree: fix allocation in mas_sparse_area() (diff)
hugetlb: pte_alloc_huge() to replace huge pte_alloc_map()
Some architectures can have their hugetlb pages down at the lowest PTE level:
their huge_pte_alloc() using pte_alloc_map(), but without any following
pte_unmap().  Since none of these arches uses CONFIG_HIGHPTE, this is not seen
as a problem at present; but would become a problem if forthcoming changes
were to add an rcu_read_lock() into pte_offset_map(), with the
rcu_read_unlock() expected in pte_unmap().

Similarly in their huge_pte_offset(): pte_offset_kernel() is good enough for
that, but it's probably less confusing if we define pte_offset_huge() along
with pte_alloc_huge().

Only define them without CONFIG_HIGHPTE: so there would be a build error to
signal if ever more work is needed.

For ease of development, define these now for 6.4-rc1, ahead of any use: then
architectures can integrate patches using them, independent from mm.

Link: https://lkml.kernel.org/r/ae9e7d98-8a3a-cfd9-4762-bcddffdf96cf@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
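To illustrate the intended use (this example is not part of the patch): below
is a rough sketch of how an architecture whose hugetlb pages can sit at the
lowest PTE level might convert its huge_pte_alloc() from pte_alloc_map() to
the new pte_alloc_huge() helper.  The function body is a simplified,
hypothetical composite of the per-arch implementations; real architectures
differ in their upper-level walks and error handling.

/*
 * Illustrative sketch only -- not from this commit.  A typical arch
 * huge_pte_alloc() that can return a PTE-level entry changes its
 * final step from pte_alloc_map() to pte_alloc_huge():
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_alloc(mm, pgd, addr);
        pud_t *pud;
        pmd_t *pmd;

        if (!p4d)
                return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return NULL;
        if (sz == PUD_SIZE)
                return (pte_t *)pud;
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;
        if (sz == PMD_SIZE)
                return (pte_t *)pmd;
        /* was: return pte_alloc_map(mm, pmd, addr); -- with no pte_unmap() after */
        return pte_alloc_huge(mm, pmd, addr);
}

The matching change in such an architecture's huge_pte_offset() would be to
return pte_offset_huge(pmd, addr) at the PTE level, in place of a bare
pte_offset_kernel() or an unpaired pte_offset_map().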
 include/linux/hugetlb.h | 17 +++++++++++++++++
 1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 28703fe22386..cbe1e97a15a1 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -191,6 +191,23 @@ extern struct list_head huge_boot_pages;
 
 /* arch callbacks */
 
+#ifndef CONFIG_HIGHPTE
+/*
+ * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
+ * which may go down to the lowest PTE level in their huge_pte_offset() and
+ * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
+ */
+static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
+{
+	return pte_offset_kernel(pmd, address);
+}
+static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
+		unsigned long address)
+{
+	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
+}
+#endif
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz);
 /*