author     David Hildenbrand <david@redhat.com>  2023-12-20 23:44:28 +0100
committer  Andrew Morton <akpm@linux-foundation.org>  2023-12-29 11:58:48 -0800
commit     ebe2e35ec0f256372c158a18de459fb60070b313 (patch)
tree       83ad314d721c02f1d3608413631f89c5b36f693b /include/linux/mm.h
parent     mm/rmap: introduce and use hugetlb_add_file_rmap() (diff)
mm/rmap: introduce and use hugetlb_try_dup_anon_rmap()
hugetlb rmap handling differs quite a lot from "ordinary" rmap code. For example, hugetlb currently only supports entire mappings, and treats any mapping as mapped using a single "logical PTE". Let's move it out of the way so we can overhaul our "ordinary" rmap implementation/interface.

So let's introduce and use hugetlb_try_dup_anon_rmap() to make all hugetlb handling use dedicated hugetlb_* rmap functions.

Add sanity checks that we end up with the right folios in the right functions.

Note that is_device_private_page() does not apply to hugetlb.

Link: https://lkml.kernel.org/r/20231220224504.646757-5-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
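As background for the message above (the new helper is added to include/linux/rmap.h, not to the file shown in this diff), here is a rough sketch of what hugetlb_try_dup_anon_rmap() looks like; the exact sanity checks and field names are an approximation reconstructed from the description, not a verbatim copy of the patch:

static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
		struct vm_area_struct *vma)
{
	/* Sanity checks: only anon hugetlb folios belong here. */
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

	/*
	 * Unlike the generic anon rmap duplication path, there is no
	 * is_device_private_page() handling: device-private pages do
	 * not apply to hugetlb.
	 */
	if (PageAnonExclusive(&folio->page)) {
		/* A possibly DMA-pinned folio must be copied at fork(). */
		if (unlikely(folio_needs_cow_for_dma(vma, folio)))
			return -EBUSY;
		ClearPageAnonExclusive(&folio->page);
	}
	atomic_inc(&folio->_entire_mapcount);
	return 0;
}

The mm.h hunk below only supplies the folio-based folio_needs_cow_for_dma() check that such a helper relies on; the hugetlb fork path then uses the dedicated hugetlb_* helper instead of the generic page-based rmap functions.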
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h  12
1 file changed, 9 insertions, 3 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2563ffdb51bc..75bba6102825 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1964,15 +1964,21 @@ static inline bool page_maybe_dma_pinned(struct page *page)
  *
  * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
  */
-static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
-					  struct page *page)
+static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
+					   struct folio *folio)
 {
 	VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
 
 	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
 		return false;
 
-	return page_maybe_dma_pinned(page);
+	return folio_maybe_dma_pinned(folio);
+}
+
+static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
+					  struct page *page)
+{
+	return folio_needs_cow_for_dma(vma, page_folio(page));
 }
 
 /**
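For illustration only (not part of the patch), a hypothetical caller sketch showing how the converted check is meant to be used; must_copy_at_fork() is a made-up name, and the locking described in the comment above is assumed to be held:

/*
 * Hypothetical example: at fork() time, with the PT lock and
 * vma->vm_mm->write_protect_seq held, decide whether the child needs
 * its own copy because the parent's anon page may be DMA-pinned.
 */
static bool must_copy_at_fork(struct vm_area_struct *vma, struct page *page)
{
	/*
	 * Folio-aware callers resolve the folio once and use
	 * folio_needs_cow_for_dma() directly; page_needs_cow_for_dma()
	 * stays behind as a thin wrapper for callers that still only
	 * have a struct page in hand.
	 */
	return folio_needs_cow_for_dma(vma, page_folio(page));
}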