author     David Hildenbrand <david@redhat.com>        2024-02-14 21:44:29 +0100
committer  Andrew Morton <akpm@linux-foundation.org>   2024-02-22 15:27:17 -0800
commit     2b42a7e531509577bd822aece610cd6d0dbf0dd7
tree       adaa163db3222cf952f6dfef890db5ab80bc200c
parent     mm/memory: further separate anon and pagecache folio handling in zap_present_pte()
mm/memory: factor out zapping folio pte into zap_present_folio_pte()
Let's prepare for further changes by factoring it out into a separate
function.

Link: https://lkml.kernel.org/r/20240214204435.167852-5-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 mm/memory.c | 53 ++++++++++++++++++++++++++++++++---------------------
 1 file changed, 32 insertions(+), 21 deletions(-)
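
For orientation before reading the hunks: this is a mechanical split. The
folio-specific teardown moves out of zap_present_pte() into the new helper
zap_present_folio_pte(), while the caller keeps only the page lookup, the
no-page fast path, and the should_zap_folio() check. A condensed sketch of
the resulting caller, assembled from the "+" lines of the diff below
(comments added here for orientation; not the verbatim kernel source):

	static inline void zap_present_pte(struct mmu_gather *tlb,
			struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
			unsigned long addr, struct zap_details *details,
			int *rss, bool *force_flush, bool *force_break)
	{
		struct mm_struct *mm = tlb->mm;
		struct folio *folio;
		struct page *page;

		page = vm_normal_page(vma, addr, ptent);
		if (!page) {
			/*
			 * No backing "normal" page (e.g. the shared
			 * zeropage): clear the PTE and remove the TLB
			 * entry directly; no folio accounting needed.
			 */
			ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
			arch_check_zapped_pte(vma, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
			VM_WARN_ON_ONCE(userfaultfd_wp(vma));
			ksm_might_unmap_zero_page(mm, ptent);
			return;
		}

		folio = page_folio(page);
		if (unlikely(!should_zap_folio(details, folio)))
			return;
		/* Everything folio-backed now lives in the new helper. */
		zap_present_folio_pte(tlb, vma, folio, page, pte, ptent,
				addr, details, rss, force_flush, force_break);
	}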
diff --git a/mm/memory.c b/mm/memory.c
index 0b4d76dcf232..168096f9360e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1530,30 +1530,14 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
}
-static inline void zap_present_pte(struct mmu_gather *tlb,
- struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
- unsigned long addr, struct zap_details *details,
- int *rss, bool *force_flush, bool *force_break)
+static inline void zap_present_folio_pte(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, struct folio *folio,
+ struct page *page, pte_t *pte, pte_t ptent, unsigned long addr,
+ struct zap_details *details, int *rss, bool *force_flush,
+ bool *force_break)
{
struct mm_struct *mm = tlb->mm;
bool delay_rmap = false;
- struct folio *folio;
- struct page *page;
-
- page = vm_normal_page(vma, addr, ptent);
- if (!page) {
- /* We don't need up-to-date accessed/dirty bits. */
- ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
- arch_check_zapped_pte(vma, ptent);
- tlb_remove_tlb_entry(tlb, pte, addr);
- VM_WARN_ON_ONCE(userfaultfd_wp(vma));
- ksm_might_unmap_zero_page(mm, ptent);
- return;
- }
-
- folio = page_folio(page);
- if (unlikely(!should_zap_folio(details, folio)))
- return;
if (!folio_test_anon(folio)) {
ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
@@ -1588,6 +1572,33 @@ static inline void zap_present_pte(struct mmu_gather *tlb,
}
}
+static inline void zap_present_pte(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
+ unsigned long addr, struct zap_details *details,
+ int *rss, bool *force_flush, bool *force_break)
+{
+ struct mm_struct *mm = tlb->mm;
+ struct folio *folio;
+ struct page *page;
+
+ page = vm_normal_page(vma, addr, ptent);
+ if (!page) {
+ /* We don't need up-to-date accessed/dirty bits. */
+ ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
+ arch_check_zapped_pte(vma, ptent);
+ tlb_remove_tlb_entry(tlb, pte, addr);
+ VM_WARN_ON_ONCE(userfaultfd_wp(vma));
+ ksm_might_unmap_zero_page(mm, ptent);
+ return;
+ }
+
+ folio = page_folio(page);
+ if (unlikely(!should_zap_folio(details, folio)))
+ return;
+ zap_present_folio_pte(tlb, vma, folio, page, pte, ptent, addr, details,
+ rss, force_flush, force_break);
+}
+
static unsigned long zap_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,