author	Matthew Wilcox (Oracle) <willy@infradead.org>	2023-11-08 18:28:05 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2023-12-10 16:51:38 -0800
commit	01d1e0e6b7d99ebaf2e42d2205595080b7d0c271 (patch)
tree	44908edde8dda5f68929a2052dd1b8446e790590 /mm/memory.c
parent	mm: make mapping_evict_folio() the preferred way to evict clean folios (diff)
mm: convert __do_fault() to use a folio
Convert vmf->page to a folio as soon as we're going to use it.  This
fixes a bug if the fault handler returns a tail page with hardware
poison; tail pages have an invalid page->index, so we would fail to
unmap the page from the page tables.  We actually have to unmap the
entire folio (or mapping_evict_folio() will fail), so use
unmap_mapping_folio() instead.

This also saves various calls to compound_head() hidden in lock_page(),
put_page(), etc.

Link: https://lkml.kernel.org/r/20231108182809.602073-3-willy@infradead.org
Fixes: 793917d997df ("mm/readahead: Add large folio readahead")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
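For context on the "calls to compound_head() hidden in lock_page(),
put_page(), etc." point: the page-based helpers are thin wrappers that
re-derive the folio from the page on every call.  A rough paraphrase of
those wrappers (illustrative only; the real definitions in
include/linux/pagemap.h, include/linux/mm.h and mm/folio-compat.c carry
extra checks such as might_sleep() and devmap handling):

/*
 * Illustrative paraphrase, not the exact kernel source: each
 * page-based helper hides a page_folio() (i.e. compound_head())
 * lookup, which the patched __do_fault() now performs only once.
 */
static inline void lock_page(struct page *page)
{
	folio_lock(page_folio(page));
}

static inline void unlock_page(struct page *page)
{
	folio_unlock(page_folio(page));
}

static inline void put_page(struct page *page)
{
	folio_put(page_folio(page));
}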
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index a7025ed5c65b..e27e2e5beb3f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4245,6 +4245,7 @@ oom:
 static vm_fault_t __do_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
+	struct folio *folio;
 	vm_fault_t ret;
 
 	/*
@@ -4273,27 +4274,26 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
 			    VM_FAULT_DONE_COW)))
 		return ret;
 
+	folio = page_folio(vmf->page);
 	if (unlikely(PageHWPoison(vmf->page))) {
-		struct page *page = vmf->page;
 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
 		if (ret & VM_FAULT_LOCKED) {
-			if (page_mapped(page))
-				unmap_mapping_pages(page_mapping(page),
-						    page->index, 1, false);
-			/* Retry if a clean page was removed from the cache. */
-			if (invalidate_inode_page(page))
+			if (page_mapped(vmf->page))
+				unmap_mapping_folio(folio);
+			/* Retry if a clean folio was removed from the cache. */
+			if (mapping_evict_folio(folio->mapping, folio))
 				poisonret = VM_FAULT_NOPAGE;
-			unlock_page(page);
+			folio_unlock(folio);
 		}
-		put_page(page);
+		folio_put(folio);
 		vmf->page = NULL;
 		return poisonret;
 	}
 
 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
-		lock_page(vmf->page);
+		folio_lock(folio);
 	else
-		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
+		VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
 
 	return ret;
 }
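To make the fixed failure mode concrete, here is a sketch of the old and
new paths for a poisoned tail page of a large readahead folio ("tail" is
a hypothetical struct page * used only for illustration; this is not
code from the tree):

	vm_fault_t poisonret = VM_FAULT_HWPOISON;
	struct folio *folio = page_folio(tail);	/* resolves tail -> folio */

	/* Old path: per the commit message, a tail page has an invalid
	 * page->index, so the single-page range computed from it misses
	 * the real mapping and the poisoned page stayed mapped:
	 *
	 *	unmap_mapping_pages(page_mapping(tail), tail->index, 1, false);
	 */

	/* New path: the folio knows its own index and size, so the whole
	 * range is unmapped and the clean folio can then be evicted: */
	if (page_mapped(tail))
		unmap_mapping_folio(folio);
	if (mapping_evict_folio(folio->mapping, folio))
		poisonret = VM_FAULT_NOPAGE;	/* retry the fault */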