author     2022-06-10 12:41:48 -0700
committer  2022-06-10 12:41:48 -0700
commit     a32e7ea362356af8e89e67600432bad83d2325da (patch)
tree       3f92a78ba91e573ef364d7b8c47bbfeb523ceedc /mm
parent     Merge tag 'devicetree-fixes-for-5.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux (diff)
parent     mm: Add kernel-doc for folio->mlock_count (diff)
Merge tag 'folio-5.19a' of git://git.infradead.org/users/willy/pagecache
Pull folio fixes from Matthew Wilcox:
"Four folio-related fixes:
- Don't release a folio while it's still locked
- Fix a use-after-free after dropping the mmap_lock
- Fix a memory leak when splitting a page
- Fix a kernel-doc warning for struct folio"
* tag 'folio-5.19a' of git://git.infradead.org/users/willy/pagecache:
mm: Add kernel-doc for folio->mlock_count
mm/huge_memory: Fix xarray node memory leak
filemap: Cache the value of vm_flags
filemap: Don't release a locked folio
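The "Cache the value of vm_flags" change in mm/filemap.c (shown in the diff below) fixes a use-after-free: maybe_unlock_mmap_for_io() may drop the mmap_lock, after which the VMA behind vmf->vma can be torn down, so any later vmf->vma->vm_flags dereference can touch freed memory. The fix copies the flags into a local variable while the lock is still held. The following is a minimal userspace sketch of that pattern only, not kernel code; the struct, mutex, and flag value are invented for illustration.

/*
 * Userspace analogy: once the lock protecting an object is dropped,
 * the object may be freed by another thread, so any field still
 * needed must be snapshotted *before* the unlock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct vma_like {
	unsigned long flags;            /* stands in for vma->vm_flags */
};

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

static void fault_path(struct vma_like *vma)
{
	pthread_mutex_lock(&map_lock);

	/* Snapshot the field while the object is guaranteed alive. */
	unsigned long flags = vma->flags;

	/* Analogue of maybe_unlock_mmap_for_io(): drop the lock for I/O. */
	pthread_mutex_unlock(&map_lock);

	/*
	 * After the unlock, another thread may free 'vma'; using the
	 * cached copy instead of vma->flags avoids the use-after-free.
	 */
	if (flags & 0x1)
		printf("sequential readahead path\n");
}

int main(void)
{
	struct vma_like *vma = calloc(1, sizeof(*vma));
	vma->flags = 0x1;
	fault_path(vma);
	free(vma);
	return 0;
}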
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     | 9 +++++----
-rw-r--r--  mm/huge_memory.c | 3 +--
-rw-r--r--  mm/readahead.c   | 2 ++
3 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 9daeaab36081..ac3775c1ce4c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2991,11 +2991,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	struct address_space *mapping = file->f_mapping;
 	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
 	struct file *fpin = NULL;
+	unsigned long vm_flags = vmf->vma->vm_flags;
 	unsigned int mmap_miss;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/* Use the readahead code, even if readahead is disabled */
-	if (vmf->vma->vm_flags & VM_HUGEPAGE) {
+	if (vm_flags & VM_HUGEPAGE) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
 		ra->size = HPAGE_PMD_NR;
@@ -3003,7 +3004,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 		 * Fetch two PMD folios, so we get the chance to actually
 		 * readahead, unless we've been told not to.
 		 */
-		if (!(vmf->vma->vm_flags & VM_RAND_READ))
+		if (!(vm_flags & VM_RAND_READ))
 			ra->size *= 2;
 		ra->async_size = HPAGE_PMD_NR;
 		page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
@@ -3012,12 +3013,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 #endif
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vmf->vma->vm_flags & VM_RAND_READ)
+	if (vm_flags & VM_RAND_READ)
 		return fpin;
 	if (!ra->ra_pages)
 		return fpin;
 
-	if (vmf->vma->vm_flags & VM_SEQ_READ) {
+	if (vm_flags & VM_SEQ_READ) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		page_cache_sync_ra(&ractl, ra->ra_pages);
 		return fpin;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a77c78a2b6b5..f7248002dad9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2672,8 +2672,7 @@ out_unlock:
 	if (mapping)
 		i_mmap_unlock_read(mapping);
 out:
-	/* Free any memory we didn't use */
-	xas_nomem(&xas, 0);
+	xas_destroy(&xas);
 	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
 	return ret;
 }
diff --git a/mm/readahead.c b/mm/readahead.c
index 415c39d764ea..57a015108254 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -164,12 +164,14 @@ static void read_pages(struct readahead_control *rac)
 		while ((folio = readahead_folio(rac)) != NULL) {
 			unsigned long nr = folio_nr_pages(folio);
 
+			folio_get(folio);
 			rac->ra->size -= nr;
 			if (rac->ra->async_size >= nr) {
 				rac->ra->async_size -= nr;
 				filemap_remove_folio(folio);
 			}
 			folio_unlock(folio);
+			folio_put(folio);
 		}
 	} else {
 		while ((folio = readahead_folio(rac)) != NULL)
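The mm/readahead.c hunk above takes an extra reference with folio_get() before filemap_remove_folio(), which can drop what is effectively the last reference, so that the later folio_unlock() never runs on a folio that has already been freed; folio_put() drops the pin only after the unlock. Below is a hedged userspace sketch of that take-ref / operate / unlock / put-ref ordering; the struct, helpers, and refcounting scheme are illustrative stand-ins, not kernel APIs.

/*
 * Userspace sketch of the pin-across-removal pattern: grab a reference
 * before an operation that may release the last one, so the final
 * "unlock" still acts on live memory, then drop the extra reference.
 */
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	int locked;
};

static void obj_get(struct obj *o) { o->refcount++; }

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}

/* May drop what was the caller's only reference. */
static void remove_from_cache(struct obj *o)
{
	obj_put(o);
}

static void process(struct obj *o)
{
	obj_get(o);             /* like folio_get(): pin across the removal */
	remove_from_cache(o);   /* like filemap_remove_folio() */
	o->locked = 0;          /* like folio_unlock(): safe, we still hold a ref */
	obj_put(o);             /* like folio_put(): drop the pin */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	o->refcount = 1;
	o->locked = 1;
	process(o);
	printf("done\n");
	return 0;
}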