author     2022-06-10 12:41:48 -0700
committer  2022-06-10 12:41:48 -0700
commit     a32e7ea362356af8e89e67600432bad83d2325da
tree       3f92a78ba91e573ef364d7b8c47bbfeb523ceedc /mm/filemap.c
parent     Merge tag 'devicetree-fixes-for-5.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux
parent     mm: Add kernel-doc for folio->mlock_count
Merge tag 'folio-5.19a' of git://git.infradead.org/users/willy/pagecache
Pull folio fixes from Matthew Wilcox:
"Four folio-related fixes:
- Don't release a folio while it's still locked
- Fix a use-after-free after dropping the mmap_lock
- Fix a memory leak when splitting a page
- Fix a kernel-doc warning for struct folio"
* tag 'folio-5.19a' of git://git.infradead.org/users/willy/pagecache:
mm: Add kernel-doc for folio->mlock_count
mm/huge_memory: Fix xarray node memory leak
filemap: Cache the value of vm_flags
filemap: Don't release a locked folio
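
The "Don't release a folio while it's still locked" fix enforces a general lifetime-ordering rule: the last reference to a lockable object must not be dropped while the object is still locked, or the eventual unlock (and the free itself) touches memory that is already gone. Below is a minimal userspace analogue of that rule; all names are hypothetical and this is only an illustrative sketch, not the kernel code path that was fixed.

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical refcounted, lockable object standing in for a folio. */
struct obj {
	pthread_mutex_t lock;
	int refcount;
};

/* Drop a reference; frees the object when the count hits zero.
 * (A real implementation would use an atomic refcount.) */
static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		pthread_mutex_destroy(&o->lock);
		free(o);
	}
}

/* BROKEN: if this was the last reference, obj_put() frees the object,
 * and the unlock below dereferences freed memory. */
static void release_buggy(struct obj *o)
{
	obj_put(o);
	pthread_mutex_unlock(&o->lock);	/* use-after-free */
}

/* FIXED: unlock first, then drop the reference. */
static void release_fixed(struct obj *o)
{
	pthread_mutex_unlock(&o->lock);
	obj_put(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	pthread_mutex_init(&o->lock, NULL);
	o->refcount = 1;

	pthread_mutex_lock(&o->lock);
	release_fixed(o);	/* o is freed here, safely */
	return 0;
}

The ordering also matters for the free itself: tearing down a lock that is still held is undefined, which is why the unlock has to come before the final put rather than after it.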
Diffstat (limited to 'mm/filemap.c')
 mm/filemap.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 9daeaab36081..ac3775c1ce4c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2991,11 +2991,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	struct address_space *mapping = file->f_mapping;
 	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
 	struct file *fpin = NULL;
+	unsigned long vm_flags = vmf->vma->vm_flags;
 	unsigned int mmap_miss;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/* Use the readahead code, even if readahead is disabled */
-	if (vmf->vma->vm_flags & VM_HUGEPAGE) {
+	if (vm_flags & VM_HUGEPAGE) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
 		ra->size = HPAGE_PMD_NR;
@@ -3003,7 +3004,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 		 * Fetch two PMD folios, so we get the chance to actually
 		 * readahead, unless we've been told not to.
 		 */
-		if (!(vmf->vma->vm_flags & VM_RAND_READ))
+		if (!(vm_flags & VM_RAND_READ))
 			ra->size *= 2;
 		ra->async_size = HPAGE_PMD_NR;
 		page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
@@ -3012,12 +3013,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 #endif
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vmf->vma->vm_flags & VM_RAND_READ)
+	if (vm_flags & VM_RAND_READ)
 		return fpin;
 	if (!ra->ra_pages)
 		return fpin;
 
-	if (vmf->vma->vm_flags & VM_SEQ_READ) {
+	if (vm_flags & VM_SEQ_READ) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		page_cache_sync_ra(&ractl, ra->ra_pages);
 		return fpin;
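
The cached vm_flags above is the use-after-free fix from this pull: maybe_unlock_mmap_for_io() may drop the mmap_lock before starting I/O, and once that lock is released a concurrent unmap can free the VMA, so any later vmf->vma->vm_flags dereference reads freed memory. Snapshotting the flags into a local while the lock is still held keeps the subsequent VM_HUGEPAGE/VM_RAND_READ/VM_SEQ_READ tests safe. A minimal userspace sketch of the same pattern, with hypothetical names (struct region, maplock) rather than kernel APIs:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a vma: only valid while `maplock` is held. */
struct region {
	unsigned long flags;
};

#define RGN_SEQ_READ	0x1UL

static pthread_mutex_t maplock = PTHREAD_MUTEX_INITIALIZER;

/* BROKEN: keeps dereferencing r after dropping the lock that
 * guarantees r is still alive. */
static void readahead_buggy(struct region *r)
{
	pthread_mutex_unlock(&maplock);	/* r may be freed from here on */
	if (r->flags & RGN_SEQ_READ)	/* use-after-free */
		puts("sequential readahead");
}

/* FIXED: snapshot the flags while the lock is still held, then only
 * use the local copy after unlocking. */
static void readahead_fixed(struct region *r)
{
	unsigned long flags = r->flags;	/* copied under the lock */

	pthread_mutex_unlock(&maplock);
	if (flags & RGN_SEQ_READ)
		puts("sequential readahead");
}

int main(void)
{
	struct region *r = malloc(sizeof(*r));

	r->flags = RGN_SEQ_READ;
	pthread_mutex_lock(&maplock);
	readahead_fixed(r);	/* prints "sequential readahead" */
	free(r);
	return 0;
}

The fix is deliberately minimal: vm_flags is a single word, so taking the snapshot costs nothing, and the readahead decision keeps acting on one consistent value instead of a structure whose lifetime the function no longer controls.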