author    Linus Torvalds <torvalds@linux-foundation.org>  2019-03-14 15:10:10 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-03-14 15:10:10 -0700
commit    f261c4e529dac5608a604d3dd3ae1cd2adf23c89 (patch)
tree      5e73077db2d1bd0011531941c2cb4cd65aeab78a /mm
parent    Merge tag 'acpi-5.1-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm (diff)
parent    include/linux/swap.h: use offsetof() instead of custom __swapoffset macro (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc patches from Andrew Morton:

 - a little bit more MM

 - a few fixups

[ The "little bit more MM" is actually just one of the three patches
  Andrew sent for mm/filemap.c, I'm still mulling over two more of them
  from Josef Bacik  - Linus ]

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  include/linux/swap.h: use offsetof() instead of custom __swapoffset macro
  tools/testing/selftests/proc/proc-pid-vm.c: test with vsyscall in mind
  zram: default to lzo-rle instead of lzo
  filemap: pass vm_fault to the mmap ra helpers
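The swap.h patch in the list above replaces a hand-rolled offset macro with the standard offsetof(). A minimal user-space sketch of that idiom — note the union layout here is a simplified stand-in, not the kernel's exact union swap_header definition:

#include <stddef.h>	/* offsetof() */
#include <stdio.h>

/* Simplified stand-in for the kernel's union swap_header. */
union swap_header {
	struct {
		char		bootbits[1024];
		unsigned int	version;
		unsigned int	badpages[1];
	} info;
	struct {
		char		reserved[4086];
		char		magic[10];
	} magic;
};

/* Old style: compute a member offset by casting a null pointer. */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)

int main(void)
{
	/* offsetof() yields the same value without the null-pointer trick. */
	printf("old: %lu  new: %zu\n",
	       __swapoffset(info.badpages),
	       offsetof(union swap_header, info.badpages));
	return 0;
}

The result is identical; the point of the cleanup is to use the standard, well-defined macro rather than a private reimplementation of it.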
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c  28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index a3b4021c448f..ec6566ffbd90 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2420,20 +2420,20 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
  * Synchronous readahead happens when we don't even find
  * a page in the page cache at all.
  */
-static void do_sync_mmap_readahead(struct vm_area_struct *vma,
-				   struct file_ra_state *ra,
-				   struct file *file,
-				   pgoff_t offset)
+static void do_sync_mmap_readahead(struct vm_fault *vmf)
 {
+	struct file *file = vmf->vma->vm_file;
+	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
+	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma->vm_flags & VM_RAND_READ)
 		return;
 	if (!ra->ra_pages)
 		return;
 
-	if (vma->vm_flags & VM_SEQ_READ) {
+	if (vmf->vma->vm_flags & VM_SEQ_READ) {
 		page_cache_sync_readahead(mapping, ra, file, offset,
 					  ra->ra_pages);
 		return;
@@ -2463,16 +2463,16 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
  * Asynchronous readahead happens when we find the page and PG_readahead,
  * so we want to possibly extend the readahead further..
  */
-static void do_async_mmap_readahead(struct vm_area_struct *vma,
-				    struct file_ra_state *ra,
-				    struct file *file,
-				    struct page *page,
-				    pgoff_t offset)
+static void do_async_mmap_readahead(struct vm_fault *vmf,
+				    struct page *page)
 {
+	struct file *file = vmf->vma->vm_file;
+	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
+	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma->vm_flags & VM_RAND_READ)
 		return;
 	if (ra->mmap_miss > 0)
 		ra->mmap_miss--;
@@ -2531,10 +2531,10 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 		 * We found the page, so try async readahead before
 		 * waiting for the lock.
 		 */
-		do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
+		do_async_mmap_readahead(vmf, page);
 	} else if (!page) {
 		/* No page in the page cache at all */
-		do_sync_mmap_readahead(vmf->vma, ra, file, offset);
+		do_sync_mmap_readahead(vmf);
 		count_vm_event(PGMAJFAULT);
 		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
 		ret = VM_FAULT_MAJOR;
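The filemap hunks above are pure plumbing: instead of every call site unpacking vma, ra, file and offset, the readahead helpers take the vm_fault itself and derive those values internally. A standalone sketch of the same pattern, with the kernel structures reduced to stubs (these are not the real kernel layouts):

#include <stdio.h>

/* Reduced stand-ins for the kernel structures involved. */
struct file_ra_state { unsigned long ra_pages; unsigned int mmap_miss; };
struct file { struct file_ra_state f_ra; };
struct vm_area_struct { unsigned long vm_flags; struct file *vm_file; };
struct vm_fault { struct vm_area_struct *vma; unsigned long pgoff; };

#define VM_RAND_READ 0x1UL

/* After the patch: one argument; file, ra and offset derived from it. */
static void do_sync_mmap_readahead(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct file_ra_state *ra = &file->f_ra;
	unsigned long offset = vmf->pgoff;

	if (vmf->vma->vm_flags & VM_RAND_READ)
		return;		/* caller asked for no readahead */
	if (!ra->ra_pages)
		return;		/* readahead disabled */

	printf("sync readahead around pgoff %lu (%lu pages)\n",
	       offset, ra->ra_pages);
}

int main(void)
{
	struct file f = { .f_ra = { .ra_pages = 32, .mmap_miss = 0 } };
	struct vm_area_struct vma = { .vm_flags = 0, .vm_file = &f };
	struct vm_fault vmf = { .vma = &vma, .pgoff = 42 };

	/* Call sites shrink from four arguments to one. */
	do_sync_mmap_readahead(&vmf);
	return 0;
}

The payoff shows at the call sites, which drop from four arguments to one, and in future changes: any new per-fault state travels inside vm_fault without touching the helpers' signatures again.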