From ebf921a9fac38560e0fc3a4381e163a6969efd5a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 22 Jan 2022 15:46:22 -0500 Subject: readahead: Remove read_cache_pages() With no remaining users, remove this function and the related infrastructure. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Al Viro Acked-by: Al Viro --- mm/readahead.c | 76 ---------------------------------------------------------- 1 file changed, 76 deletions(-) (limited to 'mm') diff --git a/mm/readahead.c b/mm/readahead.c index d3a47546d17d..9097af639beb 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -142,82 +142,6 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) } EXPORT_SYMBOL_GPL(file_ra_state_init); -/* - * see if a page needs releasing upon read_cache_pages() failure - * - the caller of read_cache_pages() may have set PG_private or PG_fscache - * before calling, such as the NFS fs marking pages that are cached locally - * on disk, thus we need to give the fs a chance to clean up in the event of - * an error - */ -static void read_cache_pages_invalidate_page(struct address_space *mapping, - struct page *page) -{ - if (page_has_private(page)) { - if (!trylock_page(page)) - BUG(); - page->mapping = mapping; - folio_invalidate(page_folio(page), 0, PAGE_SIZE); - page->mapping = NULL; - unlock_page(page); - } - put_page(page); -} - -/* - * release a list of pages, invalidating them first if need be - */ -static void read_cache_pages_invalidate_pages(struct address_space *mapping, - struct list_head *pages) -{ - struct page *victim; - - while (!list_empty(pages)) { - victim = lru_to_page(pages); - list_del(&victim->lru); - read_cache_pages_invalidate_page(mapping, victim); - } -} - -/** - * read_cache_pages - populate an address space with some pages & start reads against them - * @mapping: the address_space - * @pages: The address of a list_head which contains the target pages. These - * pages have their ->index populated and are otherwise uninitialised. - * @filler: callback routine for filling a single page. - * @data: private data for the callback routine. - * - * Hides the details of the LRU cache etc from the filesystems. - * - * Returns: %0 on success, error return by @filler otherwise - */ -int read_cache_pages(struct address_space *mapping, struct list_head *pages, - int (*filler)(void *, struct page *), void *data) -{ - struct page *page; - int ret = 0; - - while (!list_empty(pages)) { - page = lru_to_page(pages); - list_del(&page->lru); - if (add_to_page_cache_lru(page, mapping, page->index, - readahead_gfp_mask(mapping))) { - read_cache_pages_invalidate_page(mapping, page); - continue; - } - put_page(page); - - ret = filler(data, page); - if (unlikely(ret)) { - read_cache_pages_invalidate_pages(mapping, pages); - break; - } - task_io_account_read(PAGE_SIZE); - } - return ret; -} - -EXPORT_SYMBOL(read_cache_pages); - static void read_pages(struct readahead_control *rac, struct list_head *pages, bool skip_page) { -- cgit v1.2.3-59-g8ed1b From 704528d895dd3e7b173e672116b4eb2b0a0fceb0 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 23 Mar 2022 21:29:04 -0400 Subject: fs: Remove ->readpages address space operation All filesystems have now been converted to use ->readahead, so remove the ->readpages operation and fix all the comments that used to refer to it. 
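For context, the replacement pattern looks roughly like the sketch below: instead of receiving a list of uninitialised pages as ->readpages() did, a converted filesystem implements ->readahead() and pulls already-locked, already-in-cache pages out of the readahead_control. This is an illustrative sketch only; example_readahead() and the fs_read_page_sync() helper are placeholders for a filesystem's own read path, not code from any particular conversion.

/*
 * Illustrative sketch of the interface filesystems were converted to.
 * fs_read_page_sync() is a hypothetical helper standing in for the
 * filesystem's own (synchronous, for simplicity) read of one page.
 */
static void example_readahead(struct readahead_control *rac)
{
	struct page *page;

	while ((page = readahead_page(rac))) {
		if (fs_read_page_sync(rac->file, page) == 0)
			SetPageUptodate(page);
		/*
		 * Readahead failures are not reported: simply unlock the
		 * page and drop the reference taken by readahead_page();
		 * the VFS falls back to ->readpage() if the data is
		 * actually needed.
		 */
		unlock_page(page);
		put_page(page);
	}
}

The method is wired up through the filesystem's address_space_operations in place of the removed .readpages entry.
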
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Al Viro Acked-by: Al Viro --- Documentation/filesystems/fsverity.rst | 6 +++--- Documentation/filesystems/locking.rst | 6 ------ Documentation/filesystems/vfs.rst | 11 ----------- fs/btrfs/reflink.c | 4 ++-- fs/cifs/cifssmb.c | 2 +- fs/cifs/inode.c | 2 +- fs/crypto/crypto.c | 2 +- fs/ext4/readpage.c | 2 +- fs/f2fs/data.c | 4 ++-- fs/fuse/fuse_i.h | 2 +- fs/verity/verify.c | 4 ++-- include/linux/fs.h | 6 ------ include/linux/fsverity.h | 2 +- mm/filemap.c | 2 +- mm/readahead.c | 15 ++------------- 15 files changed, 18 insertions(+), 52 deletions(-) (limited to 'mm') diff --git a/Documentation/filesystems/fsverity.rst b/Documentation/filesystems/fsverity.rst index 1d831e3cbcb3..8cc536d08f51 100644 --- a/Documentation/filesystems/fsverity.rst +++ b/Documentation/filesystems/fsverity.rst @@ -549,7 +549,7 @@ Pagecache ~~~~~~~~~ For filesystems using Linux's pagecache, the ``->readpage()`` and -``->readpages()`` methods must be modified to verify pages before they +``->readahead()`` methods must be modified to verify pages before they are marked Uptodate. Merely hooking ``->read_iter()`` would be insufficient, since ``->read_iter()`` is not used for memory maps. @@ -611,7 +611,7 @@ workqueue, and then the workqueue work does the decryption or verification. Finally, pages where no decryption or verity error occurred are marked Uptodate, and the pages are unlocked. -Files on ext4 and f2fs may contain holes. Normally, ``->readpages()`` +Files on ext4 and f2fs may contain holes. Normally, ``->readahead()`` simply zeroes holes and sets the corresponding pages Uptodate; no bios are issued. To prevent this case from bypassing fs-verity, these filesystems use fsverity_verify_page() to verify hole pages. @@ -778,7 +778,7 @@ weren't already directly answered in other parts of this document. - To prevent bypassing verification, pages must not be marked Uptodate until they've been verified. Currently, each filesystem is responsible for marking pages Uptodate via - ``->readpages()``. Therefore, currently it's not possible for + ``->readahead()``. Therefore, currently it's not possible for the VFS to do the verification on its own. Changing this would require significant changes to the VFS and all filesystems. diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index 2998cec9af4b..c26d854275a0 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -241,8 +241,6 @@ prototypes:: int (*writepages)(struct address_space *, struct writeback_control *); bool (*dirty_folio)(struct address_space *, struct folio *folio); void (*readahead)(struct readahead_control *); - int (*readpages)(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); @@ -274,7 +272,6 @@ readpage: yes, unlocks shared writepages: dirty_folio maybe readahead: yes, unlocks shared -readpages: no shared write_begin: locks the page exclusive write_end: yes, unlocks exclusive bmap: @@ -300,9 +297,6 @@ completion. ->readahead() unlocks the pages that I/O is attempted on like ->readpage(). -->readpages() populates the pagecache with the passed pages and starts -I/O against them. They come unlocked upon I/O completion. - ->writepage() is used for two purposes: for "memory cleansing" and for "sync". 
These are quite different operations and the behaviour may differ depending upon the mode. diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index 4f14edf93941..794bd1a66bfb 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -726,8 +726,6 @@ cache in your filesystem. The following members are defined: int (*writepages)(struct address_space *, struct writeback_control *); bool (*dirty_folio)(struct address_space *, struct folio *); void (*readahead)(struct readahead_control *); - int (*readpages)(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); @@ -817,15 +815,6 @@ cache in your filesystem. The following members are defined: completes successfully. Setting PageError on any page will be ignored; simply unlock the page if an I/O error occurs. -``readpages`` - called by the VM to read pages associated with the address_space - object. This is essentially just a vector version of readpage. - Instead of just one page, several pages are requested. - readpages is only used for read-ahead, so read errors are - ignored. If anything goes wrong, feel free to give up. - This interface is deprecated and will be removed by the end of - 2020; implement readahead instead. - ``write_begin`` Called by the generic buffered write code to ask the filesystem to prepare to write len bytes at the given offset in the file. diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c index 04a88bfe4fcf..998e3f180d90 100644 --- a/fs/btrfs/reflink.c +++ b/fs/btrfs/reflink.c @@ -645,7 +645,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len, int ret; /* - * Lock destination range to serialize with concurrent readpages() and + * Lock destination range to serialize with concurrent readahead() and * source range to serialize with relocation. */ btrfs_double_extent_lock(src, loff, dst, dst_loff, len); @@ -739,7 +739,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, } /* - * Lock destination range to serialize with concurrent readpages() and + * Lock destination range to serialize with concurrent readahead() and * source range to serialize with relocation. 
*/ btrfs_double_extent_lock(src, off, inode, destoff, len); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 071e2f21a7db..bc3ded4f34f6 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -597,7 +597,7 @@ CIFSSMBNegotiate(const unsigned int xid, set_credits(server, server->maxReq); /* probably no need to store and check maxvcs */ server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize); - /* set up max_read for readpages check */ + /* set up max_read for readahead check */ server->max_read = server->maxBuf; server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf); diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 60d853c92f6a..2f9e7d2f81b6 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -49,7 +49,7 @@ static void cifs_set_ops(struct inode *inode) inode->i_fop = &cifs_file_ops; } - /* check if server can support readpages */ + /* check if server can support readahead */ if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read < PAGE_SIZE + MAX_CIFS_HDR_SIZE) inode->i_data.a_ops = &cifs_addr_ops_smallbuf; diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 4fcca79f39ae..526a4c1bed99 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -248,7 +248,7 @@ EXPORT_SYMBOL(fscrypt_encrypt_block_inplace); * which must still be locked and not uptodate. Normally, blocksize == * PAGE_SIZE and the whole page is decrypted at once. * - * This is for use by the filesystem's ->readpages() method. + * This is for use by the filesystem's ->readahead() method. * * Return: 0 on success; -errno on failure */ diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index 1aa26d6634fc..af491e170c4a 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -109,7 +109,7 @@ static void verity_work(struct work_struct *work) struct bio *bio = ctx->bio; /* - * fsverity_verify_bio() may call readpages() again, and although verity + * fsverity_verify_bio() may call readahead() again, and although verity * will be disabled for that, decryption may still be needed, causing * another bio_post_read_ctx to be allocated. So to guarantee that * mempool_alloc() never deadlocks we must free the current ctx first. diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index f8fcbe91059b..c92920c8661d 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -164,7 +164,7 @@ static void f2fs_verify_bio(struct work_struct *work) bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS); /* - * fsverity_verify_bio() may call readpages() again, and while verity + * fsverity_verify_bio() may call readahead() again, and while verity * will be disabled for this, decryption and/or decompression may still * be needed, resulting in another bio_post_read_ctx being allocated. * So to prevent deadlocks we need to release the current ctx to the @@ -2392,7 +2392,7 @@ static void f2fs_readahead(struct readahead_control *rac) if (!f2fs_is_compress_backend_ready(inode)) return; - /* If the file has inline data, skip readpages */ + /* If the file has inline data, skip readahead */ if (f2fs_has_inline_data(inode)) return; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index eac4984cc753..488b460e046f 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -627,7 +627,7 @@ struct fuse_conn { /** Connection successful. Only set in INIT */ unsigned conn_init:1; - /** Do readpages asynchronously? Only set in INIT */ + /** Do readahead asynchronously? Only set in INIT */ unsigned async_read:1; /** Return an unique read error after abort. 
Only set in INIT */ diff --git a/fs/verity/verify.c b/fs/verity/verify.c index 0adb970f4e73..14e2fb49cff5 100644 --- a/fs/verity/verify.c +++ b/fs/verity/verify.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Data verification functions, i.e. hooks for ->readpages() + * Data verification functions, i.e. hooks for ->readahead() * * Copyright 2019 Google LLC */ @@ -214,7 +214,7 @@ EXPORT_SYMBOL_GPL(fsverity_verify_page); * that fail verification are set to the Error state. Verification is skipped * for pages already in the Error state, e.g. due to fscrypt decryption failure. * - * This is a helper function for use by the ->readpages() method of filesystems + * This is a helper function for use by the ->readahead() method of filesystems * that issue bios to read data directly into the page cache. Filesystems that * populate the page cache without issuing bios (e.g. non block-based * filesystems) must instead call fsverity_verify_page() directly on each page. diff --git a/include/linux/fs.h b/include/linux/fs.h index 183160872133..7c81887cc7e8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -370,12 +370,6 @@ struct address_space_operations { /* Mark a folio dirty. Return true if this dirtied it */ bool (*dirty_folio)(struct address_space *, struct folio *); - /* - * Reads in the requested pages. Unlike ->readpage(), this is - * PURELY used for read-ahead!. - */ - int (*readpages)(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages); void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *mapping, diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h index b568b3c7d095..a7afc800bd8d 100644 --- a/include/linux/fsverity.h +++ b/include/linux/fsverity.h @@ -221,7 +221,7 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work) * * This checks whether ->i_verity_info has been set. * - * Filesystems call this from ->readpages() to check whether the pages need to + * Filesystems call this from ->readahead() to check whether the pages need to * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to * a race condition where the file is being read concurrently with * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.) diff --git a/mm/filemap.c b/mm/filemap.c index 647d72bf23b6..d904cd7e4181 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2538,7 +2538,7 @@ static int filemap_create_folio(struct file *file, * the page cache as the locked folio would then be enough to * synchronize with hole punching. But there are code paths * such as filemap_update_page() filling in partially uptodate - * pages or ->readpages() that need to hold invalidate_lock + * pages or ->readahead() that need to hold invalidate_lock * while mapping blocks for IO so let's hold the lock here as * well to keep locking rules simple. 
*/ diff --git a/mm/readahead.c b/mm/readahead.c index 9097af639beb..297bd0719cda 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -170,13 +170,6 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages, unlock_page(page); put_page(page); } - } else if (aops->readpages) { - aops->readpages(rac->file, rac->mapping, pages, - readahead_count(rac)); - /* Clean up the remaining pages */ - put_pages_list(pages); - rac->_index += rac->_nr_pages; - rac->_nr_pages = 0; } else { while ((page = readahead_page(rac))) { aops->readpage(rac->file, page); @@ -253,10 +246,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, folio = filemap_alloc_folio(gfp_mask, 0); if (!folio) break; - if (mapping->a_ops->readpages) { - folio->index = index + i; - list_add(&folio->lru, &page_pool); - } else if (filemap_add_folio(mapping, folio, index + i, + if (filemap_add_folio(mapping, folio, index + i, gfp_mask) < 0) { folio_put(folio); read_pages(ractl, &page_pool, true); @@ -318,8 +308,7 @@ void force_page_cache_ra(struct readahead_control *ractl, struct backing_dev_info *bdi = inode_to_bdi(mapping->host); unsigned long max_pages, index; - if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages && - !mapping->a_ops->readahead)) + if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead)) return; /* -- cgit v1.2.3-59-g8ed1b From dfd8b4fc76d5f7ae5663328b791c4acf222c4d39 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 31 Mar 2022 05:35:23 -0700 Subject: mm: remove the pages argument to read_pages This is always an empty list or NULL with the removal of the ->readahead support, so remove it. Signed-off-by: Christoph Hellwig Reviewed-by: Al Viro Acked-by: Al Viro Signed-off-by: Matthew Wilcox (Oracle) --- mm/readahead.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/readahead.c b/mm/readahead.c index 297bd0719cda..05207a663801 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -142,8 +142,7 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) } EXPORT_SYMBOL_GPL(file_ra_state_init); -static void read_pages(struct readahead_control *rac, struct list_head *pages, - bool skip_page) +static void read_pages(struct readahead_control *rac, bool skip_page) { const struct address_space_operations *aops = rac->mapping->a_ops; struct page *page; @@ -179,7 +178,6 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages, blk_finish_plug(&plug); - BUG_ON(pages && !list_empty(pages)); BUG_ON(readahead_count(rac)); out: @@ -206,7 +204,6 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, { struct address_space *mapping = ractl->mapping; unsigned long index = readahead_index(ractl); - LIST_HEAD(page_pool); gfp_t gfp_mask = readahead_gfp_mask(mapping); unsigned long i; @@ -238,7 +235,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, * have a stable reference to this page, and it's * not worth getting one just for that. 
*/ - read_pages(ractl, &page_pool, true); + read_pages(ractl, true); i = ractl->_index + ractl->_nr_pages - index - 1; continue; } @@ -249,7 +246,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, if (filemap_add_folio(mapping, folio, index + i, gfp_mask) < 0) { folio_put(folio); - read_pages(ractl, &page_pool, true); + read_pages(ractl, true); i = ractl->_index + ractl->_nr_pages - index - 1; continue; } @@ -263,7 +260,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, * uptodate then the caller will launch readpage again, and * will then handle the error. */ - read_pages(ractl, &page_pool, false); + read_pages(ractl, false); filemap_invalidate_unlock_shared(mapping); memalloc_nofs_restore(nofs); } @@ -537,7 +534,7 @@ void page_cache_ra_order(struct readahead_control *ractl, ra->async_size += index - limit - 1; } - read_pages(ractl, NULL, false); + read_pages(ractl, false); /* * If there were already pages in the page cache, then we may have -- cgit v1.2.3-59-g8ed1b From b4e089d705eef82364945abae325cd241c80e107 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 31 Mar 2022 05:35:55 -0700 Subject: mm: remove the skip_page argument to read_pages The skip_page argument to read_pages controls if rac->_index is incremented before returning from the function. Just open code that in the callers. Signed-off-by: Christoph Hellwig Reviewed-by: Al Viro Acked-by: Al Viro Signed-off-by: Matthew Wilcox (Oracle) --- mm/readahead.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/readahead.c b/mm/readahead.c index 05207a663801..2e5c695b303d 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -142,14 +142,14 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) } EXPORT_SYMBOL_GPL(file_ra_state_init); -static void read_pages(struct readahead_control *rac, bool skip_page) +static void read_pages(struct readahead_control *rac) { const struct address_space_operations *aops = rac->mapping->a_ops; struct page *page; struct blk_plug plug; if (!readahead_count(rac)) - goto out; + return; blk_start_plug(&plug); @@ -179,10 +179,6 @@ static void read_pages(struct readahead_control *rac, bool skip_page) blk_finish_plug(&plug); BUG_ON(readahead_count(rac)); - -out: - if (skip_page) - rac->_index++; } /** @@ -235,7 +231,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, * have a stable reference to this page, and it's * not worth getting one just for that. */ - read_pages(ractl, true); + read_pages(ractl); + ractl->_index++; i = ractl->_index + ractl->_nr_pages - index - 1; continue; } @@ -246,7 +243,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, if (filemap_add_folio(mapping, folio, index + i, gfp_mask) < 0) { folio_put(folio); - read_pages(ractl, true); + read_pages(ractl); + ractl->_index++; i = ractl->_index + ractl->_nr_pages - index - 1; continue; } @@ -260,7 +258,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, * uptodate then the caller will launch readpage again, and * will then handle the error. 
*/ - read_pages(ractl, false); + read_pages(ractl); filemap_invalidate_unlock_shared(mapping); memalloc_nofs_restore(nofs); } @@ -534,7 +532,7 @@ void page_cache_ra_order(struct readahead_control *ractl, ra->async_size += index - limit - 1; } - read_pages(ractl, false); + read_pages(ractl); /* * If there were already pages in the page cache, then we may have -- cgit v1.2.3-59-g8ed1b From 1e4702806faca1551733f58be17ea11a9d214e91 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 31 Mar 2022 15:02:34 -0400 Subject: readahead: Update comments - Refer to folios where appropriate, not pages (Matthew Wilcox) - Eliminate references to the internal PG_readhead - Use "readahead" consistently - not "read-ahead" or "read ahead" (mostly Neil Brown) - Clarify some sections that, on reflection, weren't very clear (Neil Brown) - Minor punctuation/spelling fixes (Neil Brown) Signed-off-by: Matthew Wilcox (Oracle) --- mm/readahead.c | 92 ++++++++++++++++++++++++++++------------------------------ 1 file changed, 45 insertions(+), 47 deletions(-) (limited to 'mm') diff --git a/mm/readahead.c b/mm/readahead.c index 2e5c695b303d..8e3775829513 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -13,29 +13,29 @@ * * Readahead is used to read content into the page cache before it is * explicitly requested by the application. Readahead only ever - * attempts to read pages that are not yet in the page cache. If a - * page is present but not up-to-date, readahead will not try to read + * attempts to read folios that are not yet in the page cache. If a + * folio is present but not up-to-date, readahead will not try to read * it. In that case a simple ->readpage() will be requested. * * Readahead is triggered when an application read request (whether a - * systemcall or a page fault) finds that the requested page is not in + * system call or a page fault) finds that the requested folio is not in * the page cache, or that it is in the page cache and has the - * %PG_readahead flag set. This flag indicates that the page was loaded - * as part of a previous read-ahead request and now that it has been - * accessed, it is time for the next read-ahead. + * readahead flag set. This flag indicates that the folio was read + * as part of a previous readahead request and now that it has been + * accessed, it is time for the next readahead. * * Each readahead request is partly synchronous read, and partly async - * read-ahead. This is reflected in the struct file_ra_state which - * contains ->size being to total number of pages, and ->async_size - * which is the number of pages in the async section. The first page in - * this async section will have %PG_readahead set as a trigger for a - * subsequent read ahead. Once a series of sequential reads has been + * readahead. This is reflected in the struct file_ra_state which + * contains ->size being the total number of pages, and ->async_size + * which is the number of pages in the async section. The readahead + * flag will be set on the first folio in this async section to trigger + * a subsequent readahead. Once a series of sequential reads has been * established, there should be no need for a synchronous component and - * all read ahead request will be fully asynchronous. + * all readahead request will be fully asynchronous. * - * When either of the triggers causes a readahead, three numbers need to - * be determined: the start of the region, the size of the region, and - * the size of the async tail. 
+ * When either of the triggers causes a readahead, three numbers need + * to be determined: the start of the region to read, the size of the + * region, and the size of the async tail. * * The start of the region is simply the first page address at or after * the accessed address, which is not currently populated in the page @@ -45,14 +45,14 @@ * was explicitly requested from the determined request size, unless * this would be less than zero - then zero is used. NOTE THIS * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED - * PAGE. + * PAGE. ALSO THIS CALCULATION IS NOT USED CONSISTENTLY. * * The size of the region is normally determined from the size of the * previous readahead which loaded the preceding pages. This may be * discovered from the struct file_ra_state for simple sequential reads, * or from examining the state of the page cache when multiple * sequential reads are interleaved. Specifically: where the readahead - * was triggered by the %PG_readahead flag, the size of the previous + * was triggered by the readahead flag, the size of the previous * readahead is assumed to be the number of pages from the triggering * page to the start of the new readahead. In these cases, the size of * the previous readahead is scaled, often doubled, for the new @@ -65,52 +65,52 @@ * larger than the current request, and it is not scaled up, unless it * is at the start of file. * - * In general read ahead is accelerated at the start of the file, as + * In general readahead is accelerated at the start of the file, as * reads from there are often sequential. There are other minor - * adjustments to the read ahead size in various special cases and these + * adjustments to the readahead size in various special cases and these * are best discovered by reading the code. * - * The above calculation determines the readahead, to which any requested - * read size may be added. + * The above calculation, based on the previous readahead size, + * determines the size of the readahead, to which any requested read + * size may be added. * * Readahead requests are sent to the filesystem using the ->readahead() * address space operation, for which mpage_readahead() is a canonical * implementation. ->readahead() should normally initiate reads on all - * pages, but may fail to read any or all pages without causing an IO + * folios, but may fail to read any or all folios without causing an I/O * error. The page cache reading code will issue a ->readpage() request - * for any page which ->readahead() does not provided, and only an error + * for any folio which ->readahead() did not read, and only an error * from this will be final. * - * ->readahead() will generally call readahead_page() repeatedly to get - * each page from those prepared for read ahead. It may fail to read a - * page by: + * ->readahead() will generally call readahead_folio() repeatedly to get + * each folio from those prepared for readahead. It may fail to read a + * folio by: * - * * not calling readahead_page() sufficiently many times, effectively - * ignoring some pages, as might be appropriate if the path to + * * not calling readahead_folio() sufficiently many times, effectively + * ignoring some folios, as might be appropriate if the path to * storage is congested. * - * * failing to actually submit a read request for a given page, + * * failing to actually submit a read request for a given folio, * possibly due to insufficient resources, or * * * getting an error during subsequent processing of a request. 
* - * In the last two cases, the page should be unlocked to indicate that - * the read attempt has failed. In the first case the page will be - * unlocked by the caller. + * In the last two cases, the folio should be unlocked by the filesystem + * to indicate that the read attempt has failed. In the first case the + * folio will be unlocked by the VFS. * - * Those pages not in the final ``async_size`` of the request should be + * Those folios not in the final ``async_size`` of the request should be * considered to be important and ->readahead() should not fail them due * to congestion or temporary resource unavailability, but should wait * for necessary resources (e.g. memory or indexing information) to - * become available. Pages in the final ``async_size`` may be + * become available. Folios in the final ``async_size`` may be * considered less urgent and failure to read them is more acceptable. - * In this case it is best to use delete_from_page_cache() to remove the - * pages from the page cache as is automatically done for pages that - * were not fetched with readahead_page(). This will allow a - * subsequent synchronous read ahead request to try them again. If they + * In this case it is best to use filemap_remove_folio() to remove the + * folios from the page cache as is automatically done for folios that + * were not fetched with readahead_folio(). This will allow a + * subsequent synchronous readahead request to try them again. If they * are left in the page cache, then they will be read individually using - * ->readpage(). - * + * ->readpage() which may be less efficient. */ #include @@ -157,7 +157,7 @@ static void read_pages(struct readahead_control *rac) aops->readahead(rac); /* * Clean up the remaining pages. The sizes in ->ra - * maybe be used to size next read-ahead, so make sure + * may be used to size the next readahead, so make sure * they accurately reflect what happened. */ while ((page = readahead_page(rac))) { @@ -420,7 +420,7 @@ static pgoff_t count_history_pages(struct address_space *mapping, } /* - * page cache context based read-ahead + * page cache context based readahead */ static int try_context_readahead(struct address_space *mapping, struct file_ra_state *ra, @@ -671,9 +671,9 @@ void page_cache_sync_ra(struct readahead_control *ractl, bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM); /* - * Even if read-ahead is disabled, issue this request as read-ahead + * Even if readahead is disabled, issue this request as readahead * as we'll need it to satisfy the requested range. The forced - * read-ahead will do the right thing and limit the read to just the + * readahead will do the right thing and limit the read to just the * requested range, which we'll set to 1 page for this case. 
*/ if (!ractl->ra->ra_pages || blk_cgroup_congested()) { @@ -689,7 +689,6 @@ void page_cache_sync_ra(struct readahead_control *ractl, return; } - /* do read-ahead */ ondemand_readahead(ractl, NULL, req_count); } EXPORT_SYMBOL_GPL(page_cache_sync_ra); @@ -697,7 +696,7 @@ EXPORT_SYMBOL_GPL(page_cache_sync_ra); void page_cache_async_ra(struct readahead_control *ractl, struct folio *folio, unsigned long req_count) { - /* no read-ahead */ + /* no readahead */ if (!ractl->ra->ra_pages) return; @@ -712,7 +711,6 @@ void page_cache_async_ra(struct readahead_control *ractl, if (blk_cgroup_congested()) return; - /* do read-ahead */ ondemand_readahead(ractl, folio, req_count); } EXPORT_SYMBOL_GPL(page_cache_async_ra); -- cgit v1.2.3-59-g8ed1b From 800ba29547e16d5fbe67ca764ba660e049e9f1bf Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 19 Feb 2022 23:19:49 -0500 Subject: fs: Pass an iocb to generic_perform_write() We can extract both the file pointer and the pos from the iocb. This simplifies each caller as well as allowing generic_perform_write() to see more of the iocb contents in the future. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Christian Brauner Reviewed-by: Al Viro Acked-by: Al Viro --- fs/ceph/file.c | 2 +- fs/ext4/file.c | 2 +- fs/f2fs/file.c | 2 +- fs/nfs/file.c | 2 +- include/linux/fs.h | 2 +- mm/filemap.c | 10 ++++++---- 6 files changed, 11 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/fs/ceph/file.c b/fs/ceph/file.c index feb75eb1cd82..6c9e837aa1d3 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1869,7 +1869,7 @@ retry_snap: * are pending vmtruncate. So write and vmtruncate * can not run at the same time */ - written = generic_perform_write(file, from, pos); + written = generic_perform_write(iocb, from); if (likely(written >= 0)) iocb->ki_pos = pos + written; ceph_end_io_write(inode); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 8bd66cdc41be..6feb07e3e1eb 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -267,7 +267,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, goto out; current->backing_dev_info = inode_to_bdi(inode); - ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos); + ret = generic_perform_write(iocb, from); current->backing_dev_info = NULL; out: diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index d3f39a704b8b..5b89af0f27f0 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -4448,7 +4448,7 @@ static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb, return -EOPNOTSUPP; current->backing_dev_info = inode_to_bdi(inode); - ret = generic_perform_write(file, from, iocb->ki_pos); + ret = generic_perform_write(iocb, from); current->backing_dev_info = NULL; if (ret > 0) { diff --git a/fs/nfs/file.c b/fs/nfs/file.c index b0ca244c50d0..150b7fa8f0a7 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -646,7 +646,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) result = generic_write_checks(iocb, from); if (result > 0) { current->backing_dev_info = inode_to_bdi(inode); - result = generic_perform_write(file, from, iocb->ki_pos); + result = generic_perform_write(iocb, from); current->backing_dev_info = NULL; } nfs_end_io_write(inode); diff --git a/include/linux/fs.h b/include/linux/fs.h index 8ff28939de60..468dc7ec821f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2999,7 +2999,7 @@ extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern 
ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); -extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); +ssize_t generic_perform_write(struct kiocb *, struct iov_iter *); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, rwf_t flags); diff --git a/mm/filemap.c b/mm/filemap.c index d904cd7e4181..3a5ffb5587cd 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3752,9 +3752,10 @@ out: } EXPORT_SYMBOL(generic_file_direct_write); -ssize_t generic_perform_write(struct file *file, - struct iov_iter *i, loff_t pos) +ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i) { + struct file *file = iocb->ki_filp; + loff_t pos = iocb->ki_pos; struct address_space *mapping = file->f_mapping; const struct address_space_operations *a_ops = mapping->a_ops; long status = 0; @@ -3884,7 +3885,8 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) goto out; - status = generic_perform_write(file, from, pos = iocb->ki_pos); + pos = iocb->ki_pos; + status = generic_perform_write(iocb, from); /* * If generic_perform_write() returned a synchronous error * then we want to return the number of bytes which were @@ -3916,7 +3918,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) */ } } else { - written = generic_perform_write(file, from, iocb->ki_pos); + written = generic_perform_write(iocb, from); if (likely(written > 0)) iocb->ki_pos += written; } -- cgit v1.2.3-59-g8ed1b
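
For reference, a converted caller now looks roughly like the sketch below, mirroring the ext4/f2fs/nfs hunks above: the file pointer and position come from the iocb, and the caller advances iocb->ki_pos itself once the write succeeds. example_buffered_write() is an illustrative wrapper, not code from any one filesystem.

/*
 * Illustrative caller after the conversion: the file and position are
 * taken from the iocb rather than passed separately, and the caller
 * advances iocb->ki_pos itself.
 */
static ssize_t example_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t written;

	current->backing_dev_info = inode_to_bdi(inode);
	written = generic_perform_write(iocb, from);
	current->backing_dev_info = NULL;

	if (written > 0)
		iocb->ki_pos += written;
	return written;
}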